Use ruff for checking python code
pobonomo committed Jan 1, 2025
1 parent 77ba8ed commit 0db5840
Showing 40 changed files with 166 additions and 150 deletions.
24 changes: 11 additions & 13 deletions .pre-commit-config.yaml
@@ -14,12 +14,6 @@ repos:
files: (^(src|tests)/)|(^[^/]*$)
args: [--in-place, --remove-all-unused-imports, --recursive, --ignore-init-module-imports]

- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
args: ["--profile", "black", "--filter-files"]

- repo: https://github.com/kynan/nbstripout
rev: 0.6.0
hooks:
@@ -31,13 +25,17 @@ repos:
- id: flake8
args: ["--count", "--select=E9,F63,F7,F82", "--show-source", "--statistics"]

- repo: https://github.com/psf/black
rev: 23.10.1
hooks:
- id: black-jupyter
language_version: python3.11
language: python
types: [python]
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.8.2
hooks:
# Run the linter.
- id: ruff
types_or: [ python, pyi, jupyter ]
args: [ --fix ]
# Run the formatter.
- id: ruff-format
types_or: [ python, pyi, jupyter ]

- repo: local
hooks:
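Note: the change above drops the isort and black hooks and adds ruff's linter (run with --fix) and formatter in their place, covering plain Python files, .pyi stubs, and notebooks. As a minimal sketch of the kind of cleanup the linter automates (the file below is hypothetical, not part of the commit):

import sys  # unused: ruff flags this as F401, and `ruff --fix` deletes the line
import pandas as pd
import numpy as np  # if ruff's isort rules ("I") are enabled, imports are also re-sorted

x = np.arange(3)
print(pd.Series(x))

With the pre-commit hooks installed, these fixes are applied automatically on every commit.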
4 changes: 0 additions & 4 deletions docs/examples/example2_student_admission.py
@@ -81,17 +81,13 @@
"""

import sys

import gurobipy as gp
import gurobipy_pandas as gppd
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor

from gurobi_ml import add_predictor_constr

28 changes: 10 additions & 18 deletions docs/examples/example4_price_optimization.py
@@ -75,14 +75,20 @@
# the data.
#

import warnings

import gurobipy as gp
import gurobipy_pandas as gppd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
from sklearn import tree
from sklearn.compose import make_column_transformer
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler

from gurobi_ml import add_predictor_constr

######################################################################
# The dataset from HAB contains sales data for the years 2019-2022. This
@@ -310,8 +316,6 @@ def peak_season(row):
# weights using ``Scikit-learn``.
#

from sklearn.model_selection import train_test_split

X = df[["region", "price", "year", "peak"]]
y = df["units_sold"]
# Split the data for training and testing
@@ -330,9 +334,6 @@ def peak_season(row):
# ``make_column_transformer``.
#

from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

feat_transform = make_column_transformer(
(OneHotEncoder(drop="first"), ["region"]),
(StandardScaler(), ["price", "year"]),
@@ -349,10 +350,6 @@ def peak_season(row):
# Define it and train it.
#

from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline

lin_reg = make_pipeline(feat_transform, LinearRegression())
lin_reg.fit(X_train, y_train)

@@ -497,9 +494,6 @@ def peak_season(row):
# dataframe.
#

import gurobipy as gp
import gurobipy_pandas as gppd

m = gp.Model("Avocado_Price_Allocation")

p = gppd.add_vars(m, data, name="price", lb=a_min, ub=a_max)
@@ -655,8 +649,6 @@ def peak_season(row):
# to insert the constraints linking the features and the demand.
#

from gurobi_ml import add_predictor_constr

pred_constr = add_predictor_constr(m, lin_reg, feats, d)

pred_constr.print_stats()
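Most of this diff simply hoists imports that used to sit next to the tutorial section using them up to the single import block at the top of the script. Ruff's default rule set includes pycodestyle's E402 (module level import not at top of file), which is presumably what motivated the move. A minimal hypothetical sketch of the pattern:

"""A script with narrative sections, like the examples in this repository."""

print("section 1: prepare the data")

import numpy as np  # E402: module level import not at top of file

print("section 2:", np.pi)

Moving the import above the first statement silences the warning and matches the consolidated import block now at the top of example4_price_optimization.py.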
1 change: 0 additions & 1 deletion docs/examples_userguide/example_simple.py
@@ -23,7 +23,6 @@
import gurobipy as gp
import numpy as np
from sklearn.datasets import make_regression
from sklearn.metrics import mean_squared_error
from sklearn.neural_network import MLPRegressor

from gurobi_ml import add_predictor_constr
24 changes: 16 additions & 8 deletions notebooks/Functions_approx/2DPeakFunction skorch polynomials.ipynb
@@ -112,6 +112,8 @@
"# Create a small class to apply polynomial features and convert\n",
"\n",
"from sklearn.base import BaseEstimator, TransformerMixin\n",
"\n",
"\n",
"class MyPolynomialFeatures(BaseEstimator, TransformerMixin):\n",
" def __init__(self):\n",
" self.poly_feat = PolynomialFeatures()\n",
@@ -136,12 +138,12 @@
"hs = 16\n",
"nn_regression = NeuralNetRegressor(\n",
" torch.nn.Sequential(\n",
" torch.nn.Linear(6, hs),\n",
" torch.nn.ReLU(),\n",
" torch.nn.Linear(hs, hs),\n",
" torch.nn.ReLU(),\n",
" torch.nn.Linear(hs, 1),\n",
"),\n",
" torch.nn.Linear(6, hs),\n",
" torch.nn.ReLU(),\n",
" torch.nn.Linear(hs, hs),\n",
" torch.nn.ReLU(),\n",
" torch.nn.Linear(hs, 1),\n",
" ),\n",
" max_epochs=20,\n",
" lr=0.1,\n",
" iterator_train__shuffle=True,\n",
@@ -246,7 +248,10 @@
"# the function to add a pytorch model on the pytorch model\n",
"# - Register that function by associating it to the NeuralNetRegressor class\n",
"def add_skorch_constr(gp_model, skorch_model, input_vars, output_vars=None, **kwargs):\n",
" return add_sequential_constr(gp_model, skorch_model.module, input_vars, output_vars, **kwargs)\n",
" return add_sequential_constr(\n",
" gp_model, skorch_model.module, input_vars, output_vars, **kwargs\n",
" )\n",
"\n",
"\n",
"register_predictor_constr(NeuralNetRegressor, add_skorch_constr)"
]
@@ -260,7 +265,10 @@
"source": [
"# Now do the same for the polynomial features\n",
"def add_my_polynomial_features_constr(gp_model, poly_feat, input_vars, **kwargs):\n",
" return add_polynomial_features_constr(gp_model, poly_feat.poly_feat, input_vars, **kwargs)\n",
" return add_polynomial_features_constr(\n",
" gp_model, poly_feat.poly_feat, input_vars, **kwargs\n",
" )\n",
"\n",
"\n",
"register_predictor_constr(MyPolynomialFeatures, add_my_polynomial_features_constr)"
]
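The reindentation above is ruff-format's black-compatible layout: top-level definitions get two blank lines around them, and a call that no longer fits in the default 88-column line moves its arguments to a continuation line indented one level. A self-contained sketch with hypothetical names:

def combine(a, b, c, d=None):
    return (a, b, c, d)


# Short calls stay on one line.
x = combine(1, 2, 3)

# A call that overflows 88 columns is wrapped like add_skorch_constr above:
y = combine(
    "first positional argument", "second positional argument", "third argument"
)

If even the indented argument line were too long, ruff-format would go one step further and put each argument on its own line with a trailing comma.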
20 changes: 10 additions & 10 deletions notebooks/Functions_approx/2DPeakFunction skorch.ipynb
@@ -32,12 +32,9 @@
"from matplotlib import cm\n",
"from matplotlib import pyplot as plt\n",
"from sklearn import metrics\n",
"from sklearn.pipeline import make_pipeline\n",
"from sklearn.preprocessing import PolynomialFeatures\n",
"\n",
"from gurobi_ml import add_predictor_constr, register_predictor_constr\n",
"\n",
"from gurobi_ml.sklearn import add_polynomial_features_constr\n",
"from gurobi_ml.torch import add_sequential_constr"
]
},
@@ -106,12 +103,12 @@
"hs = 30\n",
"nn_regression = NeuralNetRegressor(\n",
" torch.nn.Sequential(\n",
" torch.nn.Linear(2, hs),\n",
" torch.nn.ReLU(),\n",
" torch.nn.Linear(hs, hs),\n",
" torch.nn.ReLU(),\n",
" torch.nn.Linear(hs, 1),\n",
"),\n",
" torch.nn.Linear(2, hs),\n",
" torch.nn.ReLU(),\n",
" torch.nn.Linear(hs, hs),\n",
" torch.nn.ReLU(),\n",
" torch.nn.Linear(hs, 1),\n",
" ),\n",
" max_epochs=20,\n",
" lr=0.1,\n",
" iterator_train__shuffle=True,\n",
@@ -214,7 +211,10 @@
"# the function to add a pytorch model on the pytorch model\n",
"# - Register that function by associating it to the NeuralNetRegressor class\n",
"def add_skorch_constr(gp_model, skorch_model, input_vars, output_vars=None, **kwargs):\n",
" return add_sequential_constr(gp_model, skorch_model.module, input_vars, output_vars, **kwargs)\n",
" return add_sequential_constr(\n",
" gp_model, skorch_model.module, input_vars, output_vars, **kwargs\n",
" )\n",
"\n",
"\n",
"register_predictor_constr(NeuralNetRegressor, add_skorch_constr)"
]
4 changes: 1 addition & 3 deletions notebooks/Functions_approx/Parabola.ipynb
@@ -276,9 +276,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(\n",
" f\"Maximal error in predicted values in solution {np.max(pconstr.get_error())}\"\n",
")"
"print(f\"Maximal error in predicted values in solution {np.max(pconstr.get_error())}\")"
]
},
{
4 changes: 2 additions & 2 deletions notebooks/Janos/Decision Tree.ipynb
@@ -135,10 +135,10 @@
"source": [
"m = gp.Model()\n",
"\n",
"y = gppd.add_vars(m, studentsdata, name='enroll_probability')\n",
"y = gppd.add_vars(m, studentsdata, name=\"enroll_probability\")\n",
"\n",
"# Add variable for merit\n",
"studentsdata = studentsdata.gppd.add_vars(m, lb=0.0, ub=2.5, name='merit')\n",
"studentsdata = studentsdata.gppd.add_vars(m, lb=0.0, ub=2.5, name=\"merit\")\n",
"\n",
"# Keep only features\n",
"studentsdata = studentsdata.loc[:, features]\n",
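The only change in this notebook is ruff-format's quote normalization: string literals are rewritten to use double quotes, as black does. A tiny hypothetical sketch:

before = 'merit'  # single quotes, as in the old notebook cells
after = "merit"  # ruff-format's preferred style
kept = 'contains "quotes"'  # left single-quoted, since switching would force escapes

The same quote change accounts for most of the notebook diffs that follow.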
4 changes: 2 additions & 2 deletions notebooks/Janos/Gradient Boosting Regression.ipynb
@@ -131,10 +131,10 @@
"source": [
"m = gp.Model()\n",
"\n",
"y = gppd.add_vars(m, studentsdata, name='enroll_probability')\n",
"y = gppd.add_vars(m, studentsdata, name=\"enroll_probability\")\n",
"\n",
"# Add variable for merit\n",
"studentsdata = studentsdata.gppd.add_vars(m, lb=0.0, ub=2.5, name='merit')\n",
"studentsdata = studentsdata.gppd.add_vars(m, lb=0.0, ub=2.5, name=\"merit\")\n",
"\n",
"# Keep only features\n",
"studentsdata = studentsdata.loc[:, features]\n",
18 changes: 9 additions & 9 deletions notebooks/Janos/LightGBM.ipynb
@@ -23,12 +23,10 @@
"source": [
"import gurobipy as gp\n",
"import numpy as np\n",
"import math\n",
"import pandas as pd\n",
"import gurobipy_pandas as gppd\n",
"from matplotlib import pyplot as plt\n",
"import lightgbm as lgb\n",
"from sklearn.metrics import r2_score"
"import lightgbm as lgb"
]
},
{
@@ -41,7 +39,8 @@
"%load_ext autoreload\n",
"%autoreload 2\n",
"import sys\n",
"sys.path.append('../../src')"
"\n",
"sys.path.append(\"../../src\")"
]
},
{
@@ -52,8 +51,7 @@
"outputs": [],
"source": [
"from gurobi_ml import add_predictor_constr\n",
"from gurobi_ml import lightgbm\n",
"import gurobi_ml as gml"
"from gurobi_ml import lightgbm"
]
},
{
@@ -167,10 +165,10 @@
"source": [
"m = gp.Model()\n",
"\n",
"y = gppd.add_vars(m, studentsdata, lb=-1e100, name='enroll_probability')\n",
"y = gppd.add_vars(m, studentsdata, lb=-1e100, name=\"enroll_probability\")\n",
"\n",
"# Add variable for merit\n",
"studentsdata = studentsdata.gppd.add_vars(m, lb=0.0, ub=2.5, name='merit')\n",
"studentsdata = studentsdata.gppd.add_vars(m, lb=0.0, ub=2.5, name=\"merit\")\n",
"\n",
"# Keep only features\n",
"studentsdata = studentsdata.loc[:, features]\n",
@@ -255,7 +253,9 @@
"pred_constr.remove()\n",
"\n",
"# Add new constraint setting epsilon to 1e-5\n",
"pred_constr = lightgbm.add_lgbmregressor_constr(m, regression, studentsdata, y, epsilon=1e-4)\n",
"pred_constr = lightgbm.add_lgbmregressor_constr(\n",
" m, regression, studentsdata, y, epsilon=1e-4\n",
")\n",
"\n",
"pred_constr.print_stats()\n",
"\n",
4 changes: 2 additions & 2 deletions notebooks/Janos/Random Forest.ipynb
@@ -130,10 +130,10 @@
"source": [
"m = gp.Model()\n",
"\n",
"y = gppd.add_vars(m, studentsdata, name='enroll_probability')\n",
"y = gppd.add_vars(m, studentsdata, name=\"enroll_probability\")\n",
"\n",
"# Add variable for merit\n",
"studentsdata = studentsdata.gppd.add_vars(m, lb=0.0, ub=2.5, name='merit')\n",
"studentsdata = studentsdata.gppd.add_vars(m, lb=0.0, ub=2.5, name=\"merit\")\n",
"\n",
"# Keep only features\n",
"studentsdata = studentsdata.loc[:, features]\n",
10 changes: 5 additions & 5 deletions notebooks/Janos/XGBoost.ipynb
@@ -23,12 +23,10 @@
"source": [
"import gurobipy as gp\n",
"import numpy as np\n",
"import math\n",
"import pandas as pd\n",
"import gurobipy_pandas as gppd\n",
"from matplotlib import pyplot as plt\n",
"import xgboost as xgb\n",
"from sklearn.metrics import r2_score\n",
"\n",
"from gurobi_ml import add_predictor_constr"
]
Expand Down Expand Up @@ -84,7 +82,9 @@
"source": [
"# Run our regression\n",
"n_estimators = 20\n",
"regression = xgb.XGBRegressor(n_estimators=n_estimators, max_depth=5, random_state=1, booster=\"gbtree\")\n",
"regression = xgb.XGBRegressor(\n",
" n_estimators=n_estimators, max_depth=5, random_state=1, booster=\"gbtree\"\n",
")\n",
"\n",
"regression.fit(X=historical_data.loc[:, features], y=historical_data.loc[:, target])"
]
@@ -135,10 +135,10 @@
"source": [
"m = gp.Model()\n",
"\n",
"y = gppd.add_vars(m, studentsdata, name='enroll_probability')\n",
"y = gppd.add_vars(m, studentsdata, name=\"enroll_probability\")\n",
"\n",
"# Add variable for merit\n",
"studentsdata = studentsdata.gppd.add_vars(m, lb=0.0, ub=2.5, name='merit')\n",
"studentsdata = studentsdata.gppd.add_vars(m, lb=0.0, ub=2.5, name=\"merit\")\n",
"\n",
"# Keep only features\n",
"studentsdata = studentsdata.loc[:, features]\n",
1 change: 0 additions & 1 deletion notebooks/adversarial/adversarial_pytorch.ipynb
@@ -35,7 +35,6 @@
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"from matplotlib import pyplot as plt\n",
"\n",
"import torch\n",
(Diffs for the remaining changed files are not shown.)