Implement MemorizingCutsComponent; STAB: switch to edge formulation

dev
Alinson S. Xavier 2 years ago
parent b81815d35b
commit 8805a83c1c
Signed by: isoron
GPG Key ID: 0DA8E4B9E1109DCA

@@ -108,7 +108,11 @@
 "execution_count": 1,
 "id": "f14e560c-ef9f-4c48-8467-72d6acce5f9f",
 "metadata": {
-"tags": []
+"tags": [],
+"ExecuteTime": {
+ "end_time": "2023-11-07T16:29:48.409419720Z",
+ "start_time": "2023-11-07T16:29:47.824353556Z"
+}
 },
 "outputs": [
 {
@@ -126,10 +130,11 @@
 "8 [ 8.47 21.9 16.58 15.37 3.76 3.91 1.57 20.57 14.76 18.61] 94.58\n",
 "9 [ 8.57 22.77 17.06 16.25 4.14 4. 1.56 22.97 14.09 19.09] 100.79\n",
 "\n",
+"Restricted license - for non-production use only - expires 2024-10-28\n",
 "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n",
 "\n",
-"CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n",
-"Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n",
+"CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n",
+"Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n",
 "\n",
 "Optimize a model with 20 rows, 110 columns and 210 nonzeros\n",
 "Model fingerprint: 0x1ff9913f\n",
@@ -154,22 +159,14 @@
 "H 0 0 2.0000000 1.27484 36.3% - 0s\n",
 " 0 0 1.27484 0 4 2.00000 1.27484 36.3% - 0s\n",
 "\n",
-"Explored 1 nodes (38 simplex iterations) in 0.01 seconds (0.00 work units)\n",
-"Thread count was 32 (of 32 available processors)\n",
+"Explored 1 nodes (38 simplex iterations) in 0.02 seconds (0.00 work units)\n",
+"Thread count was 12 (of 12 available processors)\n",
 "\n",
 "Solution count 3: 2 4 5 \n",
 "\n",
 "Optimal solution found (tolerance 1.00e-04)\n",
 "Best objective 2.000000000000e+00, best bound 2.000000000000e+00, gap 0.0000%\n"
 ]
-},
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"/home/axavier/.conda/envs/miplearn2/lib/python3.9/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
-" from .autonotebook import tqdm as notebook_tqdm\n"
-]
 }
 ],
 "source": [
@@ -304,7 +301,12 @@
 "cell_type": "code",
 "execution_count": 2,
 "id": "1ce5f8fb-2769-4fbd-a40c-fd62b897690a",
-"metadata": {},
+"metadata": {
+"ExecuteTime": {
+ "end_time": "2023-11-07T16:29:48.485068449Z",
+ "start_time": "2023-11-07T16:29:48.406139946Z"
+}
+},
 "outputs": [
 {
 "name": "stdout",
@@ -323,8 +325,8 @@
 "\n",
 "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n",
 "\n",
-"CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n",
-"Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n",
+"CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n",
+"Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n",
 "\n",
 "Optimize a model with 5 rows, 10 columns and 50 nonzeros\n",
 "Model fingerprint: 0xaf3ac15e\n",
@@ -352,7 +354,7 @@
 " Cover: 1\n",
 "\n",
 "Explored 1 nodes (4 simplex iterations) in 0.01 seconds (0.00 work units)\n",
-"Thread count was 32 (of 32 available processors)\n",
+"Thread count was 12 (of 12 available processors)\n",
 "\n",
 "Solution count 2: -1279 -804 \n",
 "No other solutions better than -1279\n",
@@ -470,7 +472,12 @@
 "cell_type": "code",
 "execution_count": 3,
 "id": "4e0e4223-b4e0-4962-a157-82a23a86e37d",
-"metadata": {},
+"metadata": {
+"ExecuteTime": {
+ "end_time": "2023-11-07T16:29:48.575025403Z",
+ "start_time": "2023-11-07T16:29:48.453962705Z"
+}
+},
 "outputs": [
 {
 "name": "stdout",
@@ -493,8 +500,8 @@
 "\n",
 "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n",
 "\n",
-"CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n",
-"Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n",
+"CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n",
+"Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n",
 "\n",
 "Optimize a model with 21 rows, 110 columns and 220 nonzeros\n",
 "Model fingerprint: 0x8d8d9346\n",
@@ -529,7 +536,7 @@
 "* 0 0 0 91.2300000 91.23000 0.00% - 0s\n",
 "\n",
 "Explored 1 nodes (70 simplex iterations) in 0.02 seconds (0.00 work units)\n",
-"Thread count was 32 (of 32 available processors)\n",
+"Thread count was 12 (of 12 available processors)\n",
 "\n",
 "Solution count 10: 91.23 93.92 93.98 ... 368.79\n",
 "\n",
@@ -643,7 +650,12 @@
 "cell_type": "code",
 "execution_count": 4,
 "id": "3224845b-9afd-463e-abf4-e0e93d304859",
-"metadata": {},
+"metadata": {
+"ExecuteTime": {
+ "end_time": "2023-11-07T16:29:48.804292323Z",
+ "start_time": "2023-11-07T16:29:48.492933268Z"
+}
+},
 "outputs": [
 {
 "name": "stdout",
@@ -660,8 +672,8 @@
 "\n",
 "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n",
 "\n",
-"CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n",
-"Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n",
+"CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n",
+"Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n",
 "\n",
 "Optimize a model with 5 rows, 10 columns and 28 nonzeros\n",
 "Model fingerprint: 0xe5c2d4fa\n",
@@ -676,8 +688,8 @@
 "Presolve time: 0.00s\n",
 "Presolve: All rows and columns removed\n",
 "\n",
-"Explored 0 nodes (0 simplex iterations) in 0.00 seconds (0.00 work units)\n",
-"Thread count was 1 (of 32 available processors)\n",
+"Explored 0 nodes (0 simplex iterations) in 0.01 seconds (0.00 work units)\n",
+"Thread count was 1 (of 12 available processors)\n",
 "\n",
 "Solution count 1: 213.49 \n",
 "\n",
@@ -775,8 +787,9 @@
 "id": "cc797da7",
 "metadata": {
 "collapsed": false,
-"jupyter": {
-"outputs_hidden": false
+"ExecuteTime": {
+"end_time": "2023-11-07T16:29:48.806917868Z",
+"start_time": "2023-11-07T16:29:48.781619530Z"
 }
 },
 "outputs": [
@@ -795,8 +808,8 @@
 "\n",
 "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n",
 "\n",
-"CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n",
-"Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n",
+"CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n",
+"Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n",
 "\n",
 "Optimize a model with 5 rows, 10 columns and 28 nonzeros\n",
 "Model fingerprint: 0x4ee91388\n",
@@ -811,9 +824,8 @@
 "Presolve time: 0.00s\n",
 "Presolve: All rows and columns removed\n",
 "\n",
-"Explored 0 nodes (0 simplex iterations) in 0.00 seconds (0.00 work units)\n",
-"Thread count was 1 (of 32 available processors)\n",
-"\n",
+"Explored 0 nodes (0 simplex iterations) in 0.01 seconds (0.00 work units)\n",
+"Thread count was 1 (of 12 available processors)\n",
 "Solution count 2: -1986.37 -1265.56 \n",
 "No other solutions better than -1986.37\n",
 "\n",
@@ -875,11 +887,10 @@
 "$$\n",
 "\\begin{align*}\n",
 "\\text{minimize} \\;\\;\\; & -\\sum_{v \\in V} w_v x_v \\\\\n",
-"\\text{such that} \\;\\;\\; & \\sum_{v \\in C} x_v \\leq 1 & \\forall C \\in \\mathcal{C} \\\\\n",
+"\\text{such that} \\;\\;\\; & x_v + x_u \\leq 1 & \\forall (v,u) \\in E \\\\\n",
 "& x_v \\in \\{0, 1\\} & \\forall v \\in V\n",
 "\\end{align*}\n",
-"$$\n",
-"where $\\mathcal{C}$ is the set of cliques in $G$. We recall that a clique is a subset of vertices in which every pair of vertices is adjacent."
+"$$"
 ]
 },
 {
@@ -903,7 +914,12 @@
 "cell_type": "code",
 "execution_count": 6,
 "id": "0f996e99-0ec9-472b-be8a-30c9b8556931",
-"metadata": {},
+"metadata": {
+"ExecuteTime": {
+ "end_time": "2023-11-07T16:29:48.954896857Z",
+ "start_time": "2023-11-07T16:29:48.825579097Z"
+}
+},
 "outputs": [
 {
 "name": "stdout",
@@ -913,13 +929,14 @@
 "weights[0] [37.45 95.07 73.2 59.87 15.6 15.6 5.81 86.62 60.11 70.81]\n",
 "weights[1] [ 2.06 96.99 83.24 21.23 18.18 18.34 30.42 52.48 43.19 29.12]\n",
 "\n",
+"Set parameter PreCrush to value 1\n",
 "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n",
 "\n",
-"CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n",
-"Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n",
+"CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n",
+"Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n",
 "\n",
-"Optimize a model with 10 rows, 10 columns and 24 nonzeros\n",
-"Model fingerprint: 0xf4c21689\n",
+"Optimize a model with 15 rows, 10 columns and 30 nonzeros\n",
+"Model fingerprint: 0x3240ea4a\n",
 "Variable types: 0 continuous, 10 integer (10 binary)\n",
 "Coefficient statistics:\n",
 " Matrix range [1e+00, 1e+00]\n",
@@ -927,26 +944,28 @@
 " Bounds range [1e+00, 1e+00]\n",
 " RHS range [1e+00, 1e+00]\n",
 "Found heuristic solution: objective -219.1400000\n",
-"Presolve removed 2 rows and 2 columns\n",
+"Presolve removed 7 rows and 2 columns\n",
 "Presolve time: 0.00s\n",
 "Presolved: 8 rows, 8 columns, 19 nonzeros\n",
 "Variable types: 0 continuous, 8 integer (8 binary)\n",
 "\n",
-"Root relaxation: objective -2.205650e+02, 4 iterations, 0.00 seconds (0.00 work units)\n",
+"Root relaxation: objective -2.205650e+02, 5 iterations, 0.00 seconds (0.00 work units)\n",
 "\n",
 " Nodes | Current Node | Objective Bounds | Work\n",
 " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n",
 "\n",
 " 0 0 infeasible 0 -219.14000 -219.14000 0.00% - 0s\n",
 "\n",
-"Explored 1 nodes (4 simplex iterations) in 0.01 seconds (0.00 work units)\n",
-"Thread count was 32 (of 32 available processors)\n",
+"Explored 1 nodes (5 simplex iterations) in 0.01 seconds (0.00 work units)\n",
+"Thread count was 12 (of 12 available processors)\n",
 "\n",
 "Solution count 1: -219.14 \n",
 "No other solutions better than -219.14\n",
 "\n",
 "Optimal solution found (tolerance 1.00e-04)\n",
-"Best objective -2.191400000000e+02, best bound -2.191400000000e+02, gap 0.0000%\n"
+"Best objective -2.191400000000e+02, best bound -2.191400000000e+02, gap 0.0000%\n",
+"\n",
+"User-callback calls 300, time in user-callback 0.00 sec\n"
 ]
 }
 ],
@@ -956,7 +975,7 @@
 "from scipy.stats import uniform, randint\n",
 "from miplearn.problems.stab import (\n",
 " MaxWeightStableSetGenerator,\n",
-" build_stab_model_gurobipy,\n",
+" build_stab_model,\n",
 ")\n",
 "\n",
 "# Set random seed to make example reproducible\n",
@@ -979,7 +998,7 @@
 "print()\n",
 "\n",
 "# Load and optimize the first instance\n",
-"model = build_stab_model_gurobipy(data[0])\n",
+"model = build_stab_model(data[0])\n",
 "model.optimize()\n"
 ]
 },
@@ -1053,8 +1072,9 @@
 "id": "9d0c56c6",
 "metadata": {
 "collapsed": false,
-"jupyter": {
-"outputs_hidden": false
+"ExecuteTime": {
+"end_time": "2023-11-07T16:29:48.958833448Z",
+"start_time": "2023-11-07T16:29:48.898121017Z"
 }
 },
 "outputs": [
@@ -1085,11 +1105,12 @@
 " [ 444. 398. 371. 454. 356. 476. 565. 374. 0. 274.]\n",
 " [ 668. 446. 317. 648. 469. 752. 394. 286. 274. 0.]]\n",
 "\n",
+"Set parameter PreCrush to value 1\n",
 "Set parameter LazyConstraints to value 1\n",
 "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n",
 "\n",
-"CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n",
-"Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n",
+"CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n",
+"Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n",
 "\n",
 "Optimize a model with 10 rows, 45 columns and 90 nonzeros\n",
 "Model fingerprint: 0x719675e5\n",
@@ -1114,7 +1135,7 @@
 " Lazy constraints: 3\n",
 "\n",
 "Explored 1 nodes (17 simplex iterations) in 0.01 seconds (0.00 work units)\n",
-"Thread count was 32 (of 32 available processors)\n",
+"Thread count was 12 (of 12 available processors)\n",
 "\n",
 "Solution count 1: 2921 \n",
 "\n",
@@ -1300,8 +1322,8 @@
 "\n",
 "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n",
 "\n",
-"CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n",
-"Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n",
+"CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n",
+"Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n",
 "\n",
 "Optimize a model with 578 rows, 360 columns and 2128 nonzeros\n",
 "Model fingerprint: 0x4dc1c661\n",
@@ -1312,7 +1334,7 @@
 " Bounds range [1e+00, 1e+00]\n",
 " RHS range [1e+00, 1e+03]\n",
 "Presolve removed 244 rows and 131 columns\n",
-"Presolve time: 0.02s\n",
+"Presolve time: 0.01s\n",
 "Presolved: 334 rows, 229 columns, 842 nonzeros\n",
 "Variable types: 116 continuous, 113 integer (113 binary)\n",
 "Found heuristic solution: objective 440662.46430\n",
@@ -1340,7 +1362,7 @@
 " Relax-and-lift: 7\n",
 "\n",
 "Explored 1 nodes (234 simplex iterations) in 0.04 seconds (0.02 work units)\n",
-"Thread count was 32 (of 32 available processors)\n",
+"Thread count was 12 (of 12 available processors)\n",
 "\n",
 "Solution count 5: 364722 368600 374044 ... 440662\n",
 "\n",
@@ -1450,7 +1472,12 @@
 "cell_type": "code",
 "execution_count": 9,
 "id": "5fff7afe-5b7a-4889-a502-66751ec979bf",
-"metadata": {},
+"metadata": {
+"ExecuteTime": {
+ "end_time": "2023-11-07T16:29:49.075657363Z",
+ "start_time": "2023-11-07T16:29:49.049561363Z"
+}
+},
 "outputs": [
 {
 "name": "stdout",
@@ -1462,8 +1489,8 @@
 "\n",
 "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n",
 "\n",
-"CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n",
-"Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n",
+"CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n",
+"Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n",
 "\n",
 "Optimize a model with 15 rows, 10 columns and 30 nonzeros\n",
 "Model fingerprint: 0x2d2d1390\n",
@@ -1487,7 +1514,7 @@
 " 0 0 infeasible 0 301.00000 301.00000 0.00% - 0s\n",
 "\n",
 "Explored 1 nodes (8 simplex iterations) in 0.01 seconds (0.00 work units)\n",
-"Thread count was 32 (of 32 available processors)\n",
+"Thread count was 12 (of 12 available processors)\n",
 "\n",
 "Solution count 1: 301 \n",
 "\n",
@@ -1531,12 +1558,13 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 9,
 "id": "9f12e91f",
 "metadata": {
 "collapsed": false,
-"jupyter": {
-"outputs_hidden": false
+"ExecuteTime": {
+"end_time": "2023-11-07T16:29:49.075852252Z",
+"start_time": "2023-11-07T16:29:49.050243601Z"
 }
 },
 "outputs": [],

@@ -60,8 +60,7 @@ class BasicCollector:
 # Add lazy constraints to model
 if model.lazy_enforce is not None:
-    model.lazy_enforce(model, model.lazy_constrs_)
-    h5.put_scalar("mip_lazy", repr(model.lazy_constrs_))
+    model.lazy_enforce(model, model.lazy_)

 # Save MPS file
 model.write(mps_filename)
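
Note: with this change the collector no longer writes the `mip_lazy` scalar itself; `GurobiModel` now records both `mip_lazy` and `mip_cuts` after the MIP solve (see the solver changes further below). A minimal sketch of how these scalars are read back from a training sample, mirroring what the new component's `fit()` does; the file name is a placeholder:

from miplearn.h5 import H5File

# Hypothetical training sample produced by BasicCollector.
with H5File("stab-n50-00000.h5", "r") as h5:
    cuts_str = h5.get_scalar("mip_cuts")  # repr() of the list stored by GurobiModel
    cuts = eval(cuts_str)                 # list of hashable violations (e.g. sorted cliques)
    print(f"{len(cuts)} cuts were separated while collecting this sample")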

@@ -0,0 +1,105 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
from typing import List, Dict, Any, Hashable, Union

import numpy as np
from sklearn.preprocessing import MultiLabelBinarizer

from miplearn.extractors.abstract import FeaturesExtractor
from miplearn.h5 import H5File
from miplearn.solvers.abstract import AbstractModel

logger = logging.getLogger(__name__)


class _BaseMemorizingConstrComponent:
    def __init__(self, clf: Any, extractor: FeaturesExtractor, field: str) -> None:
        self.clf = clf
        self.extractor = extractor
        self.constrs_: List[Hashable] = []
        self.n_features_: int = 0
        self.n_targets_: int = 0
        self.field = field

    def fit(
        self,
        train_h5: List[str],
    ) -> None:
        logger.info("Reading training data...")
        n_samples = len(train_h5)
        x, y, constrs, n_features = [], [], [], None
        constr_to_idx: Dict[Hashable, int] = {}
        for h5_filename in train_h5:
            with H5File(h5_filename, "r") as h5:
                # Store constraints
                sample_constrs_str = h5.get_scalar(self.field)
                assert sample_constrs_str is not None
                assert isinstance(sample_constrs_str, str)
                sample_constrs = eval(sample_constrs_str)
                assert isinstance(sample_constrs, list)
                y_sample = []
                for c in sample_constrs:
                    if c not in constr_to_idx:
                        constr_to_idx[c] = len(constr_to_idx)
                        constrs.append(c)
                    y_sample.append(constr_to_idx[c])
                y.append(y_sample)

                # Extract features
                x_sample = self.extractor.get_instance_features(h5)
                assert len(x_sample.shape) == 1
                if n_features is None:
                    n_features = len(x_sample)
                else:
                    assert len(x_sample) == n_features
                x.append(x_sample)

        logger.info("Constructing matrices...")
        assert n_features is not None
        self.n_features_ = n_features
        self.constrs_ = constrs
        self.n_targets_ = len(constr_to_idx)
        x_np = np.vstack(x)
        assert x_np.shape == (n_samples, n_features)
        y_np = MultiLabelBinarizer().fit_transform(y)
        assert y_np.shape == (n_samples, self.n_targets_)
        logger.info(
            f"Dataset has {n_samples:,d} samples, "
            f"{n_features:,d} features and {self.n_targets_:,d} targets"
        )

        logger.info("Training classifier...")
        self.clf.fit(x_np, y_np)

    def predict(
        self,
        msg: str,
        test_h5: str,
    ) -> List[Hashable]:
        with H5File(test_h5, "r") as h5:
            x_sample = self.extractor.get_instance_features(h5)
            assert x_sample.shape == (self.n_features_,)
            x_sample = x_sample.reshape(1, -1)
        logger.info(msg)
        y = self.clf.predict(x_sample)
        assert y.shape == (1, self.n_targets_)
        y = y.reshape(-1)
        return [self.constrs_[i] for (i, yi) in enumerate(y) if yi > 0.5]


class MemorizingCutsComponent(_BaseMemorizingConstrComponent):
    def __init__(self, clf: Any, extractor: FeaturesExtractor) -> None:
        super().__init__(clf, extractor, "mip_cuts")

    def before_mip(
        self,
        test_h5: str,
        model: AbstractModel,
        stats: Dict[str, Any],
    ) -> None:
        if model.cuts_enforce is None:
            return
        assert self.constrs_ is not None
        model.cuts_aot_ = self.predict("Predicting cutting planes...", test_h5)
        stats["Cuts: AOT"] = len(model.cuts_aot_)

@@ -1,74 +1,22 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
 import logging
 from typing import List, Dict, Any, Hashable
-import numpy as np
-from sklearn.preprocessing import MultiLabelBinarizer
+from miplearn.components.cuts.mem import (
+    _BaseMemorizingConstrComponent,
+)
 from miplearn.extractors.abstract import FeaturesExtractor
-from miplearn.h5 import H5File
 from miplearn.solvers.abstract import AbstractModel

 logger = logging.getLogger(__name__)


-class MemorizingLazyConstrComponent:
+class MemorizingLazyComponent(_BaseMemorizingConstrComponent):
     def __init__(self, clf: Any, extractor: FeaturesExtractor) -> None:
-        self.clf = clf
-        self.extractor = extractor
-        self.constrs_: List[Hashable] = []
-        self.n_features_: int = 0
-        self.n_targets_: int = 0
-
-    def fit(self, train_h5: List[str]) -> None:
-        logger.info("Reading training data...")
-        n_samples = len(train_h5)
-        x, y, constrs, n_features = [], [], [], None
-        constr_to_idx: Dict[Hashable, int] = {}
-        for h5_filename in train_h5:
-            with H5File(h5_filename, "r") as h5:
-                # Store lazy constraints
-                sample_constrs_str = h5.get_scalar("mip_lazy")
-                assert sample_constrs_str is not None
-                assert isinstance(sample_constrs_str, str)
-                sample_constrs = eval(sample_constrs_str)
-                assert isinstance(sample_constrs, list)
-                y_sample = []
-                for c in sample_constrs:
-                    if c not in constr_to_idx:
-                        constr_to_idx[c] = len(constr_to_idx)
-                        constrs.append(c)
-                    y_sample.append(constr_to_idx[c])
-                y.append(y_sample)
-
-                # Extract features
-                x_sample = self.extractor.get_instance_features(h5)
-                assert len(x_sample.shape) == 1
-                if n_features is None:
-                    n_features = len(x_sample)
-                else:
-                    assert len(x_sample) == n_features
-                x.append(x_sample)
-
-        logger.info("Constructing matrices...")
-        assert n_features is not None
-        self.n_features_ = n_features
-        self.constrs_ = constrs
-        self.n_targets_ = len(constr_to_idx)
-        x_np = np.vstack(x)
-        assert x_np.shape == (n_samples, n_features)
-        y_np = MultiLabelBinarizer().fit_transform(y)
-        assert y_np.shape == (n_samples, self.n_targets_)
-        logger.info(
-            f"Dataset has {n_samples:,d} samples, "
-            f"{n_features:,d} features and {self.n_targets_:,d} targets"
-        )
-
-        logger.info("Training classifier...")
-        self.clf.fit(x_np, y_np)
+        super().__init__(clf, extractor, "mip_lazy")

     def before_mip(
         self,
@@ -78,23 +26,8 @@ class MemorizingLazyConstrComponent:
     ) -> None:
         if model.lazy_enforce is None:
             return
         assert self.constrs_ is not None
+        violations = self.predict("Predicting violated lazy constraints...", test_h5)
-
-        # Read features
-        with H5File(test_h5, "r") as h5:
-            x_sample = self.extractor.get_instance_features(h5)
-            assert x_sample.shape == (self.n_features_,)
-            x_sample = x_sample.reshape(1, -1)
-
-        # Predict violated constraints
-        logger.info("Predicting violated lazy constraints...")
-        y = self.clf.predict(x_sample)
-        assert y.shape == (1, self.n_targets_)
-        y = y.reshape(-1)
-
-        # Enforce constraints
-        violations = [self.constrs_[i] for (i, yi) in enumerate(y) if yi > 0.5]
         logger.info(f"Enforcing {len(violations)} constraints ahead-of-time...")
         model.lazy_enforce(model, violations)
         stats["Lazy Constraints: AOT"] = len(violations)

@@ -1,14 +1,13 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
+import logging
 from dataclasses import dataclass
-from typing import List, Union
+from typing import List, Union, Any, Hashable

 import gurobipy as gp
 import networkx as nx
 import numpy as np
-import pyomo.environ as pe
 from gurobipy import GRB, quicksum
 from networkx import Graph
 from scipy.stats import uniform, randint
@@ -16,7 +15,8 @@ from scipy.stats.distributions import rv_frozen
 from miplearn.io import read_pkl_gz
 from miplearn.solvers.gurobi import GurobiModel
-from miplearn.solvers.pyomo import PyomoModel
+
+logger = logging.getLogger(__name__)


 @dataclass
@@ -82,35 +82,43 @@ class MaxWeightStableSetGenerator:
         return nx.generators.random_graphs.binomial_graph(self.n.rvs(), self.p.rvs())


-def build_stab_model_gurobipy(data: MaxWeightStableSetData) -> GurobiModel:
-    data = _read_stab_data(data)
+def build_stab_model(data: MaxWeightStableSetData) -> GurobiModel:
+    if isinstance(data, str):
+        data = read_pkl_gz(data)
+    assert isinstance(data, MaxWeightStableSetData)
+
     model = gp.Model()
     nodes = list(data.graph.nodes)
+
+    # Variables and objective function
     x = model.addVars(nodes, vtype=GRB.BINARY, name="x")
     model.setObjective(quicksum(-data.weights[i] * x[i] for i in nodes))
-    for clique in nx.find_cliques(data.graph):
-        model.addConstr(quicksum(x[i] for i in clique) <= 1)
-    model.update()
-    return GurobiModel(model)
-
-
-def build_stab_model_pyomo(
-    data: MaxWeightStableSetData,
-    solver: str = "gurobi_persistent",
-) -> PyomoModel:
-    data = _read_stab_data(data)
-    model = pe.ConcreteModel()
-    nodes = pe.Set(initialize=list(data.graph.nodes))
-    model.x = pe.Var(nodes, domain=pe.Boolean, name="x")
-    model.obj = pe.Objective(expr=sum([-data.weights[i] * model.x[i] for i in nodes]))
-    model.clique_eqs = pe.ConstraintList()
-    for clique in nx.find_cliques(data.graph):
-        model.clique_eqs.add(expr=sum(model.x[i] for i in clique) <= 1)
-    return PyomoModel(model, solver)
-
-
-def _read_stab_data(data: Union[str, MaxWeightStableSetData]) -> MaxWeightStableSetData:
-    if isinstance(data, str):
-        data = read_pkl_gz(data)
-    assert isinstance(data, MaxWeightStableSetData)
-    return data
+
+    # Edge inequalities
+    for (i1, i2) in data.graph.edges:
+        model.addConstr(x[i1] + x[i2] <= 1)
+
+    def cuts_separate(m: GurobiModel) -> List[Hashable]:
+        # Retrieve optimal fractional solution
+        x_val = m.inner.cbGetNodeRel(x)
+
+        # Check that we selected at most one vertex for each
+        # clique in the graph (sum <= 1)
+        violations: List[Hashable] = []
+        for clique in nx.find_cliques(data.graph):
+            if sum(x_val[i] for i in clique) > 1.0001:
+                violations.append(tuple(sorted(clique)))
+
+        return violations
+
+    def cuts_enforce(m: GurobiModel, violations: List[Any]) -> None:
+        logger.info(f"Adding {len(violations)} clique cuts...")
+        for clique in violations:
+            m.add_constr(quicksum(x[i] for i in clique) <= 1)
+
+    model.update()
+
+    return GurobiModel(
+        model,
+        cuts_separate=cuts_separate,
+        cuts_enforce=cuts_enforce,
+    )
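
The rebuilt model now starts from the weaker edge formulation and recovers clique inequalities at runtime: `cuts_separate` is invoked from a Gurobi MIPNODE callback on the fractional node solution, and `cuts_enforce` adds the violated cliques. A short usage sketch, adapted from the tutorial notebook in this commit; the generator parameters are illustrative:

import numpy as np
from scipy.stats import uniform, randint
from miplearn.problems.stab import MaxWeightStableSetGenerator, build_stab_model

np.random.seed(42)
gen = MaxWeightStableSetGenerator(
    w=uniform(10.0, scale=1.0),
    n=randint(low=10, high=11),
    p=uniform(loc=0.5, scale=0.0),
    fix_graph=True,
)
data = gen.generate(1)
model = build_stab_model(data[0])  # GurobiModel with cuts_separate/cuts_enforce attached
model.optimize()                   # clique cuts are separated via callback during the solve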

@@ -23,7 +23,11 @@ class AbstractModel(ABC):
     def __init__(self) -> None:
         self.lazy_enforce: Optional[Callable] = None
         self.lazy_separate: Optional[Callable] = None
-        self.lazy_constrs_: Optional[List[Any]] = None
+        self.lazy_: Optional[List[Any]] = None
+        self.cuts_enforce: Optional[Callable] = None
+        self.cuts_separate: Optional[Callable] = None
+        self.cuts_: Optional[List[Any]] = None
+        self.cuts_aot_: Optional[List[Any]] = None
         self.where = self.WHERE_DEFAULT

     @abstractmethod

@@ -1,6 +1,7 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
+import logging
 from typing import Dict, Optional, Callable, Any, List

 import gurobipy as gp
@@ -11,16 +12,40 @@ from scipy.sparse import lil_matrix
 from miplearn.h5 import H5File
 from miplearn.solvers.abstract import AbstractModel

-def _gurobi_callback(model: AbstractModel, where: int) -> None:
-    assert model.lazy_separate is not None
-    assert model.lazy_enforce is not None
-    assert model.lazy_constrs_ is not None
-    if where == GRB.Callback.MIPSOL:
-        model.where = model.WHERE_LAZY
-        violations = model.lazy_separate(model)
-        model.lazy_constrs_.extend(violations)
-        model.lazy_enforce(model, violations)
+logger = logging.getLogger(__name__)
+
+
+def _gurobi_callback(model: AbstractModel, gp_model: gp.Model, where: int) -> None:
+    # Lazy constraints
+    if model.lazy_separate is not None:
+        assert model.lazy_enforce is not None
+        assert model.lazy_ is not None
+        if where == GRB.Callback.MIPSOL:
+            model.where = model.WHERE_LAZY
+            violations = model.lazy_separate(model)
+            if len(violations) > 0:
+                model.lazy_.extend(violations)
+                model.lazy_enforce(model, violations)
+
+    # User cuts
+    if model.cuts_separate is not None:
+        assert model.cuts_enforce is not None
+        assert model.cuts_ is not None
+        if where == GRB.Callback.MIPNODE:
+            status = gp_model.cbGet(GRB.Callback.MIPNODE_STATUS)
+            if status == GRB.OPTIMAL:
+                model.where = model.WHERE_CUTS
+                if model.cuts_aot_ is not None:
+                    violations = model.cuts_aot_
+                    model.cuts_aot_ = None
+                    logger.info(f"Enforcing {len(violations)} cuts ahead-of-time...")
+                else:
+                    violations = model.cuts_separate(model)
+                if len(violations) > 0:
+                    model.cuts_.extend(violations)
+                    model.cuts_enforce(model, violations)
+
+    # Cleanup
     model.where = model.WHERE_DEFAULT
@@ -44,10 +69,14 @@ class GurobiModel(AbstractModel):
         inner: gp.Model,
         lazy_separate: Optional[Callable] = None,
         lazy_enforce: Optional[Callable] = None,
+        cuts_separate: Optional[Callable] = None,
+        cuts_enforce: Optional[Callable] = None,
     ) -> None:
         super().__init__()
         self.lazy_separate = lazy_separate
         self.lazy_enforce = lazy_enforce
+        self.cuts_separate = cuts_separate
+        self.cuts_enforce = cuts_enforce
         self.inner = inner

     def add_constrs(
@@ -125,6 +154,10 @@ class GurobiModel(AbstractModel):
         except AttributeError:
             pass
         self._extract_after_mip_solution_pool(h5)
+        if self.lazy_ is not None:
+            h5.put_scalar("mip_lazy", repr(self.lazy_))
+        if self.cuts_ is not None:
+            h5.put_scalar("mip_cuts", repr(self.cuts_))

     def fix_variables(
         self,
@@ -149,14 +182,22 @@ class GurobiModel(AbstractModel):
         stats["Fixed variables"] = n_fixed

     def optimize(self) -> None:
-        self.lazy_constrs_ = []
+        self.lazy_ = []
+        self.cuts_ = []

         def callback(_: gp.Model, where: int) -> None:
-            _gurobi_callback(self, where)
+            _gurobi_callback(self, self.inner, where)

+        # Required parameters for lazy constraints
         if self.lazy_enforce is not None:
             self.inner.setParam("PreCrush", 1)
             self.inner.setParam("LazyConstraints", 1)
+
+        # Required parameters for user cuts
+        if self.cuts_enforce is not None:
+            self.inner.setParam("PreCrush", 1)
+
+        if self.lazy_enforce is not None or self.cuts_enforce is not None:
             self.inner.optimize(callback)
         else:
             self.inner.optimize()
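
The rewritten `optimize()` routes both lazy constraints and user cuts through a single callback: parameters are set up front, `cuts_aot_` (filled by `MemorizingCutsComponent.before_mip`) is flushed at the first fractional MIPNODE, and anything separated during the run is accumulated in `lazy_`/`cuts_` for later storage. A toy sketch of driving the cut callbacks directly through `GurobiModel`, without a `LearningSolver`; the model and separation rule below are placeholders, not part of this commit:

import gurobipy as gp
from gurobipy import GRB, quicksum

from miplearn.solvers.gurobi import GurobiModel

inner = gp.Model()
x = inner.addVars(3, vtype=GRB.BINARY, name="x")
inner.setObjective(quicksum(x[i] for i in range(3)), GRB.MAXIMIZE)

def cuts_separate(m: GurobiModel):
    # Fractional solution at the current node; on this toy model the relaxation
    # is typically already integral, so no violation may ever be reported.
    x_val = m.inner.cbGetNodeRel(x)
    if sum(x_val[i] for i in range(3)) > 2.0001:
        return [(0, 1, 2)]
    return []

def cuts_enforce(m: GurobiModel, violations):
    for group in violations:
        m.add_constr(quicksum(x[i] for i in group) <= 2)

model = GurobiModel(inner, cuts_separate=cuts_separate, cuts_enforce=cuts_enforce)
model.optimize()    # PreCrush is set automatically; the callback handles MIPNODE
print(model.cuts_)  # cuts found during the run, later stored as "mip_cuts"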

@@ -36,7 +36,6 @@ class PyomoModel(AbstractModel):
         self._is_warm_start_available = False
         self.lazy_separate = lazy_separate
         self.lazy_enforce = lazy_enforce
-        self.lazy_constrs_: Optional[List[Any]] = None
         if not hasattr(self.inner, "dual"):
             self.inner.dual = Suffix(direction=Suffix.IMPORT)
             self.inner.rc = Suffix(direction=Suffix.IMPORT)
@@ -131,15 +130,14 @@ class PyomoModel(AbstractModel):
         self.solver.update_var(var)

     def optimize(self) -> None:
-        self.lazy_constrs_ = []
+        self.lazy_ = []
         if self.lazy_separate is not None:
             assert (
                 self.solver_name == "gurobi_persistent"
             ), "Callbacks are currently only supported on gurobi_persistent"

             def callback(_: Any, __: Any, where: int) -> None:
-                _gurobi_callback(self, where)
+                _gurobi_callback(self, self.solver, where)

             self.solver.set_gurobi_param("PreCrush", 1)
             self.solver.set_gurobi_param("LazyConstraints", 1)

@@ -0,0 +1,80 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2023, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from typing import Any, List, Hashable, Dict
from unittest.mock import Mock

import gurobipy as gp
import networkx as nx
from gurobipy import GRB, quicksum
from sklearn.dummy import DummyClassifier
from sklearn.neighbors import KNeighborsClassifier

from miplearn.components.cuts.mem import MemorizingCutsComponent
from miplearn.extractors.abstract import FeaturesExtractor
from miplearn.problems.stab import build_stab_model
from miplearn.solvers.gurobi import GurobiModel
from miplearn.solvers.learning import LearningSolver
import numpy as np


# def test_usage() -> None:
#     model = _build_cut_model()
#     solver = LearningSolver(components=[])
#     solver.optimize(model)
#     assert model.cuts_ is not None
#     assert len(model.cuts_) > 0
#     assert False


def test_mem_component(
    stab_h5: List[str],
    default_extractor: FeaturesExtractor,
) -> None:
    clf = Mock(wraps=DummyClassifier())
    comp = MemorizingCutsComponent(clf=clf, extractor=default_extractor)
    comp.fit(stab_h5)

    # Should call fit method with correct arguments
    clf.fit.assert_called()
    x, y = clf.fit.call_args.args
    assert x.shape == (3, 50)
    assert y.shape == (3, 388)
    y = y.tolist()
    assert y[0][:20] == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    assert y[1][:20] == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1]
    assert y[2][:20] == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1]

    # Should store violations
    assert comp.constrs_ is not None
    assert comp.n_features_ == 50
    assert comp.n_targets_ == 388
    assert len(comp.constrs_) == 388

    # Call before-mip
    stats: Dict[str, Any] = {}
    model = Mock()
    comp.before_mip(stab_h5[0], model, stats)

    # Should call predict with correct args
    clf.predict.assert_called()
    (x_test,) = clf.predict.call_args.args
    assert x_test.shape == (1, 50)

    # Should set cuts_aot_
    assert model.cuts_aot_ is not None
    assert len(model.cuts_aot_) == 243


def test_usage_stab(
    stab_h5: List[str],
    default_extractor: FeaturesExtractor,
) -> None:
    data_filenames = [f.replace(".h5", ".pkl.gz") for f in stab_h5]
    clf = KNeighborsClassifier(n_neighbors=1)
    comp = MemorizingCutsComponent(clf=clf, extractor=default_extractor)
    solver = LearningSolver(components=[comp])
    solver.fit(data_filenames)
    stats = solver.optimize(data_filenames[0], build_stab_model)
    assert stats["Cuts: AOT"] > 0

@@ -8,7 +8,7 @@ from unittest.mock import Mock
 from sklearn.dummy import DummyClassifier
 from sklearn.neighbors import KNeighborsClassifier

-from miplearn.components.lazy.mem import MemorizingLazyConstrComponent
+from miplearn.components.lazy.mem import MemorizingLazyComponent
 from miplearn.extractors.abstract import FeaturesExtractor
 from miplearn.problems.tsp import build_tsp_model
 from miplearn.solvers.learning import LearningSolver
@@ -19,7 +19,7 @@ def test_mem_component(
     default_extractor: FeaturesExtractor,
 ) -> None:
     clf = Mock(wraps=DummyClassifier())
-    comp = MemorizingLazyConstrComponent(clf=clf, extractor=default_extractor)
+    comp = MemorizingLazyComponent(clf=clf, extractor=default_extractor)
     comp.fit(tsp_h5)

     # Should call fit method with correct arguments
@@ -56,7 +56,7 @@ def test_usage_tsp(
     # Should not crash
     data_filenames = [f.replace(".h5", ".pkl.gz") for f in tsp_h5]
     clf = KNeighborsClassifier(n_neighbors=1)
-    comp = MemorizingLazyConstrComponent(clf=clf, extractor=default_extractor)
+    comp = MemorizingLazyComponent(clf=clf, extractor=default_extractor)
     solver = LearningSolver(components=[comp])
     solver.fit(data_filenames)
     solver.optimize(data_filenames[0], build_tsp_model)

@@ -1,10 +1,13 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
+import os
+import shutil
+import tempfile
 from glob import glob
-from os.path import dirname
-from typing import List
+from os.path import dirname, basename, isfile
+from tempfile import NamedTemporaryFile
+from typing import List, Any

 import pytest
@@ -12,14 +15,45 @@ from miplearn.extractors.abstract import FeaturesExtractor
 from miplearn.extractors.fields import H5FieldsExtractor


+def _h5_fixture(pattern: str, request: Any) -> List[str]:
+    """
+    Create a temporary copy of the provided .h5 files, along with the companion
+    .pkl.gz files, and return the path to the copy. Also register a finalizer,
+    so that the temporary folder is removed after the tests.
+    """
+    filenames = glob(f"{dirname(__file__)}/fixtures/{pattern}")
+    print(filenames)
+    tmpdir = tempfile.mkdtemp()
+
+    def cleanup() -> None:
+        shutil.rmtree(tmpdir)
+
+    request.addfinalizer(cleanup)
+    print(tmpdir)
+    for f in filenames:
+        fbase, _ = os.path.splitext(f)
+        for ext in [".h5", ".pkl.gz"]:
+            dest = os.path.join(tmpdir, f"{basename(fbase)}{ext}")
+            print(dest)
+            shutil.copy(f"{fbase}{ext}", dest)
+            assert isfile(dest)
+    return sorted(glob(f"{tmpdir}/*.h5"))
+
+
+@pytest.fixture()
+def multiknapsack_h5(request: Any) -> List[str]:
+    return _h5_fixture("multiknapsack*.h5", request)
+
+
 @pytest.fixture()
-def multiknapsack_h5() -> List[str]:
-    return sorted(glob(f"{dirname(__file__)}/fixtures/multiknapsack-n100*.h5"))
+def tsp_h5(request: Any) -> List[str]:
+    return _h5_fixture("tsp*.h5", request)


 @pytest.fixture()
-def tsp_h5() -> List[str]:
-    return sorted(glob(f"{dirname(__file__)}/fixtures/tsp-n20*.h5"))
+def stab_h5(request: Any) -> List[str]:
+    return _h5_fixture("stab*.h5", request)


 @pytest.fixture()

@@ -0,0 +1,23 @@
from os.path import dirname

import numpy as np
from scipy.stats import uniform, randint

from miplearn.collectors.basic import BasicCollector
from miplearn.io import write_pkl_gz
from miplearn.problems.stab import (
    MaxWeightStableSetGenerator,
    build_stab_model,
)

np.random.seed(42)
gen = MaxWeightStableSetGenerator(
    w=uniform(10.0, scale=1.0),
    n=randint(low=50, high=51),
    p=uniform(loc=0.5, scale=0.0),
    fix_graph=True,
)
data = gen.generate(3)
data_filenames = write_pkl_gz(data, dirname(__file__), prefix="stab-n50-")
collector = BasicCollector()
collector.collect(data_filenames, build_stab_model)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

@@ -9,8 +9,7 @@ import numpy as np
 from miplearn.h5 import H5File
 from miplearn.problems.stab import (
     MaxWeightStableSetData,
-    build_stab_model_pyomo,
-    build_stab_model_gurobipy,
+    build_stab_model,
 )
 from miplearn.solvers.abstract import AbstractModel
@@ -21,8 +20,7 @@ def test_stab() -> None:
         weights=np.array([1.0, 1.0, 1.0, 1.0, 1.0]),
     )
     for model in [
-        build_stab_model_pyomo(data),
-        build_stab_model_gurobipy(data),
+        build_stab_model(data),
     ]:
         assert isinstance(model, AbstractModel)
         with NamedTemporaryFile() as tempfile:

@@ -39,6 +39,6 @@ def _build_model() -> PyomoModel:
 def test_pyomo_callback() -> None:
     model = _build_model()
     model.optimize()
-    assert model.lazy_constrs_ is not None
-    assert len(model.lazy_constrs_) > 0
+    assert model.lazy_ is not None
+    assert len(model.lazy_) > 0
     assert model.inner.x.value == 0.0
