373 Commits

Author SHA1 Message Date
3220337e37 Bump version: miplearn-0.2.0.dev12 2021-08-26 06:16:16 -05:00
35272e08c6 Primal: Skip non-binary variables 2021-08-18 10:34:56 -05:00
5b3a56f053 Re-add sample.{get,put}_bytes 2021-08-11 06:24:10 -05:00
256d3d094f AlvLouWeh2017: Remove sample argument 2021-08-11 06:17:57 -05:00
a65ebfb17c Re-enable half-precision; minor changes to FeaturesExtractor benchmark 2021-08-10 17:30:16 -05:00
9cfb31bacb Remove {get,put}_set and deprecated functions 2021-08-10 17:27:06 -05:00
ed58242b5c Remove most usages of put_{vector,vector_list}; deprecate get_set 2021-08-10 11:52:02 -05:00
60b9a6775f Use NumPy to compute AlvLouWeh2017 features 2021-08-10 10:28:30 -05:00
e852d5cdca Use np.ndarray for constraint methods in Instance 2021-08-10 07:09:42 -05:00
895cb962b6 Make get_variable_{categories,features} return np.ndarray 2021-08-09 15:19:53 -05:00
56b39b6c9c Make get_instance_features return np.ndarray 2021-08-09 14:02:14 -05:00
47d3011808 Use np.ndarray in instance features 2021-08-09 10:01:58 -05:00
63eff336e2 Implement sample.{get,put}_sparse 2021-08-09 07:09:02 -05:00
5b54153a3a Use np in Constraints.lazy; replace some get_vector 2021-08-09 06:27:03 -05:00
f809dd7de4 Use np.ndarray in Constraints.{basis_status,senses} 2021-08-09 06:09:26 -05:00
9ddda7e1e2 Use np.ndarray for constraint names 2021-08-09 05:41:01 -05:00
45667ac2e4 Use np.ndarray for var_types, basis_status 2021-08-08 07:36:57 -05:00
7d55d6f34c Use np.array for Variables.names 2021-08-08 07:24:14 -05:00
f69067aafd Implement {get,put}_array; make other methods deprecated 2021-08-08 06:52:24 -05:00
0a32586bf8 Use np.ndarray in Constraints 2021-08-05 15:57:02 -05:00
0c4b0ea81a Use np.ndarray in Variables 2021-08-05 15:42:19 -05:00
b6426462a1 Fix failing tests 2021-08-05 14:05:50 -05:00
475fe3d985 Sample: do not check data by default; minor fixes 2021-08-05 12:34:55 -05:00
95b9ce29fd Hdf5Sample: Use latest HDF5 file format 2021-08-05 10:18:34 -05:00
4a52911924 AlvLouWeh2017: Replace non-finite features by constant 2021-08-04 13:54:14 -05:00
e72f3b553f Hdf5Sample: Use half-precision for floats 2021-08-04 13:44:42 -05:00
067f0f847c Add mip_ prefix to dynamic constraints 2021-08-04 13:38:23 -05:00
ca925119b3 Add static_ prefix to all static features 2021-08-04 13:35:16 -05:00
10eed9b306 Don't include intermediary features in sample; rename some keys 2021-08-04 13:22:12 -05:00
865a4b2f40 Hdf5Sample: Store string vectors as "S" dtype instead of obj 2021-08-04 11:34:56 -05:00
c513515725 Hdf5Sample: Enable compression 2021-07-28 10:14:55 -05:00
7163472cfc Bump version to 0.2.0.dev11 2021-07-28 09:33:40 -05:00
7d5ec1344a Make Hdf5Sample work with bytearray 2021-07-28 09:06:15 -05:00
a69cbed7b7 Improve error messages in assertions 2021-07-28 08:57:09 -05:00
fc55a077f2 Sample: Allow numpy arrays 2021-07-28 08:21:56 -05:00
6fd839351c GurobiSolver: Fix error messages 2021-07-27 11:50:03 -05:00
b6880f068c Hdf5Sample: store lengths as dataset instead of attr 2021-07-27 11:47:26 -05:00
728a6bc835 Remove debug statement 2021-07-27 11:24:41 -05:00
d30c3232e6 FileInstance.save: create file when it does not already exist 2021-07-27 11:22:40 -05:00
4f14b99a75 Add h5py to setup.py 2021-07-27 11:12:07 -05:00
15e08f6c36 Implement FileInstance 2021-07-27 11:02:04 -05:00
f1dc450cbf Do nothing on put_scalar(None) 2021-07-27 10:55:19 -05:00
6c98986675 Hdf5Sample: Return None for non-existing keys 2021-07-27 10:49:30 -05:00
a0f8bf15d6 Handle completely empty veclists 2021-07-27 10:45:11 -05:00
3da8d532a8 Sample: handle None in vectors 2021-07-27 10:37:02 -05:00
284ba15db6 Implement sample.{get,put}_bytes 2021-07-27 10:01:32 -05:00
962707e8b7 Replace push_sample by create_sample 2021-07-27 09:25:40 -05:00
4224586d10 Remove sample.{get,set} 2021-07-27 09:00:04 -05:00
ef9c48d79a Replace Hashable by str 2021-07-15 16:21:40 -05:00
8d89285cb9 Implement {get,put}_vector_list 2021-07-15 16:00:13 -05:00
8fc7c6ab71 Split Sample.{get,put} into {get,put}_{scalar,vector} 2021-07-14 10:50:54 -05:00
0a399deeee Implement Hdf5Sample 2021-07-14 09:56:25 -05:00
021a71f60c Reorganize feature tests; add basic sample tests 2021-07-14 08:39:19 -05:00
235c3e55c2 Make Sample abstract; create MemorySample 2021-07-14 08:31:01 -05:00
851b8001bb Move features to its own package 2021-07-14 08:23:52 -05:00
ed77d548aa Remove unused function 2021-07-14 08:16:49 -05:00
609c5c7694 Rename Variables and Constraints; move to internal.py 2021-07-06 17:08:22 -05:00
c8c29138ca Remove unused classes and functions 2021-07-06 17:04:32 -05:00
cd9e5d4144 Remove sample.after_load 2021-07-06 16:58:09 -05:00
b4a267a524 Remove sample.after_lp 2021-07-01 12:25:50 -05:00
4093ac62fd Remove sample.after_mip 2021-07-01 11:45:19 -05:00
7c4c301611 Extract instance, var and constr features into sample 2021-07-01 11:06:36 -05:00
061b1349fe Move user_cuts/lazy_enforced to sample.data 2021-07-01 08:46:27 -05:00
80281df8d8 Replace instance.samples by instance.get/push_sample 2021-06-29 16:49:24 -05:00
a5092cc2b9 Request constraint features/categories in bulk 2021-06-29 09:54:35 -05:00
8118ab4110 Remove EnforceOverrides 2021-06-29 09:05:14 -05:00
438859e493 Request variable features/categories in bulk 2021-06-29 09:02:46 -05:00
6969f2ffd2 Measure time extracting features 2021-06-29 07:52:04 -05:00
5b4b8adee5 LearningSolver: add extract_sa, extract_lhs arguments 2021-06-28 17:34:15 -05:00
101bd94a5b Make read/write_pickle_gz quiet 2021-06-28 10:17:41 -05:00
46a7d3fe26 BenchmarkRunner.fit: Only iterate through files twice 2021-06-28 09:32:30 -05:00
aaef8b8fb3 Bump version to 0.2.0.dev10 2021-06-28 09:32:30 -05:00
173d73b718 setup.py: Require numpy<1.21 2021-05-26 10:05:02 -05:00
343afaeec0 Fix MyPy errors 2021-05-26 09:49:58 -05:00
4c7e63409d Improve logging 2021-05-26 09:01:40 -05:00
476c27d0d9 Merge branch 'feature/sphinx' into dev 2021-05-24 09:34:11 -05:00
3f117e9171 Replace mkdocs by sphinx 2021-05-24 09:33:45 -05:00
ddd136c661 assert_equals: Handle ndarray with booleans 2021-05-20 11:38:35 -05:00
52093eb1c0 Combine np.ndarray conversion with rounding 2021-05-20 11:18:17 -05:00
34c71796e1 assert_equals: Recursively convert np.ndarray 2021-05-20 11:06:58 -05:00
cdd38cdfb8 Make assert_equals work with np.ndarray 2021-05-20 10:41:38 -05:00
310394b397 Bump to 0.2.0.dev9 2021-05-20 10:26:40 -05:00
81b7047c4c gurobi.py: Remove tuples 2021-05-20 10:25:56 -05:00
c494f3e804 Remove tuples from ConstraintFeatures 2021-05-20 10:23:53 -05:00
f9ac65bf9c Remove tuples from VariableFeatures 2021-05-20 10:03:18 -05:00
fa969cf066 Constraint features: Fix conversion to list 2021-05-20 08:54:18 -05:00
659131c8cf Only use p_tqdm is n_jobs>1 2021-05-20 08:39:51 -05:00
983e5fe117 Add docs-sphinx 2021-05-20 08:36:34 -05:00
13373c2573 Bump version to 0.2.0.dev6 2021-05-18 09:25:26 -05:00
4bf4d09cb5 Remove unused classes and methods 2021-05-15 14:29:11 -05:00
91c8db2225 Refactor StaticLazy; remove old constraint methods 2021-05-15 14:15:48 -05:00
53d3e9d98a Implement ConstraintFeatures.__getitem__ 2021-05-15 09:38:00 -05:00
83c46d70a3 Implement bulk constraint methods 2021-05-15 09:26:55 -05:00
8e61b7be5f Remove EnforceOverrides 2021-05-10 13:31:43 -05:00
17d4bc6ab9 Remove empty docstring 2021-05-10 10:52:02 -05:00
249002dcf3 Fix mypy issues 2021-04-30 11:55:08 -05:00
c3d26a1c75 Reduce memory consumption of parallel_solve 2021-04-30 11:54:55 -05:00
0ba8cc16fd GurobiSolver: Implement relax/enforce constraint 2021-04-15 15:22:12 -05:00
4dd4ef52bd Add with_lhs argument 2021-04-15 12:39:48 -05:00
18521331c9 Extract more features to ConstraintFeatures 2021-04-15 12:21:19 -05:00
230d13a5c0 Create ConstraintFeatures 2021-04-15 11:49:58 -05:00
0e9c8b0a49 Rename features.constraints to constraints_old 2021-04-15 11:00:52 -05:00
8f73d87d2d Fix failing test 2021-04-15 10:49:48 -05:00
39597287a6 Make extractor configurable 2021-04-15 09:57:10 -05:00
95e326f5f6 Use compact variable features everywhere 2021-04-15 09:49:35 -05:00
fec0113722 Rename features.variables to variables_old; update FeatureExtractor 2021-04-15 06:54:27 -05:00
08f0bedbe0 Implement more compact get_variables 2021-04-15 06:26:33 -05:00
e6eca2ee7f GurobiSolver: Performance improvements 2021-04-15 04:12:10 -05:00
e1f32b1798 Add n_jobs to BenchmarkRunner.fit 2021-04-13 19:30:42 -05:00
77b10b9609 Parallel processing 2021-04-13 19:28:18 -05:00
bec7dae6d9 Add pre argument to sample_xy 2021-04-13 19:19:49 -05:00
a01c179341 LearningSolver: Load each instance exactly twice during fit 2021-04-13 18:11:37 -05:00
ef7a50e871 Only include static features in after-load 2021-04-13 16:08:30 -05:00
8f41278713 GurobiSolver: Improve get_constraints 2021-04-13 15:35:20 -05:00
37a1bc9fe6 Fix mypy errors 2021-04-13 14:36:20 -05:00
61645491a4 GurobiSolver: Bulk query 2021-04-13 10:54:01 -05:00
25affca3ec GurobiSolver: Accept integer variables, as long as bounds=(0,1) 2021-04-13 10:39:36 -05:00
c4a6665825 Remove obsolete methods 2021-04-13 09:42:25 -05:00
c26b852c67 Update UserCutsComponent 2021-04-13 09:08:49 -05:00
a4433916e5 Update DynamicLazyConstraintsComponent 2021-04-13 08:42:06 -05:00
b5411b8950 Update ObjectiveValueComponent 2021-04-13 07:53:23 -05:00
a9dcdb8e4e Update PrimalSolutionComponent 2021-04-13 07:23:07 -05:00
d7aa31f3eb Fix mypy errors 2021-04-13 06:47:31 -05:00
9d404f29a7 Call new fit method 2021-04-12 10:30:47 -05:00
cb62345acf Refactor StaticLazy 2021-04-12 10:05:17 -05:00
e6672a45a0 Rename more methods to _old 2021-04-12 08:55:01 -05:00
08ede5db09 Component: add new callback methods 2021-04-12 08:34:46 -05:00
6f6cd3018b Rewrite DynamicLazy.sample_xy 2021-04-12 08:11:39 -05:00
bccf0e9860 Rewrite StaticLazy.sample_xy 2021-04-12 07:35:51 -05:00
2979bd157c Rewrite PrimalSolutionComponent.sample_xy 2021-04-11 21:52:59 -05:00
d90d7762e3 Rewrite ObjectiveValueComponent.sample_xy 2021-04-11 21:27:25 -05:00
2da60dd293 Rename methods that use TrainingSample to _old 2021-04-11 21:00:04 -05:00
5fd13981d4 Append sample 2021-04-11 17:39:55 -05:00
fde6dc5a60 Combine after_load, after_lp and after_mip into Sample dataclass 2021-04-11 17:20:17 -05:00
2d4ded1978 Fix some mypy issues 2021-04-11 17:07:45 -05:00
16630b3a36 GurobiPyomoSolver: Extract same features as GurobiSolver 2021-04-11 17:05:41 -05:00
6bc81417ac Sort methods 2021-04-11 16:50:00 -05:00
fcb511a2c6 Pyomo: Collect variable reduced costs 2021-04-11 16:30:00 -05:00
3cfadf4e97 Pyomo: Collect variable bounds, obj_coeff, value, type 2021-04-11 16:21:31 -05:00
6b15337e4c Add mip_stats to after-mip features 2021-04-11 09:14:05 -05:00
bd78518c1f Convert MIPSolveStats into dataclass 2021-04-11 09:10:14 -05:00
2bc1e21f8e Add lp_stats to after-lp features 2021-04-11 08:57:57 -05:00
945f6a091c Convert LPSolveStats into dataclass 2021-04-11 08:41:50 -05:00
6afdf2ed55 Collect features 3 times (after-load, after-lp, after-mip) 2021-04-11 08:03:46 -05:00
d85a63f869 Small fixes to Alvarez2017 features 2021-04-11 08:03:17 -05:00
c39231cb18 Implement a small subset of Alvarez2017 features 2021-04-10 19:48:58 -05:00
9ca4cc3c24 Include additional features in instance.features 2021-04-10 19:11:38 -05:00
733c8299e0 Add more variable features 2021-04-10 18:56:59 -05:00
5e1f26e4b0 Add more constraint features 2021-04-10 17:38:03 -05:00
b5e602cdc1 get_constraints: Fetch slack and dual values 2021-04-10 17:24:03 -05:00
088d679f61 Redesign InternalSolver constraint methods 2021-04-10 15:53:38 -05:00
f70363db0d Replace build_lazy_constraint by enforce_lazy_constraint 2021-04-10 10:05:30 -05:00
735884151d Reorganize callbacks 2021-04-10 09:04:34 -05:00
6ac738beb4 PyomoSolver: Implement missing constraint methods 2021-04-09 22:31:17 -05:00
9368b37139 Replace individual constraint methods by single get_constraints 2021-04-09 21:51:38 -05:00
626d75f25e Reorganize internal solver tests 2021-04-09 20:33:48 -05:00
a8224b5a38 Move instance fixtures into the main source; remove duplication 2021-04-09 19:07:46 -05:00
f3fd1e0cda Make internal_solvers into a fixture 2021-04-09 18:35:19 -05:00
31d0a0861d Bump version to 0.2.0.dev3 2021-04-09 09:06:28 -05:00
5d7c2ea089 Require Python 3.7+ 2021-04-09 09:04:34 -05:00
4e230c2120 Move all dependencies to setup.py 2021-04-09 09:01:09 -05:00
7d3b065a3e Add Overrides to setup.py; bump to 0.2.0.dev2 2021-04-09 08:29:15 -05:00
3f4336f902 Always remove .mypy_cache; fix more mypy tests 2021-04-09 08:18:54 -05:00
32b6a8f3fa Bump version to 0.2.0.dev1 2021-04-09 08:08:16 -05:00
166cdb81d7 Fix tests 2021-04-09 07:59:52 -05:00
57624bd75c Update gitignore 2021-04-09 07:53:14 -05:00
c66a59d668 Make version a pre-release 2021-04-09 07:53:14 -05:00
74ceb776c3 Skip extracting features if already computed 2021-04-09 07:53:14 -05:00
5aa434b439 Fix failing mypy tests 2021-04-09 07:41:23 -05:00
5116681291 Add some InternalSolver tests to main package 2021-04-08 11:23:56 -05:00
3edc8139e9 Improve logging 2021-04-08 11:23:30 -05:00
6330354c47 Remove EnforceOverrides; automatically convert np.ndarray features 2021-04-08 07:50:16 -05:00
157825a345 mypy: Disable implicit optionals 2021-04-07 21:36:37 -05:00
e9cd6d1715 Add types to remaining files; activate mypy's disallow_untyped_defs 2021-04-07 21:25:30 -05:00
f5606efb72 Add types to log.py 2021-04-07 21:01:21 -05:00
331ee5914d Add types to solvers 2021-04-07 20:58:44 -05:00
38212fb858 Add types to tsp.py 2021-04-07 20:33:28 -05:00
f7545204d7 Add types to stab.py 2021-04-07 20:25:59 -05:00
2c93ff38fc Add types to knapsack.py 2021-04-07 20:21:28 -05:00
0232219a0e Make InternalSolver clonable 2021-04-07 19:52:21 -05:00
ebccde6a03 Update CHANGELOG.md 2021-04-07 17:52:20 -05:00
0516d4a802 Update CHANGELOG.md 2021-04-07 16:44:03 -05:00
d76dc768b0 Add CHANGELOG.md 2021-04-07 15:32:43 -05:00
1380165e3d Benchmark: Reduce time limit during training 2021-04-07 12:07:17 -05:00
96093a9b8e Enforce more overrides 2021-04-07 12:01:05 -05:00
1cf6124757 Refer to variables by varname instead of (vname, index) 2021-04-07 11:56:05 -05:00
856b595d5e PickleGzInstance: Replace implicit load by load/free methods 2021-04-06 19:23:08 -05:00
f495297168 Remove experimental LP components 2021-04-06 17:00:51 -05:00
f90f295620 Reorganize instance package 2021-04-06 16:31:47 -05:00
3543a2ba92 Optimize imports 2021-04-06 16:23:55 -05:00
332cdbd839 Update copyright year 2021-04-06 16:22:56 -05:00
b0bf42e69d Remove obsolete extractor classes 2021-04-06 16:18:26 -05:00
9e7eed1dbd Finish rewrite of user cuts component 2021-04-06 16:17:05 -05:00
9f2d7439dc Add user cut callbacks; begin rewrite of UserCutsComponent 2021-04-06 12:46:37 -05:00
cfb17551f1 Make sample_xy an instance method 2021-04-06 11:24:56 -05:00
54c20382c9 Finish DynamicLazyConstraintsComponent rewrite 2021-04-06 08:19:29 -05:00
c6aee4f90d Make sample_ method accept instance 2021-04-06 06:48:47 -05:00
bb91c83187 LazyDynamic: Rewrite fit method 2021-04-06 06:28:23 -05:00
6e326d5d6e Move feature classes to features.py 2021-04-05 20:38:31 -05:00
b11779817a Convert TrainingSample to dataclass 2021-04-05 20:36:04 -05:00
aeed338837 Convert ConstraintFeatures to dataclass 2021-04-05 20:12:07 -05:00
94084e0669 Convert InstanceFeatures into dataclass 2021-04-05 20:02:24 -05:00
d79eec5da6 Convert VariableFeatures into dataclass 2021-04-04 22:56:26 -05:00
59f4f75a53 Convert Features into dataclass 2021-04-04 22:37:16 -05:00
f2520f33fb Correctly store features and training data for file-based instances 2021-04-04 22:00:21 -05:00
025e08f85e LazyStatic: Use dynamic thresholds 2021-04-04 20:42:04 -05:00
08e808690e Replace InstanceIterator by PickleGzInstance 2021-04-04 14:56:33 -05:00
b4770c6c0a Fix failing tests 2021-04-04 08:55:55 -05:00
96e7a0946e pre-commit: Use specific version of Black 2021-04-04 08:55:37 -05:00
b70aa1574e Update Makefile and GH actions 2021-04-04 08:47:49 -05:00
6e614264b5 StaticLazy: Refactor 2021-04-04 08:39:56 -05:00
168f56c296 Fix typos 2021-04-03 19:13:00 -05:00
ea5c35fe18 Objective: Refactoring 2021-04-03 19:10:29 -05:00
185b95118a Objective: Rewrite sample_evaluate 2021-04-03 18:37:49 -05:00
7af22bd16b Refactor ObjectiveValueComponent 2021-04-03 10:24:05 -05:00
8e1ed6afcb GitHub Actions: Run tests daily 2021-04-03 08:43:43 -05:00
c02b116d8e Fix decorator version 2021-04-03 08:36:51 -05:00
674c16cbed FeaturesExtractor: Fix assertion 2021-04-03 08:27:30 -05:00
ca555f785a GitHub Actions: Use specific version of Black 2021-04-03 08:00:22 -05:00
d8747289dd Remove benchmark results from repository 2021-04-03 07:58:17 -05:00
7a6b31ca9a Fix benchmark scripts; add more input checks 2021-04-03 07:57:22 -05:00
0bce2051a8 Redesign component.evaluate 2021-04-02 08:10:08 -05:00
0c687692f7 Make all before/solve callbacks receive same parameters 2021-04-02 07:05:16 -05:00
8eb2b63a85 Primal: Refactor stats 2021-04-02 06:32:44 -05:00
ef556f94f0 Rename xy_sample to xy 2021-04-02 06:26:48 -05:00
bc8fe4dc98 Components: Switch from factory methods to prototype objects 2021-04-01 08:34:56 -05:00
59c734f2a1 Add ScikitLearnRegressor; move sklean classes to their own file 2021-04-01 07:54:14 -05:00
820a6256c2 Make classifiers and regressors clonable 2021-04-01 07:41:59 -05:00
ac29b5213f Objective: Add tests 2021-04-01 07:21:44 -05:00
b83911a91d Primal: Add end-to-end tests 2021-03-31 12:51:18 -05:00
db2f426140 Primal: reactivate before_solve_mip 2021-03-31 12:08:49 -05:00
fe7bad885c Make xy_sample receive features, not instances 2021-03-31 10:05:59 -05:00
8fc9979b37 Use instance.features in LazyStatic and Objective 2021-03-31 09:21:34 -05:00
5db4addfa5 Add instance-level features to instance.features 2021-03-31 09:14:06 -05:00
0f5a6745a4 Primal: Refactoring 2021-03-31 09:08:01 -05:00
4f46866921 Primal: Use instance.features 2021-03-31 08:22:43 -05:00
12fca1f22b Extract all features ahead of time 2021-03-31 07:42:01 -05:00
b3c24814b0 Refactor PrimalSolutionComponent 2021-03-31 06:55:24 -05:00
ec69464794 Refactor primal 2021-03-30 21:44:13 -05:00
9cf28f3cdc Add variables to model features 2021-03-30 21:29:33 -05:00
1224613b1a Implement component.fit, component.fit_xy 2021-03-30 21:18:40 -05:00
205a972937 Add StaticLazyComponent.xy 2021-03-30 20:45:22 -05:00
07388d9490 Remove unused composite component 2021-03-30 17:25:50 -05:00
64a63264c7 Rename xy to xy_sample 2021-03-30 17:24:27 -05:00
e8adeb28a3 Add ObjectiveValueComponent.xy 2021-03-30 17:17:29 -05:00
9266743940 Add Component.xy and PrimalSolutionComponent.xy 2021-03-30 17:08:10 -05:00
75d1eee424 DropRedundant: Make x_y parallel 2021-03-30 10:06:55 -05:00
3b61a15ead Add after_solve_lp callback; make dict keys consistent 2021-03-30 10:05:28 -05:00
6ae052c8d0 Rename before/after_solve to before/after_solve_mip 2021-03-30 09:04:41 -05:00
bcaf26b18c Sklearn: Handle the special case when all labels are the same 2021-03-02 19:31:12 -06:00
b6ea0c5f1b ConstraintFeatures: Store lhs and sense 2021-03-02 18:14:36 -06:00
3a60deac63 LearningSolver: Handle exceptions in parallel_solve 2021-03-02 17:27:50 -06:00
bca6581b0f DropRedundant: Clear pool before each solve 2021-03-02 17:27:50 -06:00
1397937f03 Add first model feature (constraint RHS) 2021-03-02 17:21:05 -06:00
31ca45036a Organize test fixtures; handle infeasibility in DropRedundant 2021-02-02 10:24:51 -06:00
8153dfc825 DropRedundant: Update for new classifier interface 2021-02-02 09:26:16 -06:00
d3c5371fa5 VarIndex: Use tuples instead of lists 2021-02-02 09:10:49 -06:00
d1bbe48662 GurobiSolver: Small fix to _update_vars 2021-02-02 08:40:44 -06:00
Feng
b97ead8aa2 Update about.md 2021-01-27 21:34:15 -06:00
Feng
7885ce83bd Update about.md 2021-01-27 21:32:33 -06:00
9abcea05cd Objective: Use LP value as feature 2021-01-26 22:28:20 -06:00
fe47b0825f Remove unused extractors 2021-01-26 22:20:18 -06:00
603902e608 Refactor ObjectiveComponent 2021-01-26 22:16:46 -06:00
2e845058fc Update benchmark script 2021-01-26 20:38:37 -06:00
4d4e2a3eef Fix tests on Python 3.7 2021-01-25 18:13:03 -06:00
edd0c8d750 Remove RelaxationComponent 2021-01-25 18:12:52 -06:00
a97089fc34 Primal: Add tolerance in binary check 2021-01-25 17:54:41 -06:00
a0062edb5a Update benchmark scripts 2021-01-25 17:54:23 -06:00
203afc6993 Primal: Compute statistics 2021-01-25 16:02:40 -06:00
b0b013dd0a Fix all tests 2021-01-25 15:19:58 -06:00
3ab3bb3c1f Refactor PrimalSolutionComponent 2021-01-25 14:54:58 -06:00
f68cc5bd59 Refactor thresholds 2021-01-25 09:52:49 -06:00
4da561a6a8 AdaptiveClassifier: Refactor and add tests 2021-01-25 08:59:06 -06:00
8dba65dd9c Start refactoring of classifiers 2021-01-22 11:35:29 -06:00
b87ef651e1 Document and simplify Classifier and Regressor 2021-01-22 09:06:04 -06:00
f90d78f802 Move tests to separate folder 2021-01-22 07:42:28 -06:00
e2048fc659 Docs: Minor fixes to CSS 2021-01-22 07:30:33 -06:00
ea4bdd38be Fix broken links in documentation 2021-01-22 07:24:39 -06:00
f755661fa6 Simplify BenchmarkRunner; update docs 2021-01-22 07:22:19 -06:00
aa9cefb9c9 GitHub Actions: Remove python 3.9 (no xpresss available) 2021-01-21 18:55:31 -06:00
c342a870d1 Minor fixes to docstrings; make some classes private 2021-01-21 18:54:05 -06:00
7dbbfdc418 Minor fixes 2021-01-21 18:21:53 -06:00
f7ce441fa6 Add types to internal solvers 2021-01-21 17:19:28 -06:00
d500294ebd Add more types to LearningSolver 2021-01-21 16:33:55 -06:00
fc0835e694 Add type annotations to components 2021-01-21 15:54:23 -06:00
a98a783969 Update tests 2021-01-21 14:38:12 -06:00
a42c5ebdc3 Remove unused methods 2021-01-21 14:27:28 -06:00
868675ecf2 Implement some constraint methods in Pyomo 2021-01-21 14:24:06 -06:00
13e142432a Add types to remaining InternalSolver methods 2021-01-21 14:02:18 -06:00
fb887d2444 Update README.md 2021-01-21 13:03:18 -06:00
0cf963e873 Fix tests for Python 3.6 2021-01-21 13:01:14 -06:00
6890840c6d InternalSolver: Better specify and test infeasibility 2021-01-21 09:15:14 -06:00
05497cab07 Merge branch 'feature/training_sample' into dev 2021-01-21 08:32:20 -06:00
372d6eb066 Instance: Reformat comments 2021-01-21 08:29:38 -06:00
a1b959755c Fix solve_lp_first=False and add tests 2021-01-21 08:25:57 -06:00
06402516e6 Move collected data to instance.training_data 2021-01-21 08:21:40 -06:00
23dd311d75 Reorganize imports; start moving data to instance.training_data 2021-01-20 12:02:25 -06:00
947189f25f Disallow untyped calls and incomplete defs 2021-01-20 10:48:03 -06:00
7555f561f8 Use TypedDict from typing_extensions 2021-01-20 10:25:22 -06:00
3b2413291e Add mypy to requirements 2021-01-20 10:13:11 -06:00
87dc9f5f11 Remove scipy requirement 2021-01-20 10:11:03 -06:00
1971389a57 Add types to InternalSolver 2021-01-20 10:07:28 -06:00
69a82172b9 Fix some _compute_gap corner cases; add tests 2021-01-20 08:56:02 -06:00
a536d2ecc6 Fix various warnings 2021-01-19 22:52:39 -06:00
36061d5a14 Make _compute_gap static 2021-01-19 22:32:29 -06:00
9ddb952db0 Make LearningSolver.add internal 2021-01-19 22:32:05 -06:00
4b8672870a Add XpressPyomoSolver 2021-01-19 22:28:39 -06:00
34e1711081 Remove incorrect import 2021-01-19 22:02:17 -06:00
0371b2c7a9 Simply Pyomo solvers 2021-01-19 21:54:37 -06:00
185025e86c Remove methods to set solver parameters 2021-01-19 21:38:01 -06:00
ffc77075f5 Require a callable as the internal solver 2021-01-19 21:21:39 -06:00
3ff773402d Remove unused variable 2021-01-19 20:33:06 -06:00
fb006a7880 Merge branch 'dev' of github.com:iSoron/miplearn into dev 2021-01-19 09:52:51 -06:00
872ef0eb06 Benchmark: Move relative statistics to benchmark script 2021-01-19 09:47:29 -06:00
96a57efd25 Update .gitignore 2021-01-19 09:09:42 -06:00
23b38727a2 Benchmark: Update Makefile 2021-01-19 09:09:16 -06:00
d7aac56bd9 Benchmark: Remove unused save_chart; load multiple results 2021-01-19 09:09:03 -06:00
f05db85df8 Benchmark: Avoid loading instances to memory 2021-01-19 09:07:55 -06:00
aecc3a311f Merge branch 'feature/convert-ineqs' into dev 2021-01-19 07:22:14 -06:00
3efc92742d Merge pull request #3 from GregorCH/dev-robust-gap
Make gap computation robust against missing upper/lower bounds
2021-01-15 08:01:55 -06:00
Gregor Hendel
601bfa261a make gap computation robust against missing upper/lower bounds 2021-01-15 08:43:54 +01:00
088a4a0355 Fix formatting 2021-01-14 21:01:42 -06:00
5a062ad97e ConvertTight: Use x function from DropRedundant 2021-01-14 21:01:34 -06:00
fab7b5419b BenchmarkRunner: Create parent dirs in save_results 2021-01-14 21:00:52 -06:00
622d132ba2 Update package description 2021-01-14 18:35:17 -06:00
0ff16040b2 Update package description 2021-01-14 18:26:23 -06:00
137247aed9 GurobiSolver: Randomize seed 2021-01-14 11:31:40 -06:00
7e4b1d77a3 DropRedundant: Collect data from multiple runs 2021-01-14 11:27:47 -06:00
e12a896504 Add training_data argument to after_solve 2021-01-14 10:37:48 -06:00
30d6ea0a9b Benchmark: Include solver log in results file 2021-01-14 10:00:58 -06:00
beee252fa2 simulate_perfect: Do not overwrite original file 2021-01-13 11:04:33 -06:00
b01d97cc2b ConvertTight: Always check feasibility 2021-01-13 09:28:55 -06:00
d67af4a26b ConvertTight: Detect and fix sub-optimality 2021-01-12 11:56:25 -06:00
c9ad7a3f56 Benchmark: Add extra columns to CSV 2021-01-12 11:22:42 -06:00
f77d1d5de9 ConvertTight: Detect and fix infeasibility 2021-01-12 10:05:57 -06:00
e59386f941 Update .gitignore 2021-01-12 07:59:39 -06:00
dfe0239dff LearningSolver: Implement simulate_perfect 2021-01-12 07:54:58 -06:00
bdfe343fea Silence debug statements 2021-01-12 07:54:32 -06:00
7f55426909 Remove debug print statements 2021-01-11 10:26:44 -06:00
1a04482a20 Small improvements to benchmark scripts 2021-01-11 10:26:44 -06:00
3f1aec7fad RelaxationComponent: Always use np arrays 2021-01-07 12:29:43 -06:00
4057a65506 ConvertTightIneqs: Convert only inequalities, not equalities 2021-01-07 11:54:00 -06:00
1e3d4482f4 ConvertTightIneqs: Reduce default slack_tolerance to zero 2021-01-07 11:07:12 -06:00
317e16d471 ConvertTight: Don't take any action on constraints with negative slack 2021-01-07 11:03:02 -06:00
ec00f7555a Export steps 2021-01-07 10:34:38 -06:00
d8dc8471aa Implement tests for ConvertTightIneqsIntoEqsStep 2021-01-07 10:29:22 -06:00
0377b5b546 Minor changes to docstrings 2021-01-07 10:08:14 -06:00
191da25cfc Split relaxation.py into multiple files 2021-01-07 10:01:04 -06:00
144ee668e9 Fix failing tests 2021-01-07 09:41:55 -06:00
28e2ba7c01 Update README.md 2020-12-30 09:15:51 -06:00
c2b0fb5fb0 Create config.yml 2020-12-30 08:40:13 -06:00
8d832bf439 Update issue templates 2020-12-30 08:39:23 -06:00
6db5a7ccd2 Benchmark: Use default components to generate training data 2020-12-16 07:47:38 -06:00
c1b4ea448d PyomoSolver: Never query values of fixed variables 2020-12-08 13:12:47 -06:00
4a26de5ff1 RelaxationComponent: Convert tight inequalities into equalities 2020-12-05 21:11:08 -06:00
5b5f4b7671 InternalSolver: set_constraint_sense, set_constraint_rhs 2020-12-05 21:09:35 -06:00
8bb9996384 Break down RelaxationComponent into multiple steps 2020-12-05 20:34:29 -06:00
6540c88cc5 Component: Add default implementations to all methods 2020-12-05 20:34:00 -06:00
94b493ac4b Implement CompositeComponent 2020-12-05 20:16:22 -06:00
95672ad529 Update README.md 2020-12-05 11:31:49 -06:00
718ac0da06 Reformat additional files 2020-12-05 11:14:15 -06:00
d99600f101 Reformat source code with Black; add pre-commit hooks and CI checks 2020-12-05 10:59:33 -06:00
3823931382 RelaxationComponent: max_iterations 2020-12-04 10:30:55 -06:00
0b41c882ff Merge branch 'feature/files' into dev 2020-12-04 09:41:23 -06:00
388b10c63c Train without loading all instances to memory 2020-12-04 09:37:41 -06:00
54d80bfa85 RelaxationComponent: Implement check_dropped 2020-12-04 09:33:46 -06:00
51b5d8e549 Component: rename iteration_cb and lazy_cb 2020-12-04 08:35:43 -06:00
87a89eaf96 Update references; add DOI 2020-12-03 12:21:27 -06:00
e7426e445a Make tests compatible with Python 3.7+ 2020-12-03 12:00:32 -06:00
57d185dfc2 Merge branch 'gh-actions' into dev 2020-12-03 11:46:38 -06:00
272eb647fd Switch to GitHub runners; temporarily disable CPLEX 2020-12-03 11:43:13 -06:00
f34bfccf8b Set specific versions for all dependencies 2020-12-03 11:30:12 -06:00
f03cc15b75 Allow solve and parallel_solve to operate on files 2020-10-08 17:48:08 -05:00
131 changed files with 8699 additions and 5148 deletions

26
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@@ -0,0 +1,26 @@
---
name: Bug report
about: Something is broken in the package
title: ''
labels: ''
assignees: ''
---
## Description
A clear and concise description of what the bug is.
## Steps to Reproduce
Please describe how the developers can reproduce the problem on their own computers. Code snippets and sample input files are especially helpful. For example:
1. Install the package
2. Run the code below with the attached input file...
3. The following error appears...
## System Information
- Operating System: [e.g. Ubuntu 20.04]
- Python version: [e.g. 3.6]
- Solver: [e.g. Gurobi 9.0]
- Package version: [e.g. 0.1.0]

8
.github/ISSUE_TEMPLATE/config.yml vendored Normal file
View File

@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: Feature Request
url: https://github.com/ANL-CEEESA/MIPLearn/discussions/categories/feature-requests
about: Submit ideas for new features and small enhancements
- name: Help & FAQ
url: https://github.com/ANL-CEEESA/MIPLearn/discussions/categories/help-faq
about: Ask questions about the package and get help from the community

11
.github/workflows/lint.yml vendored Normal file
View File

@@ -0,0 +1,11 @@
name: Lint
on: [push, pull_request]
jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- uses: psf/black@20.8b1

View File

@@ -1,18 +0,0 @@
name: Test
on: push
jobs:
build:
runs-on: self-hosted
steps:
- uses: actions/checkout@v1
- name: Run tests
run: |
rm -rf ~/.conda/envs/miplearn-test
yes | conda create --name miplearn-test python=3.6
(cd /opt/gurobi900/linux64 && ~/.conda/envs/miplearn-test/bin/python setup.py install)
(cd /opt/cplex-12.8/cplex/python/3.6/x86-64_linux && ~/.conda/envs/miplearn-test/bin/python setup.py install)
make install test \
PYTHON=~/.conda/envs/miplearn-test/bin/python \
PIP=~/.conda/envs/miplearn-test/bin/pip3 \
PYTEST=~/.conda/envs/miplearn-test/bin/pytest

27
.github/workflows/test.yml vendored Normal file
View File

@@ -0,0 +1,27 @@
name: Test
on:
push:
pull_request:
schedule:
- cron: '45 10 * * *'
jobs:
test:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.7, 3.8]
steps:
- name: Check out source code
uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: make install-deps
- name: Test
run: make test

12
.gitignore vendored
View File

@@ -1,5 +1,7 @@
TODO.md
.idea
*.gz
done
*.bin
*$py.class
*.cover
@@ -39,8 +41,8 @@ TODO.md
/site
ENV/
MANIFEST
__pycache__/
__pypackages__/
**/__pycache__/
**/__pypackages__/
build/
celerybeat-schedule
celerybeat.pid
@@ -56,7 +58,6 @@ eggs/
env.bak/
env/
htmlcov/
instance/
ipython_config.py
lib/
lib64/
@@ -75,3 +76,8 @@ venv.bak/
venv/
wheels/
notebooks/
.vscode
tmp
benchmark/tsp
benchmark/stab
benchmark/knapsack

7
.mypy.ini Normal file
View File

@@ -0,0 +1,7 @@
[mypy]
ignore_missing_imports = True
disallow_untyped_defs = True
disallow_untyped_calls = True
disallow_incomplete_defs = True
pretty = True
no_implicit_optional = True

6
.pre-commit-config.yaml Normal file
View File

@@ -0,0 +1,6 @@
repos:
- repo: https://github.com/ambv/black
rev: 20.8b1
hooks:
- id: black
args: ["--check"]

45
CHANGELOG.md Normal file
View File

@@ -0,0 +1,45 @@
# MIPLearn: Changelog
## [0.2.0] - [Unreleased]
### Added
- **Added two new machine learning components:**
- Added `StaticLazyConstraintComponent`, which allows the user to mark some constraints in the formulation as lazy, instead of constructing them in a callback. ML predicts which static lazy constraints should be kept in the formulation, and which should be removed.
- Added `UserCutComponents`, which predicts which user cuts should be generated and added to the formulation as constraints ahead-of-time, before solving the MIP.
- **Added support for additional MILP solvers:**
- Added support for CPLEX and XPRESS, through the Pyomo modeling language, in addition to (existing) Gurobi. The solver classes are named `CplexPyomoSolver`, `XpressPyomoSolver` and `GurobiPyomoSolver`.
- Added support for Gurobi without any modeling language. The solver class is named `GurobiSolver`. In this case, `instance.to_model` should return a `gp.Model` object.
- Added support for solving MPS files, produced externally, through the `GurobiSolver` class mentioned above.
- **Added dynamic thresholds:**
- In previous versions of the package, it was necessary to manually adjust component aggressiveness to reach a desired precision/recall. This can now be done automatically with `MinProbabilityThreshold`, `MinPrecisionThreshold` and `MinRecallThreshold`.
- **Reduced memory requirements:**
- Previous versions of the package required all training instances to be kept in memory at all times, which was prohibitive for large-scale problems. It is now possible to store instances in files until they are needed, using `PickleGzInstance`.
- **Refactoring:**
- Added static types to all classes (with mypy).
### Changed
- Variables are now referenced by their names, instead of tuples `(var_name, index)`. This change was required to improve the compatibility with modeling languages other than Pyomo, which do not follow this convention. For performance reasons, the functions `get_variable_features` and `get_variable_categories` should now return a dictionary containing categories and features for all relevant variables. Previously, MIPLearn had to perform two function calls per variable, which was too slow for very large models.
- Internal solvers must now be specified as objects, instead of strings. For example,
```python
solver = LearningSolver(
solver=GurobiPyomoSolver(
params={
"TimeLimit": 300,
"Threads": 4,
}
)
)
```
- `LazyConstraintComponent` has been renamed to `DynamicLazyConstraintsComponent`.
- Categories, lazy constraints and cutting plane identifiers must now be strings, instead of `Hashable`. This change was required for compatibility with the HDF5 data format.
### Removed
- Temporarily removed the experimental `BranchPriorityComponent`. This component will be re-added in the Julia version of the package.
- Removed `solver.add` method, previously used to add components to an existing solver. Use the constructor `LearningSolver(components=[...])` instead.
## [0.1.0] - 2020-11-23
- Initial public release

View File

@@ -1,13 +1,14 @@
PYTHON := python3
PYTEST := pytest
PIP := pip3
PYTEST_ARGS := -W ignore::DeprecationWarning -vv -x --log-level=DEBUG
PIP := $(PYTHON) -m pip
MYPY := $(PYTHON) -m mypy
PYTEST_ARGS := -W ignore::DeprecationWarning -vv --log-level=DEBUG
VERSION := 0.2
all: docs test
clean:
rm -rf build
rm -rf build/* dist/*
develop:
$(PYTHON) setup.py develop
@@ -19,19 +20,31 @@ dist-upload:
$(PYTHON) -m twine upload dist/*
docs:
mkdocs build -d ../docs/$(VERSION)/
rm -rf ../docs/$(VERSION)
cd docs; make clean; make dirhtml
rsync -avP --delete-after docs/_build/dirhtml/ ../docs/$(VERSION)
docs-dev:
mkdocs build -d ../docs/dev/
install-deps:
$(PIP) install --upgrade pip
$(PIP) install --upgrade -i https://pypi.gurobi.com gurobipy
$(PIP) install --upgrade xpress
$(PIP) install --upgrade -r requirements.txt
install:
$(PIP) install -r requirements.txt
$(PYTHON) setup.py install
uninstall:
$(PIP) uninstall miplearn
reformat:
$(PYTHON) -m black .
test:
rm -rf .mypy_cache
$(MYPY) -p miplearn
$(MYPY) -p tests
$(MYPY) -p benchmark
$(PYTEST) $(PYTEST_ARGS)
.PHONY: test test-watch docs install
.PHONY: test test-watch docs install dist

View File

@@ -1,12 +1,22 @@
![Build status](https://img.shields.io/github/workflow/status/ANL-CEEESA/MIPLearn/Test)
![BSD License](https://img.shields.io/badge/license-BSD-blue)
<h1 align="center">MIPLearn</h1>
<p align="center">
<a href="https://github.com/ANL-CEEESA/MIPLearn/actions">
<img src="https://github.com/ANL-CEEESA/MIPLearn/workflows/Test/badge.svg">
</a>
<a href="https://doi.org/10.5281/zenodo.4287567">
<img src="https://zenodo.org/badge/DOI/10.5281/zenodo.4287567.svg">
</a>
<a href="https://github.com/ANL-CEEESA/MIPLearn/releases/">
<img src="https://img.shields.io/github/v/release/ANL-CEEESA/MIPLearn?include_prereleases&label=pre-release">
</a>
<a href="https://github.com/ANL-CEEESA/MIPLearn/discussions">
<img src="https://img.shields.io/badge/GitHub-Discussions-%23fc4ebc" />
</a>
</p>
MIPLearn
========
**MIPLearn** is an extensible framework for solving discrete optimization problems using a combination of Mixed-Integer Linear Programming (MIP) and Machine Learning (ML).
**MIPLearn** is an extensible framework for **Learning-Enhanced Mixed-Integer Optimization**, an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data.
The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see [benchmarks](https://anl-ceeesa.github.io/MIPLearn/0.1/problems/) and [references](https://anl-ceeesa.github.io/MIPLearn/0.1/about/)).
MIPLearn uses ML methods to automatically identify patterns in previously solved instances of the problem, then uses these patterns to accelerate the performance of conventional state-of-the-art MIP solvers such as CPLEX, Gurobi or XPRESS. Unlike pure ML methods, MIPLearn is not only able to find high-quality solutions to discrete optimization problems, but it can also prove the optimality and feasibility of these solutions. Unlike conventional MIP solvers, MIPLearn can take full advantage of very specific observations that happen to be true in a particular family of instances (such as the observation that a particular constraint is typically redundant, or that a particular variable typically assumes a certain value). For certain classes of problems, this approach has been shown to provide significant performance benefits (see [benchmarks](https://anl-ceeesa.github.io/MIPLearn/0.1/problems/) and [references](https://anl-ceeesa.github.io/MIPLearn/0.1/about/)).
Features
--------
@@ -23,6 +33,22 @@ Documentation
For installation instructions, basic usage and benchmarks results, see the [official documentation](https://anl-ceeesa.github.io/MIPLearn/).
Acknowledgments
---------------
* Based upon work supported by **Laboratory Directed Research and Development** (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357.
* Based upon work supported by the **U.S. Department of Energy Advanced Grid Modeling Program** under Grant DE-OE0000875.
Citing MIPLearn
---------------
If you use MIPLearn in your research (either the solver or the included problem generators), we kindly request that you cite the package as follows:
* **Alinson S. Xavier, Feng Qiu.** *MIPLearn: An Extensible Framework for Learning-Enhanced Optimization*. Zenodo (2020). DOI: [10.5281/zenodo.4287567](https://doi.org/10.5281/zenodo.4287567)
If you use MIPLearn in the field of power systems optimization, we kindly request that you cite the reference below, in which the main techniques implemented in MIPLearn were first developed:
* **Alinson S. Xavier, Feng Qiu, Shabbir Ahmed.** *Learning to Solve Large-Scale Unit Commitment Problems.* INFORMS Journal on Computing (2020). DOI: [10.1287/ijoc.2020.0976](https://doi.org/10.1287/ijoc.2020.0976)
License
-------

View File

@@ -3,19 +3,19 @@
# Released under the modified BSD license. See COPYING.md for more details.
# Written by Alinson S. Xavier <axavier@anl.gov>
DATAFILE := miplearn-train-data.tar.gz
CHALLENGES := \
stab/ChallengeA \
knapsack/ChallengeA \
tsp/ChallengeA
main: $(addsuffix /performance.png, $(CHALLENGES))
test: $(addsuffix /performance.png, $(CHALLENGES))
%/train_instances.bin:
train: $(addsuffix /train/done, $(CHALLENGES))
%/train/done:
python benchmark.py train $*
%/benchmark_baseline.csv: %/train_instances.bin
%/benchmark_baseline.csv: %/train/done
python benchmark.py test-baseline $*
%/benchmark_ml.csv: %/benchmark_baseline.csv
@@ -27,21 +27,5 @@ main: $(addsuffix /performance.png, $(CHALLENGES))
clean:
rm -rvf $(CHALLENGES)
clean-ml:
rm -rvf */*/benchmark_ml.csv
clean-charts:
rm -rfv */*/performance.png
training-data-push:
tar -cvvzf $(DATAFILE) */*/*.bin
rsync -avP $(DATAFILE) andromeda:/www/axavier.org/projects/miplearn/$(DATAFILE)
rm -fv $(DATAFILE)
training-data-pull:
wget https://axavier.org/projects/miplearn/$(DATAFILE)
tar -xvvzf $(DATAFILE)
rm -f $(DATAFILE)
.PHONY: clean clean-ml clean-charts
.PHONY: clean
.SECONDARY:

0
benchmark/__init__.py Normal file
View File

View File

@@ -1,201 +1,268 @@
#!/usr/bin/env python
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
"""Benchmark script
"""MIPLearn Benchmark Scripts
Usage:
benchmark.py train <challenge>
benchmark.py test-baseline <challenge>
benchmark.py test-ml <challenge>
benchmark.py train [options] <challenge>
benchmark.py test-baseline [options] <challenge>
benchmark.py test-ml [options] <challenge>
benchmark.py charts <challenge>
Options:
-h --help Show this screen
--train-jobs=<n> Number of instances to solve in parallel during training [default: 10]
--train-time-limit=<n> Solver time limit during training in seconds [default: 900]
--test-jobs=<n> Number of instances to solve in parallel during test [default: 5]
--test-time-limit=<n> Solver time limit during test in seconds [default: 900]
--solver-threads=<n> Number of threads the solver is allowed to use [default: 4]
"""
from docopt import docopt
import importlib, pathlib
from miplearn import (LearningSolver, BenchmarkRunner)
from numpy import median
import pyomo.environ as pe
import pickle
import glob
import importlib
import logging
import sys
import os
from pathlib import Path
from typing import Dict, List
logging.basicConfig(format='%(asctime)s %(levelname).1s %(name)s: %(message)12s',
datefmt='%H:%M:%S',
level=logging.INFO,
stream=sys.stdout)
logging.getLogger('gurobipy').setLevel(logging.ERROR)
logging.getLogger('pyomo.core').setLevel(logging.ERROR)
logging.getLogger('miplearn').setLevel(logging.INFO)
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from docopt import docopt
from numpy import median
from miplearn import (
LearningSolver,
BenchmarkRunner,
GurobiPyomoSolver,
setup_logger,
PickleGzInstance,
write_pickle_gz_multiple,
Instance,
)
setup_logger()
logging.getLogger("gurobipy").setLevel(logging.ERROR)
logging.getLogger("pyomo.core").setLevel(logging.ERROR)
logger = logging.getLogger("benchmark")
n_jobs = 10
train_time_limit = 3600
test_time_limit = 900
internal_solver = "gurobi"
args = docopt(__doc__)
basepath = args["<challenge>"]
pathlib.Path(basepath).mkdir(parents=True, exist_ok=True)
def save(obj, filename):
logger.info("Writing %s..." % filename)
with open(filename, "wb") as file:
pickle.dump(obj, file)
def load(filename):
import pickle
with open(filename, "rb") as file:
return pickle.load(file)
def train():
def train(args: Dict) -> None:
basepath = args["<challenge>"]
problem_name, challenge_name = args["<challenge>"].split("/")
pkg = importlib.import_module("miplearn.problems.%s" % problem_name)
pkg = importlib.import_module(f"miplearn.problems.{problem_name}")
challenge = getattr(pkg, challenge_name)()
train_instances = challenge.training_instances
test_instances = challenge.test_instances
solver = LearningSolver(time_limit=train_time_limit,
solver=internal_solver,
components={})
solver.parallel_solve(train_instances, n_jobs=n_jobs)
save(train_instances, "%s/train_instances.bin" % basepath)
save(test_instances, "%s/test_instances.bin" % basepath)
if not os.path.isdir(f"{basepath}/train"):
write_pickle_gz_multiple(challenge.training_instances, f"{basepath}/train")
write_pickle_gz_multiple(challenge.test_instances, f"{basepath}/test")
done_filename = f"{basepath}/train/done"
if not os.path.isfile(done_filename):
train_instances: List[Instance] = [
PickleGzInstance(f) for f in glob.glob(f"{basepath}/train/*.gz")
]
solver = LearningSolver(
solver=GurobiPyomoSolver(
params={
"TimeLimit": int(args["--train-time-limit"]),
"Threads": int(args["--solver-threads"]),
}
),
)
solver.parallel_solve(
train_instances,
n_jobs=int(args["--train-jobs"]),
)
Path(done_filename).touch(exist_ok=True)
def test_baseline():
test_instances = load("%s/test_instances.bin" % basepath)
def test_baseline(args: Dict) -> None:
basepath = args["<challenge>"]
test_instances: List[Instance] = [
PickleGzInstance(f) for f in glob.glob(f"{basepath}/test/*.gz")
]
csv_filename = f"{basepath}/benchmark_baseline.csv"
if not os.path.isfile(csv_filename):
solvers = {
"baseline": LearningSolver(
time_limit=test_time_limit,
solver=internal_solver,
solver=GurobiPyomoSolver(
params={
"TimeLimit": int(args["--test-time-limit"]),
"Threads": int(args["--solver-threads"]),
}
),
),
}
benchmark = BenchmarkRunner(solvers)
benchmark.parallel_solve(test_instances, n_jobs=n_jobs)
benchmark.save_results("%s/benchmark_baseline.csv" % basepath)
benchmark.parallel_solve(
test_instances,
n_jobs=int(args["--test-jobs"]),
)
benchmark.write_csv(csv_filename)
def test_ml():
logger.info("Loading instances...")
train_instances = load("%s/train_instances.bin" % basepath)
test_instances = load("%s/test_instances.bin" % basepath)
def test_ml(args: Dict) -> None:
basepath = args["<challenge>"]
test_instances: List[Instance] = [
PickleGzInstance(f) for f in glob.glob(f"{basepath}/test/*.gz")
]
train_instances: List[Instance] = [
PickleGzInstance(f) for f in glob.glob(f"{basepath}/train/*.gz")
]
csv_filename = f"{basepath}/benchmark_ml.csv"
if not os.path.isfile(csv_filename):
solvers = {
"ml-exact": LearningSolver(
time_limit=test_time_limit,
solver=internal_solver,
solver=GurobiPyomoSolver(
params={
"TimeLimit": int(args["--test-time-limit"]),
"Threads": int(args["--solver-threads"]),
}
),
),
"ml-heuristic": LearningSolver(
time_limit=test_time_limit,
solver=internal_solver,
solver=GurobiPyomoSolver(
params={
"TimeLimit": int(args["--test-time-limit"]),
"Threads": int(args["--solver-threads"]),
}
),
mode="heuristic",
),
}
benchmark = BenchmarkRunner(solvers)
logger.info("Loading results...")
benchmark.load_results("%s/benchmark_baseline.csv" % basepath)
logger.info("Fitting...")
benchmark.fit(train_instances)
logger.info("Solving...")
benchmark.parallel_solve(test_instances, n_jobs=n_jobs)
benchmark.save_results("%s/benchmark_ml.csv" % basepath)
benchmark.parallel_solve(
test_instances,
n_jobs=int(args["--test-jobs"]),
)
benchmark.write_csv(csv_filename)
def charts():
import matplotlib.pyplot as plt
import seaborn as sns
def charts(args: Dict) -> None:
basepath = args["<challenge>"]
sns.set_style("whitegrid")
sns.set_palette("Blues_r")
benchmark = BenchmarkRunner({})
benchmark.load_results("%s/benchmark_ml.csv" % basepath)
results = benchmark.raw_results()
results["Gap (%)"] = results["Gap"] * 100.0
csv_files = [
f"{basepath}/benchmark_baseline.csv",
f"{basepath}/benchmark_ml.csv",
]
results = pd.concat(map(pd.read_csv, csv_files))
groups = results.groupby("Instance")
best_lower_bound = groups["Lower bound"].transform("max")
best_upper_bound = groups["Upper bound"].transform("min")
results["Relative lower bound"] = results["Lower bound"] / best_lower_bound
results["Relative upper bound"] = results["Upper bound"] / best_upper_bound
sense = results.loc[0, "Sense"]
if sense == "min":
primal_column = "Relative Upper Bound"
obj_column = "Upper Bound"
predicted_obj_column = "Predicted UB"
if (sense == "min").any():
primal_column = "Relative upper bound"
obj_column = "Upper bound"
predicted_obj_column = "Objective: Predicted upper bound"
else:
primal_column = "Relative Lower Bound"
obj_column = "Lower Bound"
predicted_obj_column = "Predicted LB"
primal_column = "Relative lower bound"
obj_column = "Lower bound"
predicted_obj_column = "Objective: Predicted lower bound"
palette={
"baseline": "#9b59b6",
"ml-exact": "#3498db",
"ml-heuristic": "#95a5a6"
}
fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1,
palette = {"baseline": "#9b59b6", "ml-exact": "#3498db", "ml-heuristic": "#95a5a6"}
fig, (ax1, ax2, ax3, ax4) = plt.subplots(
nrows=1,
ncols=4,
figsize=(12,4),
gridspec_kw={'width_ratios': [2, 1, 1, 2]},
figsize=(12, 4),
gridspec_kw={"width_ratios": [2, 1, 1, 2]},
)
sns.stripplot(x="Solver",
y="Wallclock Time",
# Wallclock time
sns.stripplot(
x="Solver",
y="Wallclock time",
data=results,
ax=ax1,
jitter=0.25,
palette=palette,
size=4.0,
);
sns.barplot(x="Solver",
y="Wallclock Time",
)
sns.barplot(
x="Solver",
y="Wallclock time",
data=results,
ax=ax1,
errwidth=0.,
errwidth=0.0,
alpha=0.4,
palette=palette,
estimator=median,
);
ax1.set(ylabel='Wallclock Time (s)')
)
ax1.set(ylabel="Wallclock time (s)")
# Gap
ax2.set_ylim(-0.5, 5.5)
sns.stripplot(x="Solver",
y="Gap (%)",
sns.stripplot(
x="Solver",
y="Gap",
jitter=0.25,
data=results[results["Solver"] != "ml-heuristic"],
ax=ax2,
palette=palette,
size=4.0,
);
ax3.set_ylim(0.95,1.05)
sns.stripplot(x="Solver",
)
# Relative primal bound
ax3.set_ylim(0.95, 1.05)
sns.stripplot(
x="Solver",
y=primal_column,
jitter=0.25,
data=results[results["Solver"] == "ml-heuristic"],
ax=ax3,
palette=palette,
);
sns.scatterplot(x=obj_column,
)
sns.scatterplot(
x=obj_column,
y=predicted_obj_column,
hue="Solver",
data=results[results["Solver"] == "ml-exact"],
ax=ax4,
palette=palette,
);
)
# Predicted vs actual primal bound
xlim, ylim = ax4.get_xlim(), ax4.get_ylim()
ax4.plot([-1e10, 1e10], [-1e10, 1e10], ls='-', color="#cccccc");
ax4.plot(
[-1e10, 1e10],
[-1e10, 1e10],
ls="-",
color="#cccccc",
)
ax4.set_xlim(xlim)
ax4.set_ylim(ylim)
ax4.get_legend().remove()
ax4.set(
ylabel="Predicted value",
xlabel="Actual value",
)
fig.tight_layout()
plt.savefig("%s/performance.png" % basepath,
bbox_inches='tight',
dpi=150)
plt.savefig(
f"{basepath}/performance.png",
bbox_inches="tight",
dpi=150,
)
def main() -> None:
args = docopt(__doc__)
if args["train"]:
train(args)
if args["test-baseline"]:
test_baseline(args)
if args["test-ml"]:
test_ml(args)
if args["charts"]:
charts(args)
if __name__ == "__main__":
if args["train"]:
train()
if args["test-baseline"]:
test_baseline()
if args["test-ml"]:
test_ml()
if args["charts"]:
charts()
main()

View File

@@ -1,51 +0,0 @@
,Solver,Instance,Wallclock Time,Lower Bound,Upper Bound,Gap,Nodes,Mode,Relative Lower Bound,Relative Upper Bound,Relative Wallclock Time,Relative Gap,Relative Nodes
0,baseline,0,662.7372989654541,59162.0,59167.0,8.451370812345763e-05,18688107.0,exact,1.0,1.0,1.0,1.0,1.0
1,baseline,1,900.0007548332214,59137.0,59256.0,0.002012276578115224,24175550.0,exact,1.0,1.0,1.0,1.0,1.0
2,baseline,2,900.0016160011292,59186.0,59285.0,0.0016726928665562802,24089218.0,exact,1.0,1.0,1.0,1.0,1.0
3,baseline,3,900.0023140907288,59145.0,59231.0,0.0014540535970918927,24595759.0,exact,1.0,1.0,1.0,1.0,1.0
4,baseline,4,900.0024960041046,59142.0,59213.0,0.0012005004903452706,25467171.0,exact,1.0,1.0,1.0,1.0,1.0
5,baseline,5,900.002925157547,59126.0,59244.0,0.0019957379156377904,23457042.0,exact,1.0,1.0,1.0,1.0,1.0
6,baseline,6,900.0031039714813,59125.0,59236.97169757604,0.0018938130668251741,24240772.0,exact,1.0,1.0,1.0,1.0,1.0
7,baseline,7,900.002781867981,59105.0,59212.0,0.001810337534895525,24042592.0,exact,1.0,1.0,1.0,1.0,1.0
8,baseline,8,900.0021660327911,59169.0,59251.0,0.0013858608392908448,25512146.0,exact,1.0,1.0,1.0,1.0,1.0
9,baseline,9,900.0015439987183,59130.0,59256.0,0.00213089802130898,23227790.0,exact,1.0,1.0,1.0,1.0,1.0
10,baseline,10,900.0024099349976,59127.0,59201.0,0.0012515432881762985,25015636.0,exact,1.0,1.0,1.0,1.0,1.0
11,baseline,11,900.0025849342346,59198.0,59289.0,0.0015372140950707794,24558832.0,exact,1.0,1.0,1.0,1.0,1.0
12,baseline,12,900.0022029876709,59102.0,59224.0,0.002064227944908802,24026788.0,exact,1.0,1.0,1.0,1.0,1.0
13,baseline,13,900.0011007785797,59150.0,59206.0,0.0009467455621301775,24953207.0,exact,1.0,1.0,1.0,1.0,1.0
14,baseline,14,900.0014700889587,59169.0,59250.0,0.0013689600973482736,25494260.0,exact,1.0,1.0,1.0,1.0,1.0
15,baseline,15,900.0013790130615,59083.0,59196.0,0.0019125636816004605,23792716.0,exact,1.0,1.0,1.0,1.0,1.0
16,baseline,16,900.0020098686218,59126.0,59233.0,0.0018096945506207082,23398798.0,exact,1.0,1.0,1.0,1.0,1.0
17,baseline,17,900.0023510456085,59156.0,59197.0,0.0006930826965988235,25573586.0,exact,1.0,1.0,1.0,1.0,1.0
18,baseline,18,900.002711057663,59118.0,59211.0,0.0015731249365675429,24489136.0,exact,1.0,1.0,1.0,1.0,1.0
19,baseline,19,724.1934628486633,59159.0,59164.0,8.451799388089724e-05,20931760.0,exact,1.0,1.0,1.0,1.0,1.0
20,baseline,20,900.0011439323425,59068.0,59191.0,0.0020823457709758246,23411794.0,exact,1.0,1.0,1.0,1.0,1.0
21,baseline,21,380.06568694114685,59175.0,59180.0,8.449514152936207e-05,11618526.0,exact,1.0,1.0,1.0,1.0,1.0
22,baseline,22,900.0016028881073,59121.0,59154.94711904252,0.0005741973079365614,26352886.0,exact,1.0,1.0,1.0,1.0,1.0
23,baseline,23,230.25152111053467,59193.0,59198.0,8.44694474008751e-05,6776049.0,exact,1.0,1.0,1.0,1.0,1.0
24,baseline,24,900.0010840892792,59162.0,59240.0,0.001318413846725939,24727727.0,exact,1.0,1.0,1.0,1.0,1.0
25,baseline,25,900.0015320777893,59096.0,59210.0,0.001929064572898335,23438919.0,exact,1.0,1.0,1.0,1.0,1.0
26,baseline,26,900.0015478134155,59089.0,59203.0,0.001929293100238623,23826788.0,exact,1.0,1.0,1.0,1.0,1.0
27,baseline,27,900.0010070800781,59153.0,59249.0,0.0016229100806383447,24336831.0,exact,1.0,1.0,1.0,1.0,1.0
28,baseline,28,900.001277923584,59112.0,59208.0,0.0016240357287860333,25111591.0,exact,1.0,1.0,1.0,1.0,1.0
29,baseline,29,900.0012440681458,59182.0,59263.0,0.0013686593896792944,24919871.0,exact,1.0,1.0,1.0,1.0,1.0
30,baseline,30,900.0012910366058,59134.0,59241.0,0.001809449724354855,23615391.0,exact,1.0,1.0,1.0,1.0,1.0
31,baseline,31,900.0023548603058,59082.0,59169.0,0.0014725297044785213,26213904.0,exact,1.0,1.0,1.0,1.0,1.0
32,baseline,32,875.9193549156189,59175.0,59180.0,8.449514152936207e-05,24935695.0,exact,1.0,1.0,1.0,1.0,1.0
33,baseline,33,900.0018489360809,59088.0,59177.0,0.0015062279989168698,25210167.0,exact,1.0,1.0,1.0,1.0,1.0
34,baseline,34,232.1541509628296,59190.0,59195.0,8.447372867038352e-05,7309410.0,exact,1.0,1.0,1.0,1.0,1.0
35,baseline,35,900.0025398731232,59183.0,59262.0,0.001334842775797104,23927493.0,exact,1.0,1.0,1.0,1.0,1.0
36,baseline,36,900.0010929107666,59166.0,59254.0,0.00148734070243045,25589946.0,exact,1.0,1.0,1.0,1.0,1.0
37,baseline,37,622.9371509552002,59202.0,59207.0,8.445660619573663e-05,18595087.0,exact,1.0,1.0,1.0,1.0,1.0
38,baseline,38,557.924427986145,59212.0,59217.0,8.444234276835777e-05,16270407.0,exact,1.0,1.0,1.0,1.0,1.0
39,baseline,39,900.0010092258453,59143.0,59185.0,0.0007101432122144632,26304077.0,exact,1.0,1.0,1.0,1.0,1.0
40,baseline,40,900.0011250972748,59158.0,59242.99535479154,0.0014367516615088902,23949337.0,exact,1.0,1.0,1.0,1.0,1.0
41,baseline,41,900.000893831253,59170.0,59257.0,0.0014703396991718777,24299427.0,exact,1.0,1.0,1.0,1.0,1.0
42,baseline,42,900.0017001628876,59089.0,59228.0,0.002352383692396216,23229681.0,exact,1.0,1.0,1.0,1.0,1.0
43,baseline,43,127.60789799690247,59232.0,59237.0,8.44138303619665e-05,4041704.0,exact,1.0,1.0,1.0,1.0,1.0
44,baseline,44,166.38699293136597,59201.0,59206.0,8.445803280349994e-05,5151689.0,exact,1.0,1.0,1.0,1.0,1.0
45,baseline,45,900.0007989406586,59135.0,59247.0,0.001893971421324089,26922402.0,exact,1.0,1.0,1.0,1.0,1.0
46,baseline,46,900.001415014267,59152.0,59254.0,0.001724371111712199,26485728.0,exact,1.0,1.0,1.0,1.0,1.0
47,baseline,47,900.0020279884338,59123.0,59235.0,0.0018943558344468312,28222784.0,exact,1.0,1.0,1.0,1.0,1.0
48,baseline,48,900.0011022090912,59176.0,59284.0,0.0018250642152223874,28675410.0,exact,1.0,1.0,1.0,1.0,1.0
49,baseline,49,900.0012428760529,59150.0,59206.0,0.0009467455621301775,30531240.0,exact,1.0,1.0,1.0,1.0,1.0
1 Solver Instance Wallclock Time Lower Bound Upper Bound Gap Nodes Mode Relative Lower Bound Relative Upper Bound Relative Wallclock Time Relative Gap Relative Nodes
2 0 baseline 0 662.7372989654541 59162.0 59167.0 8.451370812345763e-05 18688107.0 exact 1.0 1.0 1.0 1.0 1.0
3 1 baseline 1 900.0007548332214 59137.0 59256.0 0.002012276578115224 24175550.0 exact 1.0 1.0 1.0 1.0 1.0
4 2 baseline 2 900.0016160011292 59186.0 59285.0 0.0016726928665562802 24089218.0 exact 1.0 1.0 1.0 1.0 1.0
5 3 baseline 3 900.0023140907288 59145.0 59231.0 0.0014540535970918927 24595759.0 exact 1.0 1.0 1.0 1.0 1.0
6 4 baseline 4 900.0024960041046 59142.0 59213.0 0.0012005004903452706 25467171.0 exact 1.0 1.0 1.0 1.0 1.0
7 5 baseline 5 900.002925157547 59126.0 59244.0 0.0019957379156377904 23457042.0 exact 1.0 1.0 1.0 1.0 1.0
8 6 baseline 6 900.0031039714813 59125.0 59236.97169757604 0.0018938130668251741 24240772.0 exact 1.0 1.0 1.0 1.0 1.0
9 7 baseline 7 900.002781867981 59105.0 59212.0 0.001810337534895525 24042592.0 exact 1.0 1.0 1.0 1.0 1.0
10 8 baseline 8 900.0021660327911 59169.0 59251.0 0.0013858608392908448 25512146.0 exact 1.0 1.0 1.0 1.0 1.0
11 9 baseline 9 900.0015439987183 59130.0 59256.0 0.00213089802130898 23227790.0 exact 1.0 1.0 1.0 1.0 1.0
12 10 baseline 10 900.0024099349976 59127.0 59201.0 0.0012515432881762985 25015636.0 exact 1.0 1.0 1.0 1.0 1.0
13 11 baseline 11 900.0025849342346 59198.0 59289.0 0.0015372140950707794 24558832.0 exact 1.0 1.0 1.0 1.0 1.0
14 12 baseline 12 900.0022029876709 59102.0 59224.0 0.002064227944908802 24026788.0 exact 1.0 1.0 1.0 1.0 1.0
15 13 baseline 13 900.0011007785797 59150.0 59206.0 0.0009467455621301775 24953207.0 exact 1.0 1.0 1.0 1.0 1.0
16 14 baseline 14 900.0014700889587 59169.0 59250.0 0.0013689600973482736 25494260.0 exact 1.0 1.0 1.0 1.0 1.0
17 15 baseline 15 900.0013790130615 59083.0 59196.0 0.0019125636816004605 23792716.0 exact 1.0 1.0 1.0 1.0 1.0
18 16 baseline 16 900.0020098686218 59126.0 59233.0 0.0018096945506207082 23398798.0 exact 1.0 1.0 1.0 1.0 1.0
19 17 baseline 17 900.0023510456085 59156.0 59197.0 0.0006930826965988235 25573586.0 exact 1.0 1.0 1.0 1.0 1.0
20 18 baseline 18 900.002711057663 59118.0 59211.0 0.0015731249365675429 24489136.0 exact 1.0 1.0 1.0 1.0 1.0
21 19 baseline 19 724.1934628486633 59159.0 59164.0 8.451799388089724e-05 20931760.0 exact 1.0 1.0 1.0 1.0 1.0
22 20 baseline 20 900.0011439323425 59068.0 59191.0 0.0020823457709758246 23411794.0 exact 1.0 1.0 1.0 1.0 1.0
23 21 baseline 21 380.06568694114685 59175.0 59180.0 8.449514152936207e-05 11618526.0 exact 1.0 1.0 1.0 1.0 1.0
24 22 baseline 22 900.0016028881073 59121.0 59154.94711904252 0.0005741973079365614 26352886.0 exact 1.0 1.0 1.0 1.0 1.0
25 23 baseline 23 230.25152111053467 59193.0 59198.0 8.44694474008751e-05 6776049.0 exact 1.0 1.0 1.0 1.0 1.0
26 24 baseline 24 900.0010840892792 59162.0 59240.0 0.001318413846725939 24727727.0 exact 1.0 1.0 1.0 1.0 1.0
27 25 baseline 25 900.0015320777893 59096.0 59210.0 0.001929064572898335 23438919.0 exact 1.0 1.0 1.0 1.0 1.0
28 26 baseline 26 900.0015478134155 59089.0 59203.0 0.001929293100238623 23826788.0 exact 1.0 1.0 1.0 1.0 1.0
29 27 baseline 27 900.0010070800781 59153.0 59249.0 0.0016229100806383447 24336831.0 exact 1.0 1.0 1.0 1.0 1.0
30 28 baseline 28 900.001277923584 59112.0 59208.0 0.0016240357287860333 25111591.0 exact 1.0 1.0 1.0 1.0 1.0
31 29 baseline 29 900.0012440681458 59182.0 59263.0 0.0013686593896792944 24919871.0 exact 1.0 1.0 1.0 1.0 1.0
32 30 baseline 30 900.0012910366058 59134.0 59241.0 0.001809449724354855 23615391.0 exact 1.0 1.0 1.0 1.0 1.0
33 31 baseline 31 900.0023548603058 59082.0 59169.0 0.0014725297044785213 26213904.0 exact 1.0 1.0 1.0 1.0 1.0
34 32 baseline 32 875.9193549156189 59175.0 59180.0 8.449514152936207e-05 24935695.0 exact 1.0 1.0 1.0 1.0 1.0
35 33 baseline 33 900.0018489360809 59088.0 59177.0 0.0015062279989168698 25210167.0 exact 1.0 1.0 1.0 1.0 1.0
36 34 baseline 34 232.1541509628296 59190.0 59195.0 8.447372867038352e-05 7309410.0 exact 1.0 1.0 1.0 1.0 1.0
37 35 baseline 35 900.0025398731232 59183.0 59262.0 0.001334842775797104 23927493.0 exact 1.0 1.0 1.0 1.0 1.0
38 36 baseline 36 900.0010929107666 59166.0 59254.0 0.00148734070243045 25589946.0 exact 1.0 1.0 1.0 1.0 1.0
39 37 baseline 37 622.9371509552002 59202.0 59207.0 8.445660619573663e-05 18595087.0 exact 1.0 1.0 1.0 1.0 1.0
40 38 baseline 38 557.924427986145 59212.0 59217.0 8.444234276835777e-05 16270407.0 exact 1.0 1.0 1.0 1.0 1.0
41 39 baseline 39 900.0010092258453 59143.0 59185.0 0.0007101432122144632 26304077.0 exact 1.0 1.0 1.0 1.0 1.0
42 40 baseline 40 900.0011250972748 59158.0 59242.99535479154 0.0014367516615088902 23949337.0 exact 1.0 1.0 1.0 1.0 1.0
43 41 baseline 41 900.000893831253 59170.0 59257.0 0.0014703396991718777 24299427.0 exact 1.0 1.0 1.0 1.0 1.0
44 42 baseline 42 900.0017001628876 59089.0 59228.0 0.002352383692396216 23229681.0 exact 1.0 1.0 1.0 1.0 1.0
45 43 baseline 43 127.60789799690247 59232.0 59237.0 8.44138303619665e-05 4041704.0 exact 1.0 1.0 1.0 1.0 1.0
46 44 baseline 44 166.38699293136597 59201.0 59206.0 8.445803280349994e-05 5151689.0 exact 1.0 1.0 1.0 1.0 1.0
47 45 baseline 45 900.0007989406586 59135.0 59247.0 0.001893971421324089 26922402.0 exact 1.0 1.0 1.0 1.0 1.0
48 46 baseline 46 900.001415014267 59152.0 59254.0 0.001724371111712199 26485728.0 exact 1.0 1.0 1.0 1.0 1.0
49 47 baseline 47 900.0020279884338 59123.0 59235.0 0.0018943558344468312 28222784.0 exact 1.0 1.0 1.0 1.0 1.0
50 48 baseline 48 900.0011022090912 59176.0 59284.0 0.0018250642152223874 28675410.0 exact 1.0 1.0 1.0 1.0 1.0
51 49 baseline 49 900.0012428760529 59150.0 59206.0 0.0009467455621301775 30531240.0 exact 1.0 1.0 1.0 1.0 1.0

View File

@@ -1,151 +0,0 @@
,Solver,Instance,Wallclock Time,Lower Bound,Upper Bound,Gap,Nodes,Mode,Relative Lower Bound,Relative Upper Bound,Relative Wallclock Time,Relative Gap,Relative Nodes,Predicted LB,Predicted UB,Sense
0,baseline,0,662.7372989654541,59162.0,59167.0,8.451370812345763e-05,18688107.0,exact,1.0,1.0004734608295711,8.70828628181939,1.0,5.622307810564691,,,
1,baseline,1,900.0007548332214,59137.0,59256.0,0.002012276578115224,24175550.0,exact,1.0,1.0019275641675967,5.256438928065297,23.8,2.9939288096459404,,,
2,baseline,2,900.0016160011293,59186.0,59285.0,0.0016726928665562802,24089218.0,exact,1.0,1.0015880792688077,6.615464163621432,19.8,3.971008553523482,,,
3,baseline,3,900.0023140907288,59145.0,59231.0,0.0014540535970918927,24595759.0,exact,1.0,1.0013693998309383,9.839168761119028,17.2,6.1066754492159765,,,
4,baseline,4,900.0024960041046,59142.0,59213.0,0.0012005004903452704,25467171.0,exact,1.0,1.0011162314042927,11.236705591195049,14.261938547000467,7.640381275476392,,,
5,baseline,5,900.002925157547,59126.0,59244.0,0.0019957379156377904,23457042.0,exact,1.0,1.0021143794719125,10.760170783867167,29.494511720731996,5.847345591041515,,,
6,baseline,6,900.0031039714813,59125.0,59236.97169757604,0.001893813066825174,24240772.0,exact,1.0,1.0018090934817527,5.582936655618509,22.394339515207683,3.3210931747954593,,,
7,baseline,7,900.002781867981,59105.0,59212.0,0.001810337534895525,24042592.0,exact,1.0,1.001725596345796,1.7540923744921773,21.400000000000002,2.390273708531383,,,
8,baseline,8,900.0021660327911,59169.0,59251.0,0.0013858608392908448,25512146.0,exact,1.0,1.0013181687594004,9.026100681465717,20.5,5.950915491512204,,,
9,baseline,9,900.0015439987183,59130.0,59256.0,0.00213089802130898,23227790.0,exact,1.0,1.0021478462345041,7.880275979497338,25.197442922374428,4.198068271097029,,,
10,baseline,10,900.0024099349976,59127.0,59201.0,0.0012515432881762985,25015636.0,exact,1.0,1.0011838122135597,13.04240249187423,18.5,8.325144625534431,,,
11,baseline,11,900.0025849342346,59198.0,59289.0,0.0015372140950707794,24558832.0,exact,1.0,1.0017741281427412,4.941352404443008,18.19415858643873,2.9831824806949396,,,
12,baseline,12,900.0022029876709,59102.0,59224.0,0.002064227944908802,24026788.0,exact,1.0,1.0019794609775492,5.378482195683288,24.400000000000002,2.947983888580514,,,
13,baseline,13,900.0011007785797,59150.0,59206.0,0.0009467455621301775,24953207.0,exact,1.0,1.0008790614328702,12.451848934586094,13.999999999999998,8.0844140865203,,,
14,baseline,14,900.0014700889587,59169.0,59250.0,0.0013689600973482733,25494260.0,exact,1.0,1.0014705136656357,10.286586243074352,20.246577599756627,6.714445655657852,,,
15,baseline,15,900.0013790130615,59083.0,59196.0,0.0019125636816004605,23792716.0,exact,1.0,1.0018277822908204,7.486704871117682,22.6,4.211841162852367,,,
16,baseline,16,900.0020098686218,59126.0,59233.0,0.0018096945506207078,23398798.0,exact,1.0,1.001724983511187,10.473065188128833,21.39999999999999,5.842727665213484,,,
17,baseline,17,900.0023510456085,59156.0,59197.0,0.0006930826965988236,25573586.0,exact,1.0,1.0006930826965987,12.267049016770867,10.249306917303404,7.807679578926801,,,
18,baseline,18,900.002711057663,59118.0,59211.0,0.0015731249365675427,24489136.0,exact,1.0,1.0014884224413512,19.84721287191386,18.599999999999998,12.51075303699155,,,
19,baseline,19,724.1934628486632,59159.0,59164.0,8.451799388089724e-05,20931760.0,exact,1.0,1.0,16.225906582646804,1.0,11.203195934004649,,,
20,baseline,20,900.0011439323425,59068.0,59191.0,0.0020823457709758246,23411794.0,exact,1.0,1.0020823457709758,4.553908811228339,24.597917654229025,2.465201726709367,,,
21,baseline,21,380.06568694114685,59175.0,59180.0,8.449514152936208e-05,11618526.0,exact,1.0,1.0000168978860744,7.912557532546788,1.2500000000000002,5.757968140817527,,,
22,baseline,22,900.0016028881073,59121.0,59154.94711904253,0.0005741973079365614,26352886.0,exact,1.0,1.0004895835849292,14.040381831195205,6.789423808503489,9.40925154833969,,,
23,baseline,23,230.2515211105347,59193.0,59198.0,8.44694474008751e-05,6776049.0,exact,1.0,1.0000337860666262,8.514542938541073,1.6666666666666665,5.846961043264265,,,
24,baseline,24,900.0010840892792,59162.0,59240.0,0.001318413846725939,24727727.0,exact,1.0,1.0015046237595306,8.166041819193602,15.595781075690478,4.940286958561307,,,
25,baseline,25,900.0015320777893,59096.0,59210.0,0.0019290645728983352,23438919.0,exact,1.0,1.0018443004348487,4.510705225924555,22.800000000000004,2.4862311177206236,,,
26,baseline,26,900.0015478134155,59089.0,59203.0,0.001929293100238623,23826788.0,exact,1.0,1.0018953816994127,8.894248013836016,28.49903535344988,4.939630736149807,,,
27,baseline,27,900.0010070800781,59153.0,59249.0,0.0016229100806383447,24336831.0,exact,1.0,1.001538253490652,10.091974748981741,19.2,6.084119530266811,,,
28,baseline,28,900.001277923584,59112.0,59208.0,0.0016240357287860333,25111591.0,exact,1.0,1.001759610178668,4.691858473111718,19.195777507105156,2.8027693088636902,,,
29,baseline,29,900.0012440681458,59182.0,59263.0,0.0013686593896792946,24919871.0,exact,1.0,1.0012840657576834,7.56448716001105,16.200000000000003,4.595493258310103,,,
30,baseline,30,900.0012910366057,59134.0,59241.0,0.001809449724354855,23615391.0,exact,1.0,1.0017247501648658,9.031820270959846,21.4,5.0375202116086095,,,
31,baseline,31,900.0023548603058,59082.0,59169.0,0.0014725297044785213,26213904.0,exact,1.0,1.0013883344383072,10.04484347330425,17.513740798402758,6.772202916754611,,,
32,baseline,32,875.9193549156189,59175.0,59180.0,8.449514152936208e-05,24935695.0,exact,1.0,1.0,17.593042802030894,1.0000000000000002,11.640863122484049,,,
33,baseline,33,900.0018489360809,59088.0,59177.0,0.0015062279989168698,25210167.0,exact,1.0,1.0017435758540136,6.884821185789175,17.794276333604117,4.149329955955837,,,
34,baseline,34,232.1541509628296,59190.0,59195.0,8.447372867038352e-05,7309410.0,exact,1.0,1.0000337877789605,7.0924172424290814,1.6666666666666667,5.371410472817808,,,
35,baseline,35,900.0025398731233,59183.0,59262.0,0.001334842775797104,23927493.0,exact,1.0,1.0012671701556084,12.033207650833896,19.75,7.544694464615838,,,
36,baseline,36,900.0010929107666,59166.0,59254.0,0.00148734070243045,25589946.0,exact,1.0,1.001402714167413,6.350860539510311,17.599999999999998,3.8428016019966393,,,
37,baseline,37,622.9371509552003,59202.0,59207.0,8.445660619573664e-05,18595087.0,exact,1.0,1.0007944557133197,5.973803009115679,1.0000000000000002,3.967704381695003,,,
38,baseline,38,557.924427986145,59212.0,59217.0,8.444234276835778e-05,16270407.0,exact,1.0,1.0000168873277493,11.776853444610294,1.25,8.26095170688216,,,
39,baseline,39,900.0010092258452,59143.0,59185.0,0.0007101432122144633,26304077.0,exact,1.0,1.0006424670735625,19.899221771336656,10.5,13.274381840230484,,,
40,baseline,40,900.0011250972748,59158.0,59242.995354791536,0.0014367516615088902,23949337.0,exact,1.0,1.0013521179587164,11.618267974647784,16.999070958308586,7.043833563281406,,,
41,baseline,41,900.000893831253,59170.0,59257.0,0.0014703396991718775,24299427.0,exact,1.0,1.0013857203210816,9.82799588949917,17.4,5.913918333433118,,,
42,baseline,42,900.0017001628876,59089.0,59228.0,0.002352383692396216,23229681.0,exact,1.0,1.002267573696145,5.808712131855351,27.799999999999997,2.898764108739463,,,
43,baseline,43,127.60789799690248,59232.0,59237.0,8.44138303619665e-05,4041704.0,exact,1.0,1.0000168816260382,7.326284526634964,1.25,5.803235519206498,,,
44,baseline,44,166.38699293136597,59201.0,59206.0,8.445803280349994e-05,5151689.0,exact,1.0,1.0000168904653324,11.73610231262945,1.25,8.75222174086627,,,
45,baseline,45,900.0007989406586,59135.0,59247.0,0.001893971421324089,26922402.0,exact,1.0,1.0020634249471458,4.661659000381325,22.394318085736028,2.560992168457917,,,
46,baseline,46,900.001415014267,59152.0,59254.0,0.001724371111712199,26485728.0,exact,1.0,1.001639704515104,10.21700063178535,20.4,6.123151372348114,,,
47,baseline,47,900.0020279884337,59123.0,59235.0,0.0018943558344468312,28222784.0,exact,1.0,1.0018435206169876,10.41166170663056,22.39924225766622,6.824112904473778,,,
48,baseline,48,900.0011022090912,59176.0,59284.0,0.0018250642152223876,28675410.0,exact,0.9998817227920179,1.0016219503953505,9.824748557639392,21.602555089901312,6.1253016948312595,,,
49,baseline,49,900.0012428760529,59150.0,59206.0,0.0009467455621301775,30531240.0,exact,1.0,1.001115995941833,12.802607222912103,11.197159763313609,9.326024324264884,,,
50,ml-exact,0,649.376060962677,59162.0,59167.0,8.451370812345763e-05,18101461.0,exact,1.0,1.0004734608295711,8.532721264142948,1.0,5.445815649649917,59126.38771406158,59263.992667692604,max
51,ml-exact,1,900.0008749961853,59137.0,59256.99395246509,0.0020290842021930574,23342139.0,exact,1.0,1.0019443703707194,5.256439629875021,23.998790493018166,2.8907182021033684,59159.91471896955,59292.24515179818,max
52,ml-exact,2,900.0023529529572,59186.0,59272.0,0.0014530463285236373,24785817.0,exact,1.0,1.0013684512848238,6.615469580587714,17.2,4.085840034868203,59194.00902156645,59323.12664303628,max
53,ml-exact,3,900.0030598640442,59145.0,59228.0,0.0014033307971933384,24207954.0,exact,1.0,1.0013186813186814,9.839176914197518,16.599999999999998,6.010390586749109,59141.813764752675,59274.22541262452,max
54,ml-exact,4,900.0010681152344,59142.0,59214.0,0.0012174089479557674,24895987.0,exact,1.0,1.0011331384387514,11.236687763725765,14.462810920901886,7.469020917529618,59144.93070046487,59273.654326628006,max
55,ml-exact,5,900.0023910999298,59126.0,59241.96239661033,0.0019612758618937826,23775703.0,exact,1.0,1.0020799133376805,10.760164398831064,28.98520564396274,5.926781054105736,59145.04845907292,59279.36037916677,max
56,ml-exact,6,900.0027949810028,59125.0,59236.0,0.0018773784355179705,23994400.0,exact,1.0,1.0017926602401488,5.582934738875933,22.200000000000003,3.2873391191217904,59136.974634353304,59268.30857737715,max
57,ml-exact,7,900.0025460720062,59105.0,59212.0,0.001810337534895525,24113420.0,exact,1.0,1.001725596345796,1.7540919149292407,21.400000000000002,2.397315308132119,59125.024194597165,59260.190615193496,max
58,ml-exact,8,900.0025360584259,59169.0,59247.0,0.0013182578715205597,26072662.0,exact,1.0,1.0012505703614825,9.026104392444157,19.5,6.081660406018433,59155.83957873982,59292.27671868388,max
59,ml-exact,9,900.0029451847076,59130.0,59260.0,0.002198545577541011,23411285.0,exact,1.0,1.0022154949348037,7.880288248067729,25.99736174530695,4.231232189722303,59169.22723451526,59303.04692137199,max
60,ml-exact,10,900.002711057663,59127.0,59199.0,0.0012177177939012634,25692461.0,exact,1.0,1.001149989007458,13.042406855599213,18.0,8.550390388271678,59122.74947289353,59256.99939978048,max
61,ml-exact,11,900.0020880699158,59198.0,59271.0,0.0012331497685732625,25044283.0,exact,1.0,1.0014699918896999,4.941349676471181,14.59531403087942,3.042150631885348,59194.32665087494,59329.026081343145,max
62,ml-exact,12,900.00151014328,59102.0,59222.0,0.0020303881425332475,23860011.0,exact,1.0,1.001945624037762,5.378478055192067,24.0,2.9275210656269923,59122.0422679371,59259.06427666924,max
63,ml-exact,13,900.0019900798798,59150.0,59203.96517006869,0.0009123443798594788,26120169.0,exact,1.0,1.0008446625768113,12.451861238399319,13.491292517172042,8.462489899830945,59136.5588570761,59273.99511476752,max
64,ml-exact,14,900.0015881061554,59169.0,59254.0,0.0014365630651185586,25996193.0,exact,1.0,1.001538123489343,10.28658759195445,21.246408592337204,6.8466401908701435,59154.03941864104,59289.795816422404,max
65,ml-exact,15,900.0014340877533,59083.0,59198.0,0.001946414366230557,23870719.0,exact,1.0,1.0018616301110208,7.486705329259161,23.0,4.2256494328382725,59098.66203099143,59228.46969562256,max
66,ml-exact,16,900.0031027793884,59126.0,59230.0,0.001758955451070595,23581309.0,exact,1.0,1.0016742487020345,10.47307790601788,20.799999999999997,5.888301034790237,59145.19863458289,59278.22282794401,max
67,ml-exact,17,900.001091003418,59156.0,59189.0,0.0005578470484819798,25974343.0,exact,1.0,1.000557847048482,12.267031842372049,8.249442152951518,7.930031690398847,59127.14368529404,59267.37100160227,max
68,ml-exact,18,900.0016350746155,59118.0,59207.0,0.00150546364897324,24423315.0,exact,1.0,1.001420766875835,19.84718914391357,17.8,12.477127094628871,59117.71413049835,59253.828178881624,max
69,ml-exact,19,765.9083199501038,59159.0,59164.0,8.451799388089724e-05,22220414.0,exact,1.0,1.0,17.160548234580467,1.0,11.892915444124142,59144.031479967074,59274.181598455296,max
70,ml-exact,20,900.0015428066254,59068.0,59191.0,0.0020823457709758246,23475405.0,exact,1.0,1.0020823457709758,4.55391082948923,24.597917654229025,2.471899801493286,59082.22244394464,59224.2971810249,max
71,ml-exact,21,428.18276500701904,59175.0,59180.0,8.449514152936207e-05,12999314.0,exact,1.0,1.0000168978860744,8.91430318224869,1.25,6.442266072691428,59137.98908684254,59265.43858161565,max
72,ml-exact,22,900.0029540061951,59121.0,59157.0,0.0006089206880803775,26135751.0,exact,1.0,1.0005243040286844,14.040402909173055,7.199999999999999,9.33172387888638,59089.5855153968,59226.64120575328,max
73,ml-exact,23,287.76060605049133,59193.0,59198.0,8.44694474008751e-05,7976958.0,exact,1.0,1.0000337860666262,10.641189358576659,1.6666666666666665,6.883209178350868,59140.59510257357,59271.35823619222,max
74,ml-exact,24,900.0017418861389,59162.0,59248.0,0.0014536357797234711,25158901.0,exact,1.0,1.001639870839039,8.166047787627152,17.195348365504884,5.026430067835795,59150.82751230766,59285.37521566199,max
75,ml-exact,25,900.0040528774261,59096.0,59209.0,0.001912142953837823,25156445.0,exact,1.0,1.0018273802473732,4.510717859885376,22.6,2.6684138620141735,59123.050715683305,59257.270874657275,max
76,ml-exact,26,900.0013389587402,59089.0,59199.0,0.0018615986054934083,23531336.0,exact,1.0,1.0018276894958622,8.894245949833698,27.499069200697253,4.878379350513735,59118.02329883662,59245.3612305393,max
77,ml-exact,27,900.0014967918396,59153.0,59246.0,0.0015721941406183963,25053692.0,exact,1.0,1.001487541837114,10.091980240263075,18.599999999999998,6.263332181683365,59150.16240797118,59285.64193535254,max
78,ml-exact,28,900.001298904419,59112.0,59215.0,0.0017424550006766815,24700106.0,exact,1.0,1.0018780454791554,4.691858582488349,20.59546961699824,2.756842408849359,59105.239747395564,59244.63088727762,max
79,ml-exact,29,900.0012950897217,59182.0,59264.0,0.0013855564191815079,24368468.0,exact,1.0,1.001300961359758,7.564487588846076,16.4,4.493808591920299,59171.61319933031,59313.94235456237,max
80,ml-exact,30,900.0028159618378,59134.0,59240.0,0.0017925389792674265,24191195.0,exact,1.0,1.001707840849524,9.031835574105251,21.2,5.160347916977751,59148.03866371211,59281.43814639009,max
81,ml-exact,31,900.0012450218201,59082.0,59151.0,0.0011678683863105513,27104772.0,exact,1.0,1.0010836987334635,10.044831086499027,13.890208219422876,7.002353254836392,59069.41377677144,59203.823126466195,max
82,ml-exact,32,900.0074319839478,59175.0,59197.964519141264,0.0003880780589989647,25690960.0,exact,1.0,1.0003035572683552,18.076857400376596,4.592903828252747,11.99344749946664,59157.64838384942,59290.523875622814,max
83,ml-exact,33,900.0013158321381,59088.0,59177.0,0.0015062279989168698,24765342.0,exact,1.0,1.0017435758540136,6.884817107658309,17.794276333604117,4.076116410894511,59081.01088134223,59219.82627379965,max
84,ml-exact,34,239.74270606040955,59190.0,59195.0,8.447372867038352e-05,7385996.0,exact,1.0,1.0000337877789605,7.324251128646419,1.6666666666666667,5.427690643511643,59143.23414106734,59275.920682982185,max
85,ml-exact,35,900.0009059906006,59183.0,59259.953906894,0.0013002704643901015,23480392.0,exact,1.0,1.0012326001806815,12.033185805509241,19.238476723499844,7.403716868683651,59171.487583005524,59304.98468958104,max
86,ml-exact,36,900.0016748905182,59166.0,59246.0,0.0013521279113004091,25394548.0,exact,1.0,1.0012675128018793,6.350864646252256,16.0,3.813458994262065,59158.08309355407,59296.56640928705,max
87,ml-exact,37,662.0322141647339,59202.0,59207.0,8.445660619573663e-05,20024242.0,exact,1.0,1.0007944557133197,6.348714355925809,1.0,4.272648615385403,59175.74869590455,59307.463498356396,max
88,ml-exact,38,446.4717228412628,59212.0,59217.0,8.444234276835777e-05,13956868.0,exact,1.0,1.0000168873277493,9.424272864415235,1.25,7.086301684237463,59166.58497608687,59301.825104164076,max
89,ml-exact,39,900.00270819664,59143.0,59184.0,0.0006932350404950712,26147788.0,exact,1.0,1.000625560045311,19.899259335957453,10.249999999999998,13.195510421802544,59114.89526526199,59248.56148321837,max
90,ml-exact,40,900.0016450881958,59158.0,59249.0,0.0015382534906521518,24805820.0,exact,1.0,1.0014536112097088,11.618274687299241,18.2,7.2957371421479085,59155.37108495968,59280.88309711401,max
91,ml-exact,41,900.002336025238,59170.0,59245.0,0.0012675342234240324,25030081.0,exact,1.0,1.0011829319814112,9.82801163823526,15.0,6.09174261241699,59151.28565990848,59290.71555008509,max
92,ml-exact,42,900.0009651184082,59089.0,59214.0,0.002115452960787964,22815014.0,exact,1.0,1.0020306630114733,5.808707387795664,24.999999999999996,2.8470190237906574,59118.99827325628,59249.97235571583,max
93,ml-exact,43,155.91705012321472,59232.0,59237.0,8.44138303619665e-05,4841747.0,exact,1.0,1.0000168816260382,8.951582854095786,1.25,6.951968319652182,59161.307350621355,59293.05481512429,max
94,ml-exact,44,166.24281811714172,59201.0,59206.0,8.445803280349994e-05,5152172.0,exact,1.0,1.0000168904653324,11.72593294577673,1.25,8.753042311188128,59149.268537538504,59278.50295984831,max
95,ml-exact,45,900.0014069080353,59135.0,59246.0,0.0018770609622051238,26599239.0,exact,1.0,1.002046511627907,4.661662149419189,22.194368817113382,2.5302513039490457,59157.45644175721,59292.66862156513,max
96,ml-exact,46,900.0008680820465,59152.0,59255.0,0.0017412767108466324,25313316.0,exact,1.0,1.0016566086853627,10.21699442289862,20.599999999999998,5.852105164112592,59168.404124939494,59297.86061218224,max
97,ml-exact,47,900.0009942054749,59123.0,59235.0,0.0018943558344468312,26222277.0,exact,1.0,1.0018435206169876,10.411649747325901,22.39924225766622,6.340401388480525,59155.45167739807,59284.172466642885,max
98,ml-exact,48,900.0013608932495,59176.0,59288.0,0.00189265918615655,27293583.0,exact,0.9998817227920179,1.0016895316618233,9.82475138153239,22.40264972286062,5.830132165779587,59191.94394017174,59332.08571744459,max
99,ml-exact,49,900.0012040138245,59150.0,59203.0,0.0008960270498732037,30493377.0,exact,1.0,1.0010652688535677,12.802606670093036,10.59731191885038,9.314458752116828,59139.98187390398,59273.225027033564,max
100,ml-heuristic,0,76.10421586036682,59134.0,59139.0,8.455372543714276e-05,3323921.0,heuristic,0.9995267232345086,1.0,1.0,1.000473500862448,1.0,59126.38771406158,59263.992667692604,max
101,ml-heuristic,1,171.21872186660767,59137.0,59142.0,8.454943605526151e-05,8074858.0,heuristic,1.0,1.0,1.0,1.0,1.0,59159.91471896955,59292.24515179818,max
102,ml-heuristic,2,136.04512000083923,59186.0,59191.0,8.447943770486263e-05,6066272.0,heuristic,1.0,1.0,1.0,1.0,1.0,59194.00902156645,59323.12664303628,max
103,ml-heuristic,3,91.47137689590454,59145.0,59150.0,8.453799983092401e-05,4027684.0,heuristic,1.0,1.0,1.0,1.0,1.0,59141.813764752675,59274.22541262452,max
104,ml-heuristic,4,80.09487199783325,59142.0,59146.97828536885,8.417512713219175e-05,3333233.0,heuristic,1.0,1.0,1.0,1.0,1.0,59144.93070046487,59273.654326628006,max
105,ml-heuristic,5,83.6420669555664,59115.0,59119.0,6.766472130592912e-05,4011571.0,heuristic,0.999813956634983,1.0,1.0,1.0,1.0,59145.04845907292,59279.36037916677,max
106,ml-heuristic,6,161.20603895187378,59125.0,59130.0,8.456659619450317e-05,7299034.0,heuristic,1.0,1.0,1.0,1.0,1.0,59136.974634353304,59268.30857737715,max
107,ml-heuristic,7,513.0874490737915,59105.0,59110.0,8.459521191100583e-05,10058510.0,heuristic,1.0,1.0,1.0,1.0,1.0,59125.024194597165,59260.190615193496,max
108,ml-heuristic,8,99.7110710144043,59169.0,59173.0,6.760296777028511e-05,4287096.0,heuristic,1.0,1.0,1.0,1.0,1.0,59155.83957873982,59292.27671868388,max
109,ml-heuristic,9,114.2093939781189,59124.0,59129.0,8.456802652053312e-05,5532971.0,heuristic,0.999898528665652,1.0,1.0,1.0,1.0,59169.22723451526,59303.04692137199,max
110,ml-heuristic,10,69.00587606430054,59127.0,59131.0,6.765098855007019e-05,3004829.0,heuristic,1.0,1.0,1.0,1.0,1.0,59122.74947289353,59256.99939978048,max
111,ml-heuristic,11,182.13689517974854,59179.0,59184.0,8.448943037226044e-05,8232427.0,heuristic,0.9996790432109193,1.0,1.0,1.0,1.0,59194.32665087494,59329.026081343145,max
112,ml-heuristic,12,167.33386301994324,59102.0,59107.0,8.459950593888532e-05,8150244.0,heuristic,1.0,1.0,1.0,1.0,1.0,59122.0422679371,59259.06427666924,max
113,ml-heuristic,13,72.27851104736328,59150.0,59154.0,6.76246830092984e-05,3086582.0,heuristic,1.0,1.0,1.0,1.0,1.0,59136.5588570761,59273.99511476752,max
114,ml-heuristic,14,87.49272584915161,59159.0,59163.0,6.761439510471779e-05,3796927.0,heuristic,0.9998309925805743,1.0,1.0,1.0,1.0,59154.03941864104,59289.795816422404,max
115,ml-heuristic,15,120.21328401565552,59083.0,59088.0,8.462671157524161e-05,5649006.0,heuristic,1.0,1.0,1.0,1.0,1.0,59098.66203099143,59228.46969562256,max
116,ml-heuristic,16,85.93491911888123,59126.0,59131.0,8.456516591685553e-05,4004773.0,heuristic,1.0,1.0,1.0,1.0,1.0,59145.19863458289,59278.22282794401,max
117,ml-heuristic,17,73.36747002601624,59152.0,59156.0,6.76223965377333e-05,3275440.0,heuristic,0.9999323821759416,1.0,1.0,1.0,1.0,59127.14368529404,59267.37100160227,max
118,ml-heuristic,18,45.34655404090881,59118.0,59123.0,8.457660949287865e-05,1957447.0,heuristic,1.0,1.0,1.0,1.0,1.0,59117.71413049835,59253.828178881624,max
119,ml-heuristic,19,44.6319260597229,59159.0,59164.0,8.451799388089724e-05,1868374.0,heuristic,1.0,1.0,1.0,1.0,1.0,59144.031479967074,59274.181598455296,max
120,ml-heuristic,20,197.63266706466675,59063.0,59068.0,8.465536799688468e-05,9496908.0,heuristic,0.9999153517979278,1.0,1.0,1.0,1.0,59082.22244394464,59224.2971810249,max
121,ml-heuristic,21,48.03322887420654,59175.0,59179.0,6.759611322348965e-05,2017817.0,heuristic,1.0,1.0,1.0,1.0,1.0,59137.98908684254,59265.43858161565,max
122,ml-heuristic,22,64.1009349822998,59121.0,59126.0,8.457231778894132e-05,2800742.0,heuristic,1.0,1.0,1.0,1.0,1.0,59089.5855153968,59226.64120575328,max
123,ml-heuristic,23,27.042146921157837,59193.0,59196.0,5.068166844052506e-05,1158901.0,heuristic,1.0,1.0,1.0,1.0,1.0,59140.59510257357,59271.35823619222,max
124,ml-heuristic,24,110.21264696121216,59146.0,59151.0,8.453657052040713e-05,5005322.0,heuristic,0.9997295561340049,1.0,1.0,1.0,1.0,59150.82751230766,59285.37521566199,max
125,ml-heuristic,25,199.52568101882935,59096.0,59101.0,8.460809530255854e-05,9427490.0,heuristic,1.0,1.0,1.0,1.0,1.0,59123.050715683305,59257.270874657275,max
126,ml-heuristic,26,101.18916702270508,59087.0,59091.0,6.769678609508014e-05,4823597.0,heuristic,0.9999661527526273,1.0,1.0,1.0,1.0,59118.02329883662,59245.3612305393,max
127,ml-heuristic,27,89.17987108230591,59153.0,59158.0,8.452656669991379e-05,4000058.0,heuristic,1.0,1.0,1.0,1.0,1.0,59150.16240797118,59285.64193535254,max
128,ml-heuristic,28,191.82191514968872,59099.0,59104.0,8.46038004027141e-05,8959564.0,heuristic,0.9997800784950602,1.0,1.0,1.0,1.0,59105.239747395564,59244.63088727762,max
129,ml-heuristic,29,118.9771659374237,59182.0,59187.0,8.448514751106756e-05,5422676.0,heuristic,1.0,1.0,1.0,1.0,1.0,59171.61319933031,59313.94235456237,max
130,ml-heuristic,30,99.64783000946045,59134.0,59139.0,8.455372543714276e-05,4687900.0,heuristic,1.0,1.0,1.0,1.0,1.0,59148.03866371211,59281.43814639009,max
131,ml-heuristic,31,89.59844493865967,59082.0,59086.96752812557,8.407853704291519e-05,3870809.0,heuristic,1.0,1.0,1.0,1.0,1.0,59069.41377677144,59203.823126466195,max
132,ml-heuristic,32,49.78782606124878,59175.0,59180.0,8.449514152936207e-05,2142083.0,heuristic,1.0,1.0,1.0,1.0,1.0,59157.64838384942,59290.523875622814,max
133,ml-heuristic,33,130.72261786460876,59069.0,59074.0,8.464676903282602e-05,6075720.0,heuristic,0.9996784457080964,1.0,1.0,1.0,1.0,59081.01088134223,59219.82627379965,max
134,ml-heuristic,34,32.732726097106934,59190.0,59193.0,5.0684237202230105e-05,1360799.0,heuristic,1.0,1.0,1.0,1.0,1.0,59143.23414106734,59275.920682982185,max
135,ml-heuristic,35,74.79323601722717,59183.0,59187.0,6.758697598972678e-05,3171433.0,heuristic,1.0,1.0,1.0,1.0,1.0,59171.487583005524,59304.98468958104,max
136,ml-heuristic,36,141.71325087547302,59166.0,59171.0,8.450799445627557e-05,6659190.0,heuristic,1.0,1.0,1.0,1.0,1.0,59158.08309355407,59296.56640928705,max
137,ml-heuristic,37,104.27815413475037,59155.0,59160.0,8.452370890034655e-05,4686611.0,heuristic,0.9992061079017601,1.0,1.0,1.0007945228636634,1.0,59175.74869590455,59307.463498356396,max
138,ml-heuristic,38,47.3746600151062,59212.0,59216.0,6.755387421468622e-05,1969556.0,heuristic,1.0,1.0,1.0,1.0,1.0,59166.58497608687,59301.825104164076,max
139,ml-heuristic,39,45.22795009613037,59143.0,59147.0,6.763268687756793e-05,1981567.0,heuristic,1.0,1.0,1.0,1.0,1.0,59114.89526526199,59248.56148321837,max
140,ml-heuristic,40,77.46431112289429,59158.0,59163.0,8.451942256330505e-05,3400043.0,heuristic,1.0,1.0,1.0,1.0,1.0,59155.37108495968,59280.88309711401,max
141,ml-heuristic,41,91.57522082328796,59170.0,59175.0,8.450228156160216e-05,4108854.0,heuristic,1.0,1.0,1.0,1.0,1.0,59151.28565990848,59290.71555008509,max
142,ml-heuristic,42,154.93997287750244,59089.0,59094.0,8.461811843151856e-05,8013650.0,heuristic,1.0,1.0,1.0,1.0,1.0,59118.99827325628,59249.97235571583,max
143,ml-heuristic,43,17.417819023132324,59232.0,59236.0,6.75310642895732e-05,696457.0,heuristic,1.0,1.0,1.0,1.0,1.0,59161.307350621355,59293.05481512429,max
144,ml-heuristic,44,14.177363872528076,59201.0,59205.0,6.756642624279995e-05,588615.0,heuristic,1.0,1.0,1.0,1.0,1.0,59149.268537538504,59278.50295984831,max
145,ml-heuristic,45,193.06448602676392,59120.0,59125.0,8.457374830852504e-05,10512489.0,heuristic,0.9997463431132155,1.0,1.0,1.0,1.0,59157.45644175721,59292.66862156513,max
146,ml-heuristic,46,88.08861303329468,59152.0,59157.0,8.452799567216662e-05,4325506.0,heuristic,1.0,1.0,1.0,1.0,1.0,59168.404124939494,59297.86061218224,max
147,ml-heuristic,47,86.44172787666321,59121.0,59126.0,8.457231778894132e-05,4135744.0,heuristic,0.999966172217242,1.0,1.0,1.0,1.0,59155.45167739807,59284.172466642885,max
148,ml-heuristic,48,91.60550999641418,59183.0,59188.0,8.448371998715847e-05,4681469.0,heuristic,1.0,1.0,1.0,1.0,1.0,59191.94394017174,59332.08571744459,max
149,ml-heuristic,49,70.29827809333801,59135.0,59140.0,8.45522955948254e-05,3273768.0,heuristic,0.9997464074387151,1.0,1.0,1.0,1.0,59139.98187390398,59273.225027033564,max
1 Solver Instance Wallclock Time Lower Bound Upper Bound Gap Nodes Mode Relative Lower Bound Relative Upper Bound Relative Wallclock Time Relative Gap Relative Nodes Predicted LB Predicted UB Sense
2 0 baseline 0 662.7372989654541 59162.0 59167.0 8.451370812345763e-05 18688107.0 exact 1.0 1.0004734608295711 8.70828628181939 1.0 5.622307810564691
3 1 baseline 1 900.0007548332214 59137.0 59256.0 0.002012276578115224 24175550.0 exact 1.0 1.0019275641675967 5.256438928065297 23.8 2.9939288096459404
4 2 baseline 2 900.0016160011293 59186.0 59285.0 0.0016726928665562802 24089218.0 exact 1.0 1.0015880792688077 6.615464163621432 19.8 3.971008553523482
5 3 baseline 3 900.0023140907288 59145.0 59231.0 0.0014540535970918927 24595759.0 exact 1.0 1.0013693998309383 9.839168761119028 17.2 6.1066754492159765
6 4 baseline 4 900.0024960041046 59142.0 59213.0 0.0012005004903452704 25467171.0 exact 1.0 1.0011162314042927 11.236705591195049 14.261938547000467 7.640381275476392
7 5 baseline 5 900.002925157547 59126.0 59244.0 0.0019957379156377904 23457042.0 exact 1.0 1.0021143794719125 10.760170783867167 29.494511720731996 5.847345591041515
8 6 baseline 6 900.0031039714813 59125.0 59236.97169757604 0.001893813066825174 24240772.0 exact 1.0 1.0018090934817527 5.582936655618509 22.394339515207683 3.3210931747954593
9 7 baseline 7 900.002781867981 59105.0 59212.0 0.001810337534895525 24042592.0 exact 1.0 1.001725596345796 1.7540923744921773 21.400000000000002 2.390273708531383
10 8 baseline 8 900.0021660327911 59169.0 59251.0 0.0013858608392908448 25512146.0 exact 1.0 1.0013181687594004 9.026100681465717 20.5 5.950915491512204
11 9 baseline 9 900.0015439987183 59130.0 59256.0 0.00213089802130898 23227790.0 exact 1.0 1.0021478462345041 7.880275979497338 25.197442922374428 4.198068271097029
12 10 baseline 10 900.0024099349976 59127.0 59201.0 0.0012515432881762985 25015636.0 exact 1.0 1.0011838122135597 13.04240249187423 18.5 8.325144625534431
13 11 baseline 11 900.0025849342346 59198.0 59289.0 0.0015372140950707794 24558832.0 exact 1.0 1.0017741281427412 4.941352404443008 18.19415858643873 2.9831824806949396
14 12 baseline 12 900.0022029876709 59102.0 59224.0 0.002064227944908802 24026788.0 exact 1.0 1.0019794609775492 5.378482195683288 24.400000000000002 2.947983888580514
15 13 baseline 13 900.0011007785797 59150.0 59206.0 0.0009467455621301775 24953207.0 exact 1.0 1.0008790614328702 12.451848934586094 13.999999999999998 8.0844140865203
16 14 baseline 14 900.0014700889587 59169.0 59250.0 0.0013689600973482733 25494260.0 exact 1.0 1.0014705136656357 10.286586243074352 20.246577599756627 6.714445655657852
17 15 baseline 15 900.0013790130615 59083.0 59196.0 0.0019125636816004605 23792716.0 exact 1.0 1.0018277822908204 7.486704871117682 22.6 4.211841162852367
18 16 baseline 16 900.0020098686218 59126.0 59233.0 0.0018096945506207078 23398798.0 exact 1.0 1.001724983511187 10.473065188128833 21.39999999999999 5.842727665213484
19 17 baseline 17 900.0023510456085 59156.0 59197.0 0.0006930826965988236 25573586.0 exact 1.0 1.0006930826965987 12.267049016770867 10.249306917303404 7.807679578926801
20 18 baseline 18 900.002711057663 59118.0 59211.0 0.0015731249365675427 24489136.0 exact 1.0 1.0014884224413512 19.84721287191386 18.599999999999998 12.51075303699155
21 19 baseline 19 724.1934628486632 59159.0 59164.0 8.451799388089724e-05 20931760.0 exact 1.0 1.0 16.225906582646804 1.0 11.203195934004649
22 20 baseline 20 900.0011439323425 59068.0 59191.0 0.0020823457709758246 23411794.0 exact 1.0 1.0020823457709758 4.553908811228339 24.597917654229025 2.465201726709367
23 21 baseline 21 380.06568694114685 59175.0 59180.0 8.449514152936208e-05 11618526.0 exact 1.0 1.0000168978860744 7.912557532546788 1.2500000000000002 5.757968140817527
24 22 baseline 22 900.0016028881073 59121.0 59154.94711904253 0.0005741973079365614 26352886.0 exact 1.0 1.0004895835849292 14.040381831195205 6.789423808503489 9.40925154833969
25 23 baseline 23 230.2515211105347 59193.0 59198.0 8.44694474008751e-05 6776049.0 exact 1.0 1.0000337860666262 8.514542938541073 1.6666666666666665 5.846961043264265
26 24 baseline 24 900.0010840892792 59162.0 59240.0 0.001318413846725939 24727727.0 exact 1.0 1.0015046237595306 8.166041819193602 15.595781075690478 4.940286958561307
27 25 baseline 25 900.0015320777893 59096.0 59210.0 0.0019290645728983352 23438919.0 exact 1.0 1.0018443004348487 4.510705225924555 22.800000000000004 2.4862311177206236
28 26 baseline 26 900.0015478134155 59089.0 59203.0 0.001929293100238623 23826788.0 exact 1.0 1.0018953816994127 8.894248013836016 28.49903535344988 4.939630736149807
29 27 baseline 27 900.0010070800781 59153.0 59249.0 0.0016229100806383447 24336831.0 exact 1.0 1.001538253490652 10.091974748981741 19.2 6.084119530266811
30 28 baseline 28 900.001277923584 59112.0 59208.0 0.0016240357287860333 25111591.0 exact 1.0 1.001759610178668 4.691858473111718 19.195777507105156 2.8027693088636902
31 29 baseline 29 900.0012440681458 59182.0 59263.0 0.0013686593896792946 24919871.0 exact 1.0 1.0012840657576834 7.56448716001105 16.200000000000003 4.595493258310103
32 30 baseline 30 900.0012910366057 59134.0 59241.0 0.001809449724354855 23615391.0 exact 1.0 1.0017247501648658 9.031820270959846 21.4 5.0375202116086095
33 31 baseline 31 900.0023548603058 59082.0 59169.0 0.0014725297044785213 26213904.0 exact 1.0 1.0013883344383072 10.04484347330425 17.513740798402758 6.772202916754611
34 32 baseline 32 875.9193549156189 59175.0 59180.0 8.449514152936208e-05 24935695.0 exact 1.0 1.0 17.593042802030894 1.0000000000000002 11.640863122484049
35 33 baseline 33 900.0018489360809 59088.0 59177.0 0.0015062279989168698 25210167.0 exact 1.0 1.0017435758540136 6.884821185789175 17.794276333604117 4.149329955955837
36 34 baseline 34 232.1541509628296 59190.0 59195.0 8.447372867038352e-05 7309410.0 exact 1.0 1.0000337877789605 7.0924172424290814 1.6666666666666667 5.371410472817808
37 35 baseline 35 900.0025398731233 59183.0 59262.0 0.001334842775797104 23927493.0 exact 1.0 1.0012671701556084 12.033207650833896 19.75 7.544694464615838
38 36 baseline 36 900.0010929107666 59166.0 59254.0 0.00148734070243045 25589946.0 exact 1.0 1.001402714167413 6.350860539510311 17.599999999999998 3.8428016019966393
39 37 baseline 37 622.9371509552003 59202.0 59207.0 8.445660619573664e-05 18595087.0 exact 1.0 1.0007944557133197 5.973803009115679 1.0000000000000002 3.967704381695003
40 38 baseline 38 557.924427986145 59212.0 59217.0 8.444234276835778e-05 16270407.0 exact 1.0 1.0000168873277493 11.776853444610294 1.25 8.26095170688216
41 39 baseline 39 900.0010092258452 59143.0 59185.0 0.0007101432122144633 26304077.0 exact 1.0 1.0006424670735625 19.899221771336656 10.5 13.274381840230484
42 40 baseline 40 900.0011250972748 59158.0 59242.995354791536 0.0014367516615088902 23949337.0 exact 1.0 1.0013521179587164 11.618267974647784 16.999070958308586 7.043833563281406
43 41 baseline 41 900.000893831253 59170.0 59257.0 0.0014703396991718775 24299427.0 exact 1.0 1.0013857203210816 9.82799588949917 17.4 5.913918333433118
44 42 baseline 42 900.0017001628876 59089.0 59228.0 0.002352383692396216 23229681.0 exact 1.0 1.002267573696145 5.808712131855351 27.799999999999997 2.898764108739463
45 43 baseline 43 127.60789799690248 59232.0 59237.0 8.44138303619665e-05 4041704.0 exact 1.0 1.0000168816260382 7.326284526634964 1.25 5.803235519206498
46 44 baseline 44 166.38699293136597 59201.0 59206.0 8.445803280349994e-05 5151689.0 exact 1.0 1.0000168904653324 11.73610231262945 1.25 8.75222174086627
47 45 baseline 45 900.0007989406586 59135.0 59247.0 0.001893971421324089 26922402.0 exact 1.0 1.0020634249471458 4.661659000381325 22.394318085736028 2.560992168457917
48 46 baseline 46 900.001415014267 59152.0 59254.0 0.001724371111712199 26485728.0 exact 1.0 1.001639704515104 10.21700063178535 20.4 6.123151372348114
49 47 baseline 47 900.0020279884337 59123.0 59235.0 0.0018943558344468312 28222784.0 exact 1.0 1.0018435206169876 10.41166170663056 22.39924225766622 6.824112904473778
50 48 baseline 48 900.0011022090912 59176.0 59284.0 0.0018250642152223876 28675410.0 exact 0.9998817227920179 1.0016219503953505 9.824748557639392 21.602555089901312 6.1253016948312595
51 49 baseline 49 900.0012428760529 59150.0 59206.0 0.0009467455621301775 30531240.0 exact 1.0 1.001115995941833 12.802607222912103 11.197159763313609 9.326024324264884
52 50 ml-exact 0 649.376060962677 59162.0 59167.0 8.451370812345763e-05 18101461.0 exact 1.0 1.0004734608295711 8.532721264142948 1.0 5.445815649649917 59126.38771406158 59263.992667692604 max
53 51 ml-exact 1 900.0008749961853 59137.0 59256.99395246509 0.0020290842021930574 23342139.0 exact 1.0 1.0019443703707194 5.256439629875021 23.998790493018166 2.8907182021033684 59159.91471896955 59292.24515179818 max
54 52 ml-exact 2 900.0023529529572 59186.0 59272.0 0.0014530463285236373 24785817.0 exact 1.0 1.0013684512848238 6.615469580587714 17.2 4.085840034868203 59194.00902156645 59323.12664303628 max
55 53 ml-exact 3 900.0030598640442 59145.0 59228.0 0.0014033307971933384 24207954.0 exact 1.0 1.0013186813186814 9.839176914197518 16.599999999999998 6.010390586749109 59141.813764752675 59274.22541262452 max
56 54 ml-exact 4 900.0010681152344 59142.0 59214.0 0.0012174089479557674 24895987.0 exact 1.0 1.0011331384387514 11.236687763725765 14.462810920901886 7.469020917529618 59144.93070046487 59273.654326628006 max
57 55 ml-exact 5 900.0023910999298 59126.0 59241.96239661033 0.0019612758618937826 23775703.0 exact 1.0 1.0020799133376805 10.760164398831064 28.98520564396274 5.926781054105736 59145.04845907292 59279.36037916677 max
58 56 ml-exact 6 900.0027949810028 59125.0 59236.0 0.0018773784355179705 23994400.0 exact 1.0 1.0017926602401488 5.582934738875933 22.200000000000003 3.2873391191217904 59136.974634353304 59268.30857737715 max
59 57 ml-exact 7 900.0025460720062 59105.0 59212.0 0.001810337534895525 24113420.0 exact 1.0 1.001725596345796 1.7540919149292407 21.400000000000002 2.397315308132119 59125.024194597165 59260.190615193496 max
60 58 ml-exact 8 900.0025360584259 59169.0 59247.0 0.0013182578715205597 26072662.0 exact 1.0 1.0012505703614825 9.026104392444157 19.5 6.081660406018433 59155.83957873982 59292.27671868388 max
61 59 ml-exact 9 900.0029451847076 59130.0 59260.0 0.002198545577541011 23411285.0 exact 1.0 1.0022154949348037 7.880288248067729 25.99736174530695 4.231232189722303 59169.22723451526 59303.04692137199 max
62 60 ml-exact 10 900.002711057663 59127.0 59199.0 0.0012177177939012634 25692461.0 exact 1.0 1.001149989007458 13.042406855599213 18.0 8.550390388271678 59122.74947289353 59256.99939978048 max
63 61 ml-exact 11 900.0020880699158 59198.0 59271.0 0.0012331497685732625 25044283.0 exact 1.0 1.0014699918896999 4.941349676471181 14.59531403087942 3.042150631885348 59194.32665087494 59329.026081343145 max
64 62 ml-exact 12 900.00151014328 59102.0 59222.0 0.0020303881425332475 23860011.0 exact 1.0 1.001945624037762 5.378478055192067 24.0 2.9275210656269923 59122.0422679371 59259.06427666924 max
65 63 ml-exact 13 900.0019900798798 59150.0 59203.96517006869 0.0009123443798594788 26120169.0 exact 1.0 1.0008446625768113 12.451861238399319 13.491292517172042 8.462489899830945 59136.5588570761 59273.99511476752 max
66 64 ml-exact 14 900.0015881061554 59169.0 59254.0 0.0014365630651185586 25996193.0 exact 1.0 1.001538123489343 10.28658759195445 21.246408592337204 6.8466401908701435 59154.03941864104 59289.795816422404 max
67 65 ml-exact 15 900.0014340877533 59083.0 59198.0 0.001946414366230557 23870719.0 exact 1.0 1.0018616301110208 7.486705329259161 23.0 4.2256494328382725 59098.66203099143 59228.46969562256 max
68 66 ml-exact 16 900.0031027793884 59126.0 59230.0 0.001758955451070595 23581309.0 exact 1.0 1.0016742487020345 10.47307790601788 20.799999999999997 5.888301034790237 59145.19863458289 59278.22282794401 max
69 67 ml-exact 17 900.001091003418 59156.0 59189.0 0.0005578470484819798 25974343.0 exact 1.0 1.000557847048482 12.267031842372049 8.249442152951518 7.930031690398847 59127.14368529404 59267.37100160227 max
70 68 ml-exact 18 900.0016350746155 59118.0 59207.0 0.00150546364897324 24423315.0 exact 1.0 1.001420766875835 19.84718914391357 17.8 12.477127094628871 59117.71413049835 59253.828178881624 max
71 69 ml-exact 19 765.9083199501038 59159.0 59164.0 8.451799388089724e-05 22220414.0 exact 1.0 1.0 17.160548234580467 1.0 11.892915444124142 59144.031479967074 59274.181598455296 max
72 70 ml-exact 20 900.0015428066254 59068.0 59191.0 0.0020823457709758246 23475405.0 exact 1.0 1.0020823457709758 4.55391082948923 24.597917654229025 2.471899801493286 59082.22244394464 59224.2971810249 max
73 71 ml-exact 21 428.18276500701904 59175.0 59180.0 8.449514152936207e-05 12999314.0 exact 1.0 1.0000168978860744 8.91430318224869 1.25 6.442266072691428 59137.98908684254 59265.43858161565 max
74 72 ml-exact 22 900.0029540061951 59121.0 59157.0 0.0006089206880803775 26135751.0 exact 1.0 1.0005243040286844 14.040402909173055 7.199999999999999 9.33172387888638 59089.5855153968 59226.64120575328 max
75 73 ml-exact 23 287.76060605049133 59193.0 59198.0 8.44694474008751e-05 7976958.0 exact 1.0 1.0000337860666262 10.641189358576659 1.6666666666666665 6.883209178350868 59140.59510257357 59271.35823619222 max
76 74 ml-exact 24 900.0017418861389 59162.0 59248.0 0.0014536357797234711 25158901.0 exact 1.0 1.001639870839039 8.166047787627152 17.195348365504884 5.026430067835795 59150.82751230766 59285.37521566199 max
77 75 ml-exact 25 900.0040528774261 59096.0 59209.0 0.001912142953837823 25156445.0 exact 1.0 1.0018273802473732 4.510717859885376 22.6 2.6684138620141735 59123.050715683305 59257.270874657275 max
78 76 ml-exact 26 900.0013389587402 59089.0 59199.0 0.0018615986054934083 23531336.0 exact 1.0 1.0018276894958622 8.894245949833698 27.499069200697253 4.878379350513735 59118.02329883662 59245.3612305393 max
79 77 ml-exact 27 900.0014967918396 59153.0 59246.0 0.0015721941406183963 25053692.0 exact 1.0 1.001487541837114 10.091980240263075 18.599999999999998 6.263332181683365 59150.16240797118 59285.64193535254 max
80 78 ml-exact 28 900.001298904419 59112.0 59215.0 0.0017424550006766815 24700106.0 exact 1.0 1.0018780454791554 4.691858582488349 20.59546961699824 2.756842408849359 59105.239747395564 59244.63088727762 max
81 79 ml-exact 29 900.0012950897217 59182.0 59264.0 0.0013855564191815079 24368468.0 exact 1.0 1.001300961359758 7.564487588846076 16.4 4.493808591920299 59171.61319933031 59313.94235456237 max
82 80 ml-exact 30 900.0028159618378 59134.0 59240.0 0.0017925389792674265 24191195.0 exact 1.0 1.001707840849524 9.031835574105251 21.2 5.160347916977751 59148.03866371211 59281.43814639009 max
83 81 ml-exact 31 900.0012450218201 59082.0 59151.0 0.0011678683863105513 27104772.0 exact 1.0 1.0010836987334635 10.044831086499027 13.890208219422876 7.002353254836392 59069.41377677144 59203.823126466195 max
84 82 ml-exact 32 900.0074319839478 59175.0 59197.964519141264 0.0003880780589989647 25690960.0 exact 1.0 1.0003035572683552 18.076857400376596 4.592903828252747 11.99344749946664 59157.64838384942 59290.523875622814 max
85 83 ml-exact 33 900.0013158321381 59088.0 59177.0 0.0015062279989168698 24765342.0 exact 1.0 1.0017435758540136 6.884817107658309 17.794276333604117 4.076116410894511 59081.01088134223 59219.82627379965 max
86 84 ml-exact 34 239.74270606040955 59190.0 59195.0 8.447372867038352e-05 7385996.0 exact 1.0 1.0000337877789605 7.324251128646419 1.6666666666666667 5.427690643511643 59143.23414106734 59275.920682982185 max
87 85 ml-exact 35 900.0009059906006 59183.0 59259.953906894 0.0013002704643901015 23480392.0 exact 1.0 1.0012326001806815 12.033185805509241 19.238476723499844 7.403716868683651 59171.487583005524 59304.98468958104 max
88 86 ml-exact 36 900.0016748905182 59166.0 59246.0 0.0013521279113004091 25394548.0 exact 1.0 1.0012675128018793 6.350864646252256 16.0 3.813458994262065 59158.08309355407 59296.56640928705 max
89 87 ml-exact 37 662.0322141647339 59202.0 59207.0 8.445660619573663e-05 20024242.0 exact 1.0 1.0007944557133197 6.348714355925809 1.0 4.272648615385403 59175.74869590455 59307.463498356396 max
90 88 ml-exact 38 446.4717228412628 59212.0 59217.0 8.444234276835777e-05 13956868.0 exact 1.0 1.0000168873277493 9.424272864415235 1.25 7.086301684237463 59166.58497608687 59301.825104164076 max
91 89 ml-exact 39 900.00270819664 59143.0 59184.0 0.0006932350404950712 26147788.0 exact 1.0 1.000625560045311 19.899259335957453 10.249999999999998 13.195510421802544 59114.89526526199 59248.56148321837 max
92 90 ml-exact 40 900.0016450881958 59158.0 59249.0 0.0015382534906521518 24805820.0 exact 1.0 1.0014536112097088 11.618274687299241 18.2 7.2957371421479085 59155.37108495968 59280.88309711401 max
93 91 ml-exact 41 900.002336025238 59170.0 59245.0 0.0012675342234240324 25030081.0 exact 1.0 1.0011829319814112 9.82801163823526 15.0 6.09174261241699 59151.28565990848 59290.71555008509 max
94 92 ml-exact 42 900.0009651184082 59089.0 59214.0 0.002115452960787964 22815014.0 exact 1.0 1.0020306630114733 5.808707387795664 24.999999999999996 2.8470190237906574 59118.99827325628 59249.97235571583 max
95 93 ml-exact 43 155.91705012321472 59232.0 59237.0 8.44138303619665e-05 4841747.0 exact 1.0 1.0000168816260382 8.951582854095786 1.25 6.951968319652182 59161.307350621355 59293.05481512429 max
96 94 ml-exact 44 166.24281811714172 59201.0 59206.0 8.445803280349994e-05 5152172.0 exact 1.0 1.0000168904653324 11.72593294577673 1.25 8.753042311188128 59149.268537538504 59278.50295984831 max
97 95 ml-exact 45 900.0014069080353 59135.0 59246.0 0.0018770609622051238 26599239.0 exact 1.0 1.002046511627907 4.661662149419189 22.194368817113382 2.5302513039490457 59157.45644175721 59292.66862156513 max
98 96 ml-exact 46 900.0008680820465 59152.0 59255.0 0.0017412767108466324 25313316.0 exact 1.0 1.0016566086853627 10.21699442289862 20.599999999999998 5.852105164112592 59168.404124939494 59297.86061218224 max
99 97 ml-exact 47 900.0009942054749 59123.0 59235.0 0.0018943558344468312 26222277.0 exact 1.0 1.0018435206169876 10.411649747325901 22.39924225766622 6.340401388480525 59155.45167739807 59284.172466642885 max
100 98 ml-exact 48 900.0013608932495 59176.0 59288.0 0.00189265918615655 27293583.0 exact 0.9998817227920179 1.0016895316618233 9.82475138153239 22.40264972286062 5.830132165779587 59191.94394017174 59332.08571744459 max
101 99 ml-exact 49 900.0012040138245 59150.0 59203.0 0.0008960270498732037 30493377.0 exact 1.0 1.0010652688535677 12.802606670093036 10.59731191885038 9.314458752116828 59139.98187390398 59273.225027033564 max
102 100 ml-heuristic 0 76.10421586036682 59134.0 59139.0 8.455372543714276e-05 3323921.0 heuristic 0.9995267232345086 1.0 1.0 1.000473500862448 1.0 59126.38771406158 59263.992667692604 max
103 101 ml-heuristic 1 171.21872186660767 59137.0 59142.0 8.454943605526151e-05 8074858.0 heuristic 1.0 1.0 1.0 1.0 1.0 59159.91471896955 59292.24515179818 max
104 102 ml-heuristic 2 136.04512000083923 59186.0 59191.0 8.447943770486263e-05 6066272.0 heuristic 1.0 1.0 1.0 1.0 1.0 59194.00902156645 59323.12664303628 max
105 103 ml-heuristic 3 91.47137689590454 59145.0 59150.0 8.453799983092401e-05 4027684.0 heuristic 1.0 1.0 1.0 1.0 1.0 59141.813764752675 59274.22541262452 max
106 104 ml-heuristic 4 80.09487199783325 59142.0 59146.97828536885 8.417512713219175e-05 3333233.0 heuristic 1.0 1.0 1.0 1.0 1.0 59144.93070046487 59273.654326628006 max
107 105 ml-heuristic 5 83.6420669555664 59115.0 59119.0 6.766472130592912e-05 4011571.0 heuristic 0.999813956634983 1.0 1.0 1.0 1.0 59145.04845907292 59279.36037916677 max
108 106 ml-heuristic 6 161.20603895187378 59125.0 59130.0 8.456659619450317e-05 7299034.0 heuristic 1.0 1.0 1.0 1.0 1.0 59136.974634353304 59268.30857737715 max
109 107 ml-heuristic 7 513.0874490737915 59105.0 59110.0 8.459521191100583e-05 10058510.0 heuristic 1.0 1.0 1.0 1.0 1.0 59125.024194597165 59260.190615193496 max
110 108 ml-heuristic 8 99.7110710144043 59169.0 59173.0 6.760296777028511e-05 4287096.0 heuristic 1.0 1.0 1.0 1.0 1.0 59155.83957873982 59292.27671868388 max
111 109 ml-heuristic 9 114.2093939781189 59124.0 59129.0 8.456802652053312e-05 5532971.0 heuristic 0.999898528665652 1.0 1.0 1.0 1.0 59169.22723451526 59303.04692137199 max
112 110 ml-heuristic 10 69.00587606430054 59127.0 59131.0 6.765098855007019e-05 3004829.0 heuristic 1.0 1.0 1.0 1.0 1.0 59122.74947289353 59256.99939978048 max
113 111 ml-heuristic 11 182.13689517974854 59179.0 59184.0 8.448943037226044e-05 8232427.0 heuristic 0.9996790432109193 1.0 1.0 1.0 1.0 59194.32665087494 59329.026081343145 max
114 112 ml-heuristic 12 167.33386301994324 59102.0 59107.0 8.459950593888532e-05 8150244.0 heuristic 1.0 1.0 1.0 1.0 1.0 59122.0422679371 59259.06427666924 max
115 113 ml-heuristic 13 72.27851104736328 59150.0 59154.0 6.76246830092984e-05 3086582.0 heuristic 1.0 1.0 1.0 1.0 1.0 59136.5588570761 59273.99511476752 max
116 114 ml-heuristic 14 87.49272584915161 59159.0 59163.0 6.761439510471779e-05 3796927.0 heuristic 0.9998309925805743 1.0 1.0 1.0 1.0 59154.03941864104 59289.795816422404 max
117 115 ml-heuristic 15 120.21328401565552 59083.0 59088.0 8.462671157524161e-05 5649006.0 heuristic 1.0 1.0 1.0 1.0 1.0 59098.66203099143 59228.46969562256 max
118 116 ml-heuristic 16 85.93491911888123 59126.0 59131.0 8.456516591685553e-05 4004773.0 heuristic 1.0 1.0 1.0 1.0 1.0 59145.19863458289 59278.22282794401 max
119 117 ml-heuristic 17 73.36747002601624 59152.0 59156.0 6.76223965377333e-05 3275440.0 heuristic 0.9999323821759416 1.0 1.0 1.0 1.0 59127.14368529404 59267.37100160227 max
120 118 ml-heuristic 18 45.34655404090881 59118.0 59123.0 8.457660949287865e-05 1957447.0 heuristic 1.0 1.0 1.0 1.0 1.0 59117.71413049835 59253.828178881624 max
121 119 ml-heuristic 19 44.6319260597229 59159.0 59164.0 8.451799388089724e-05 1868374.0 heuristic 1.0 1.0 1.0 1.0 1.0 59144.031479967074 59274.181598455296 max
122 120 ml-heuristic 20 197.63266706466675 59063.0 59068.0 8.465536799688468e-05 9496908.0 heuristic 0.9999153517979278 1.0 1.0 1.0 1.0 59082.22244394464 59224.2971810249 max
123 121 ml-heuristic 21 48.03322887420654 59175.0 59179.0 6.759611322348965e-05 2017817.0 heuristic 1.0 1.0 1.0 1.0 1.0 59137.98908684254 59265.43858161565 max
124 122 ml-heuristic 22 64.1009349822998 59121.0 59126.0 8.457231778894132e-05 2800742.0 heuristic 1.0 1.0 1.0 1.0 1.0 59089.5855153968 59226.64120575328 max
125 123 ml-heuristic 23 27.042146921157837 59193.0 59196.0 5.068166844052506e-05 1158901.0 heuristic 1.0 1.0 1.0 1.0 1.0 59140.59510257357 59271.35823619222 max
126 124 ml-heuristic 24 110.21264696121216 59146.0 59151.0 8.453657052040713e-05 5005322.0 heuristic 0.9997295561340049 1.0 1.0 1.0 1.0 59150.82751230766 59285.37521566199 max
127 125 ml-heuristic 25 199.52568101882935 59096.0 59101.0 8.460809530255854e-05 9427490.0 heuristic 1.0 1.0 1.0 1.0 1.0 59123.050715683305 59257.270874657275 max
128 126 ml-heuristic 26 101.18916702270508 59087.0 59091.0 6.769678609508014e-05 4823597.0 heuristic 0.9999661527526273 1.0 1.0 1.0 1.0 59118.02329883662 59245.3612305393 max
129 127 ml-heuristic 27 89.17987108230591 59153.0 59158.0 8.452656669991379e-05 4000058.0 heuristic 1.0 1.0 1.0 1.0 1.0 59150.16240797118 59285.64193535254 max
130 128 ml-heuristic 28 191.82191514968872 59099.0 59104.0 8.46038004027141e-05 8959564.0 heuristic 0.9997800784950602 1.0 1.0 1.0 1.0 59105.239747395564 59244.63088727762 max
131 129 ml-heuristic 29 118.9771659374237 59182.0 59187.0 8.448514751106756e-05 5422676.0 heuristic 1.0 1.0 1.0 1.0 1.0 59171.61319933031 59313.94235456237 max
132 130 ml-heuristic 30 99.64783000946045 59134.0 59139.0 8.455372543714276e-05 4687900.0 heuristic 1.0 1.0 1.0 1.0 1.0 59148.03866371211 59281.43814639009 max
133 131 ml-heuristic 31 89.59844493865967 59082.0 59086.96752812557 8.407853704291519e-05 3870809.0 heuristic 1.0 1.0 1.0 1.0 1.0 59069.41377677144 59203.823126466195 max
134 132 ml-heuristic 32 49.78782606124878 59175.0 59180.0 8.449514152936207e-05 2142083.0 heuristic 1.0 1.0 1.0 1.0 1.0 59157.64838384942 59290.523875622814 max
135 133 ml-heuristic 33 130.72261786460876 59069.0 59074.0 8.464676903282602e-05 6075720.0 heuristic 0.9996784457080964 1.0 1.0 1.0 1.0 59081.01088134223 59219.82627379965 max
136 134 ml-heuristic 34 32.732726097106934 59190.0 59193.0 5.0684237202230105e-05 1360799.0 heuristic 1.0 1.0 1.0 1.0 1.0 59143.23414106734 59275.920682982185 max
137 135 ml-heuristic 35 74.79323601722717 59183.0 59187.0 6.758697598972678e-05 3171433.0 heuristic 1.0 1.0 1.0 1.0 1.0 59171.487583005524 59304.98468958104 max
138 136 ml-heuristic 36 141.71325087547302 59166.0 59171.0 8.450799445627557e-05 6659190.0 heuristic 1.0 1.0 1.0 1.0 1.0 59158.08309355407 59296.56640928705 max
139 137 ml-heuristic 37 104.27815413475037 59155.0 59160.0 8.452370890034655e-05 4686611.0 heuristic 0.9992061079017601 1.0 1.0 1.0007945228636634 1.0 59175.74869590455 59307.463498356396 max
140 138 ml-heuristic 38 47.3746600151062 59212.0 59216.0 6.755387421468622e-05 1969556.0 heuristic 1.0 1.0 1.0 1.0 1.0 59166.58497608687 59301.825104164076 max
141 139 ml-heuristic 39 45.22795009613037 59143.0 59147.0 6.763268687756793e-05 1981567.0 heuristic 1.0 1.0 1.0 1.0 1.0 59114.89526526199 59248.56148321837 max
142 140 ml-heuristic 40 77.46431112289429 59158.0 59163.0 8.451942256330505e-05 3400043.0 heuristic 1.0 1.0 1.0 1.0 1.0 59155.37108495968 59280.88309711401 max
143 141 ml-heuristic 41 91.57522082328796 59170.0 59175.0 8.450228156160216e-05 4108854.0 heuristic 1.0 1.0 1.0 1.0 1.0 59151.28565990848 59290.71555008509 max
144 142 ml-heuristic 42 154.93997287750244 59089.0 59094.0 8.461811843151856e-05 8013650.0 heuristic 1.0 1.0 1.0 1.0 1.0 59118.99827325628 59249.97235571583 max
145 143 ml-heuristic 43 17.417819023132324 59232.0 59236.0 6.75310642895732e-05 696457.0 heuristic 1.0 1.0 1.0 1.0 1.0 59161.307350621355 59293.05481512429 max
146 144 ml-heuristic 44 14.177363872528076 59201.0 59205.0 6.756642624279995e-05 588615.0 heuristic 1.0 1.0 1.0 1.0 1.0 59149.268537538504 59278.50295984831 max
147 145 ml-heuristic 45 193.06448602676392 59120.0 59125.0 8.457374830852504e-05 10512489.0 heuristic 0.9997463431132155 1.0 1.0 1.0 1.0 59157.45644175721 59292.66862156513 max
148 146 ml-heuristic 46 88.08861303329468 59152.0 59157.0 8.452799567216662e-05 4325506.0 heuristic 1.0 1.0 1.0 1.0 1.0 59168.404124939494 59297.86061218224 max
149 147 ml-heuristic 47 86.44172787666321 59121.0 59126.0 8.457231778894132e-05 4135744.0 heuristic 0.999966172217242 1.0 1.0 1.0 1.0 59155.45167739807 59284.172466642885 max
150 148 ml-heuristic 48 91.60550999641418 59183.0 59188.0 8.448371998715847e-05 4681469.0 heuristic 1.0 1.0 1.0 1.0 1.0 59191.94394017174 59332.08571744459 max
151 149 ml-heuristic 49 70.29827809333801 59135.0 59140.0 8.45522955948254e-05 3273768.0 heuristic 0.9997464074387151 1.0 1.0 1.0 1.0 59139.98187390398 59273.225027033564 max

Binary file not shown.

Before

Width:  |  Height:  |  Size: 97 KiB

View File

@@ -1,51 +0,0 @@
,Solver,Instance,Wallclock Time,Lower Bound,Upper Bound,Gap,Nodes,Relative Lower Bound,Relative Upper Bound,Relative Wallclock Time,Relative Gap,Relative Nodes
0,baseline,0,89.5249240398407,8160.106459602758,8160.106459602758,0.0,50428.0,1.0,1.0,1.0,,1.0
1,baseline,1,68.46735715866089,8329.665354500348,8329.665354500348,0.0,36735.0,1.0,1.0,1.0,,1.0
2,baseline,2,131.6971151828766,8247.871141626507,8247.871141626507,0.0,77216.0,1.0,1.0,1.0,,1.0
3,baseline,3,32.94829607009888,8386.859108879815,8386.859108879815,0.0,17422.0,1.0,1.0,1.0,,1.0
4,baseline,4,80.09613800048828,8197.045478427175,8197.045478427175,0.0,47823.0,1.0,1.0,1.0,,1.0
5,baseline,5,70.24885201454163,8184.416683317542,8184.416683317542,0.0,37633.0,1.0,1.0,1.0,,1.0
6,baseline,6,76.99211096763611,8146.291920190363,8146.291920190363,0.0,38061.0,1.0,1.0,1.0,,1.0
7,baseline,7,90.94351601600647,8332.628442208696,8332.628442208696,0.0,49185.0,1.0,1.0,1.0,,1.0
8,baseline,8,91.29237294197083,8189.394992049158,8189.394992049159,1.110576181336875e-16,52509.0,1.0,1.0,1.0,1.0,1.0
9,baseline,9,59.57663106918335,8264.94306032112,8264.94306032112,0.0,35568.0,1.0,1.0,1.0,,1.0
10,baseline,10,74.4443690776825,8225.694775199881,8225.694775199881,0.0,38905.0,1.0,1.0,1.0,,1.0
11,baseline,11,47.8407769203186,8380.21322380759,8380.21322380759,0.0,32029.0,1.0,1.0,1.0,,1.0
12,baseline,12,40.67424297332764,8335.12040209855,8335.120402098551,2.182319289698426e-16,31346.0,1.0,1.0,1.0,1.0,1.0
13,baseline,13,82.80278611183167,8180.950128996085,8180.950128996086,1.1117225840912633e-16,45396.0,1.0,1.0,1.0,1.0,1.0
14,baseline,14,89.99744701385498,8335.244300219336,8335.244300219336,0.0,47528.0,1.0,1.0,1.0,,1.0
15,baseline,15,72.18464493751526,8281.242353501702,8281.242353501702,0.0,38504.0,1.0,1.0,1.0,,1.0
16,baseline,16,42.17434501647949,8269.820198565656,8269.820198565656,0.0,23531.0,1.0,1.0,1.0,,1.0
17,baseline,17,65.91456389427185,8349.788875581982,8349.788875581982,0.0,35240.0,1.0,1.0,1.0,,1.0
18,baseline,18,49.87329697608948,8354.975512102363,8354.975512102363,0.0,31665.0,1.0,1.0,1.0,,1.0
19,baseline,19,80.3313570022583,8148.698058722395,8148.698058722395,0.0,48047.0,1.0,1.0,1.0,,1.0
20,baseline,20,34.744563817977905,8254.22546708772,8254.22546708772,0.0,19831.0,1.0,1.0,1.0,,1.0
21,baseline,21,40.45663404464722,8337.747084077018,8337.747084077018,0.0,18857.0,1.0,1.0,1.0,,1.0
22,baseline,22,59.21903705596924,8372.097133312143,8372.097133312143,0.0,37278.0,1.0,1.0,1.0,,1.0
23,baseline,23,80.84772300720215,8163.180180623385,8163.180180623385,0.0,50384.0,1.0,1.0,1.0,,1.0
24,baseline,24,79.59622597694397,8251.926305990946,8251.926305990948,2.2043209501583402e-16,45222.0,1.0,1.0,1.0,1.0,1.0
25,baseline,25,43.39374899864197,8208.77608322561,8208.77608322561,0.0,28242.0,1.0,1.0,1.0,,1.0
26,baseline,26,73.40401291847229,8263.930518826672,8263.930518826672,0.0,41508.0,1.0,1.0,1.0,,1.0
27,baseline,27,68.43603801727295,8198.51655526816,8198.51655526816,0.0,34134.0,1.0,1.0,1.0,,1.0
28,baseline,28,38.52493691444397,8429.328796791307,8429.328796791307,0.0,23191.0,1.0,1.0,1.0,,1.0
29,baseline,29,63.41107797622681,8471.392061275592,8471.392061275594,2.1472142835423904e-16,36104.0,1.0,1.0,1.0,1.0,1.0
30,baseline,30,73.6661651134491,8300.292335288888,8300.292335288888,0.0,39931.0,1.0,1.0,1.0,,1.0
31,baseline,31,34.113643169403076,8472.780799342136,8472.780799342136,0.0,17604.0,1.0,1.0,1.0,,1.0
32,baseline,32,63.027442932128906,8176.089207977811,8176.089207977811,0.0,35832.0,1.0,1.0,1.0,,1.0
33,baseline,33,54.692622900009155,8349.997774829048,8349.997774829048,0.0,36893.0,1.0,1.0,1.0,,1.0
34,baseline,34,73.5447518825531,8228.164027545597,8228.164027545597,0.0,46086.0,1.0,1.0,1.0,,1.0
35,baseline,35,32.710362911224365,8348.576374334334,8348.576374334334,0.0,17965.0,1.0,1.0,1.0,,1.0
36,baseline,36,70.76628684997559,8200.622970997243,8200.622970997245,2.2181112459126466e-16,37770.0,1.0,1.0,1.0,1.0,1.0
37,baseline,37,36.678386926651,8449.787502150532,8449.787502150532,0.0,20885.0,1.0,1.0,1.0,,1.0
38,baseline,38,86.8393452167511,8323.602064698229,8323.602064698229,0.0,50488.0,1.0,1.0,1.0,,1.0
39,baseline,39,61.66756081581116,8230.716290385615,8230.716290385615,0.0,34925.0,1.0,1.0,1.0,,1.0
40,baseline,40,115.80898809432983,8028.769787381955,8028.769787381955,0.0,69443.0,1.0,1.0,1.0,,1.0
41,baseline,41,59.32002782821655,8214.630250558439,8214.630250558439,0.0,36252.0,1.0,1.0,1.0,,1.0
42,baseline,42,27.367344856262207,8482.332346423325,8482.332346423327,2.1444448640506932e-16,10937.0,1.0,1.0,1.0,1.0,1.0
43,baseline,43,42.98321795463562,8350.150643446867,8350.150643446867,0.0,31065.0,1.0,1.0,1.0,,1.0
44,baseline,44,64.18663907051086,8325.739376420757,8325.739376420757,0.0,37466.0,1.0,1.0,1.0,,1.0
45,baseline,45,63.78522491455078,8320.79317232281,8320.793440026451,3.217285123971039e-08,38840.0,1.0,1.0,1.0,1.0,1.0
46,baseline,46,31.455862998962402,8341.756982876166,8341.756982876166,0.0,16130.0,1.0,1.0,1.0,,1.0
47,baseline,47,39.206948041915894,8206.985832918781,8206.985832918781,0.0,25335.0,1.0,1.0,1.0,,1.0
48,baseline,48,62.641757011413574,8197.315974091358,8197.315974091358,0.0,54514.0,1.0,1.0,1.0,,1.0
49,baseline,49,49.18351912498474,8090.681320538064,8090.681320538064,0.0,38800.0,1.0,1.0,1.0,,1.0
1 Solver Instance Wallclock Time Lower Bound Upper Bound Gap Nodes Relative Lower Bound Relative Upper Bound Relative Wallclock Time Relative Gap Relative Nodes
2 0 baseline 0 89.5249240398407 8160.106459602758 8160.106459602758 0.0 50428.0 1.0 1.0 1.0 1.0
3 1 baseline 1 68.46735715866089 8329.665354500348 8329.665354500348 0.0 36735.0 1.0 1.0 1.0 1.0
4 2 baseline 2 131.6971151828766 8247.871141626507 8247.871141626507 0.0 77216.0 1.0 1.0 1.0 1.0
5 3 baseline 3 32.94829607009888 8386.859108879815 8386.859108879815 0.0 17422.0 1.0 1.0 1.0 1.0
6 4 baseline 4 80.09613800048828 8197.045478427175 8197.045478427175 0.0 47823.0 1.0 1.0 1.0 1.0
7 5 baseline 5 70.24885201454163 8184.416683317542 8184.416683317542 0.0 37633.0 1.0 1.0 1.0 1.0
8 6 baseline 6 76.99211096763611 8146.291920190363 8146.291920190363 0.0 38061.0 1.0 1.0 1.0 1.0
9 7 baseline 7 90.94351601600647 8332.628442208696 8332.628442208696 0.0 49185.0 1.0 1.0 1.0 1.0
10 8 baseline 8 91.29237294197083 8189.394992049158 8189.394992049159 1.110576181336875e-16 52509.0 1.0 1.0 1.0 1.0 1.0
11 9 baseline 9 59.57663106918335 8264.94306032112 8264.94306032112 0.0 35568.0 1.0 1.0 1.0 1.0
12 10 baseline 10 74.4443690776825 8225.694775199881 8225.694775199881 0.0 38905.0 1.0 1.0 1.0 1.0
13 11 baseline 11 47.8407769203186 8380.21322380759 8380.21322380759 0.0 32029.0 1.0 1.0 1.0 1.0
14 12 baseline 12 40.67424297332764 8335.12040209855 8335.120402098551 2.182319289698426e-16 31346.0 1.0 1.0 1.0 1.0 1.0
15 13 baseline 13 82.80278611183167 8180.950128996085 8180.950128996086 1.1117225840912633e-16 45396.0 1.0 1.0 1.0 1.0 1.0
16 14 baseline 14 89.99744701385498 8335.244300219336 8335.244300219336 0.0 47528.0 1.0 1.0 1.0 1.0
17 15 baseline 15 72.18464493751526 8281.242353501702 8281.242353501702 0.0 38504.0 1.0 1.0 1.0 1.0
18 16 baseline 16 42.17434501647949 8269.820198565656 8269.820198565656 0.0 23531.0 1.0 1.0 1.0 1.0
19 17 baseline 17 65.91456389427185 8349.788875581982 8349.788875581982 0.0 35240.0 1.0 1.0 1.0 1.0
20 18 baseline 18 49.87329697608948 8354.975512102363 8354.975512102363 0.0 31665.0 1.0 1.0 1.0 1.0
21 19 baseline 19 80.3313570022583 8148.698058722395 8148.698058722395 0.0 48047.0 1.0 1.0 1.0 1.0
22 20 baseline 20 34.744563817977905 8254.22546708772 8254.22546708772 0.0 19831.0 1.0 1.0 1.0 1.0
23 21 baseline 21 40.45663404464722 8337.747084077018 8337.747084077018 0.0 18857.0 1.0 1.0 1.0 1.0
24 22 baseline 22 59.21903705596924 8372.097133312143 8372.097133312143 0.0 37278.0 1.0 1.0 1.0 1.0
25 23 baseline 23 80.84772300720215 8163.180180623385 8163.180180623385 0.0 50384.0 1.0 1.0 1.0 1.0
26 24 baseline 24 79.59622597694397 8251.926305990946 8251.926305990948 2.2043209501583402e-16 45222.0 1.0 1.0 1.0 1.0 1.0
27 25 baseline 25 43.39374899864197 8208.77608322561 8208.77608322561 0.0 28242.0 1.0 1.0 1.0 1.0
28 26 baseline 26 73.40401291847229 8263.930518826672 8263.930518826672 0.0 41508.0 1.0 1.0 1.0 1.0
29 27 baseline 27 68.43603801727295 8198.51655526816 8198.51655526816 0.0 34134.0 1.0 1.0 1.0 1.0
30 28 baseline 28 38.52493691444397 8429.328796791307 8429.328796791307 0.0 23191.0 1.0 1.0 1.0 1.0
31 29 baseline 29 63.41107797622681 8471.392061275592 8471.392061275594 2.1472142835423904e-16 36104.0 1.0 1.0 1.0 1.0 1.0
32 30 baseline 30 73.6661651134491 8300.292335288888 8300.292335288888 0.0 39931.0 1.0 1.0 1.0 1.0
33 31 baseline 31 34.113643169403076 8472.780799342136 8472.780799342136 0.0 17604.0 1.0 1.0 1.0 1.0
34 32 baseline 32 63.027442932128906 8176.089207977811 8176.089207977811 0.0 35832.0 1.0 1.0 1.0 1.0
35 33 baseline 33 54.692622900009155 8349.997774829048 8349.997774829048 0.0 36893.0 1.0 1.0 1.0 1.0
36 34 baseline 34 73.5447518825531 8228.164027545597 8228.164027545597 0.0 46086.0 1.0 1.0 1.0 1.0
37 35 baseline 35 32.710362911224365 8348.576374334334 8348.576374334334 0.0 17965.0 1.0 1.0 1.0 1.0
38 36 baseline 36 70.76628684997559 8200.622970997243 8200.622970997245 2.2181112459126466e-16 37770.0 1.0 1.0 1.0 1.0 1.0
39 37 baseline 37 36.678386926651 8449.787502150532 8449.787502150532 0.0 20885.0 1.0 1.0 1.0 1.0
40 38 baseline 38 86.8393452167511 8323.602064698229 8323.602064698229 0.0 50488.0 1.0 1.0 1.0 1.0
41 39 baseline 39 61.66756081581116 8230.716290385615 8230.716290385615 0.0 34925.0 1.0 1.0 1.0 1.0
42 40 baseline 40 115.80898809432983 8028.769787381955 8028.769787381955 0.0 69443.0 1.0 1.0 1.0 1.0
43 41 baseline 41 59.32002782821655 8214.630250558439 8214.630250558439 0.0 36252.0 1.0 1.0 1.0 1.0
44 42 baseline 42 27.367344856262207 8482.332346423325 8482.332346423327 2.1444448640506932e-16 10937.0 1.0 1.0 1.0 1.0 1.0
45 43 baseline 43 42.98321795463562 8350.150643446867 8350.150643446867 0.0 31065.0 1.0 1.0 1.0 1.0
46 44 baseline 44 64.18663907051086 8325.739376420757 8325.739376420757 0.0 37466.0 1.0 1.0 1.0 1.0
47 45 baseline 45 63.78522491455078 8320.79317232281 8320.793440026451 3.217285123971039e-08 38840.0 1.0 1.0 1.0 1.0 1.0
48 46 baseline 46 31.455862998962402 8341.756982876166 8341.756982876166 0.0 16130.0 1.0 1.0 1.0 1.0
49 47 baseline 47 39.206948041915894 8206.985832918781 8206.985832918781 0.0 25335.0 1.0 1.0 1.0 1.0
50 48 baseline 48 62.641757011413574 8197.315974091358 8197.315974091358 0.0 54514.0 1.0 1.0 1.0 1.0
51 49 baseline 49 49.18351912498474 8090.681320538064 8090.681320538064 0.0 38800.0 1.0 1.0 1.0 1.0

View File

@@ -1,151 +0,0 @@
,Solver,Instance,Wallclock Time,Lower Bound,Upper Bound,Gap,Nodes,Relative Lower Bound,Relative Upper Bound,Relative Wallclock Time,Relative Gap,Relative Nodes
0,baseline,0,89.5249240398407,8160.106459602757,8160.106459602757,0.0,50428.0,0.9999999999999999,1.0,924.2902114943435,,50428.0
1,baseline,1,68.46735715866089,8329.665354500348,8329.665354500348,0.0,36735.0,1.0,1.0090376984767917,344.32872346548237,,816.3333333333334
2,baseline,2,131.6971151828766,8247.871141626507,8247.871141626507,0.0,77216.0,1.0,1.0022162274368718,953.573952433317,,3676.9523809523807
3,baseline,3,32.94829607009888,8386.859108879815,8386.859108879815,0.0,17422.0,1.0,1.0,355.8521179348526,,17422.0
4,baseline,4,80.09613800048828,8197.045478427175,8197.045478427175,0.0,47823.0,1.0,1.0,311.613064562208,,1707.9642857142858
5,baseline,5,70.24885201454164,8184.416683317541,8184.416683317541,0.0,37633.0,0.9999999999999999,1.0,525.1624903084369,,4181.444444444444
6,baseline,6,76.99211096763611,8146.291920190362,8146.291920190362,0.0,38061.0,0.9999999999999999,1.0,769.5512234529302,,38061.0
7,baseline,7,90.94351601600648,8332.628442208696,8332.628442208696,0.0,49185.0,1.0,1.0048882560944687,958.3075896894786,,49185.0
8,baseline,8,91.29237294197084,8189.394992049158,8189.394992049159,1.1105761813368749e-16,52509.0,1.0,1.0000000000000002,1809.7036902252514,inf,52509.0
9,baseline,9,59.57663106918335,8264.94306032112,8264.94306032112,0.0,35568.0,1.0,1.0,592.7777627536799,,35568.0
10,baseline,10,74.44436907768251,8225.694775199881,8225.694775199881,0.0,38905.0,1.0,1.0023427358501626,585.1124155571589,,3536.818181818182
11,baseline,11,47.8407769203186,8380.21322380759,8380.21322380759,0.0,32029.0,1.0,1.0,544.8893215589155,,32029.0
12,baseline,12,40.674242973327644,8335.12040209855,8335.120402098551,2.1823192896984264e-16,31346.0,1.0,1.0019781200731899,345.41153746477056,inf,1362.8695652173913
13,baseline,13,82.80278611183168,8180.950128996085,8180.950128996086,1.111722584091263e-16,45396.0,1.0,1.001565824091247,307.7965272971074,inf,648.5142857142857
14,baseline,14,89.99744701385498,8335.244300219336,8335.244300219336,0.0,47528.0,1.0,1.0,770.7049722222789,,6789.714285714285
15,baseline,15,72.18464493751527,8281.242353501702,8281.242353501702,0.0,38504.0,1.0,1.0,1800.7002920237667,,38504.0
16,baseline,16,42.17434501647949,8269.820198565656,8269.820198565656,0.0,23531.0,1.0,1.000403224416854,1410.6559487069069,,23531.0
17,baseline,17,65.91456389427185,8349.788875581982,8349.788875581982,0.0,35240.0,1.0,1.0,1182.0078197481776,,35240.0
18,baseline,18,49.87329697608948,8354.975512102363,8354.975512102363,0.0,31665.0,1.0,1.0,843.6224093499328,,31665.0
19,baseline,19,80.3313570022583,8148.698058722395,8148.698058722395,0.0,48047.0,1.0,1.000966789196668,580.6596204121938,,1779.5185185185185
20,baseline,20,34.744563817977905,8254.22546708772,8254.22546708772,0.0,19831.0,1.0,1.004384894337412,508.78858964332596,,19831.0
21,baseline,21,40.45663404464722,8337.747084077018,8337.747084077018,0.0,18857.0,1.0,1.0,462.30308297552364,,18857.0
22,baseline,22,59.21903705596924,8372.097133312143,8372.097133312143,0.0,37278.0,1.0,1.0,514.5235539407097,,1433.7692307692307
23,baseline,23,80.84772300720216,8163.180180623385,8163.180180623385,0.0,50384.0,1.0,1.0036539041416717,1353.8923034540035,,50384.0
24,baseline,24,79.59622597694397,8251.926305990946,8251.926305990948,2.20432095015834e-16,45222.0,1.0,1.002542812972684,327.00203830964284,inf,674.955223880597
25,baseline,25,43.39374899864197,8208.77608322561,8208.77608322561,0.0,28242.0,1.0,1.0,203.83643836690354,,641.8636363636364
26,baseline,26,73.4040129184723,8263.930518826672,8263.930518826672,0.0,41508.0,1.0,1.0,1158.157296819456,,41508.0
27,baseline,27,68.43603801727295,8198.51655526816,8198.51655526816,0.0,34134.0,1.0,1.0,465.76709499137564,,2007.8823529411766
28,baseline,28,38.52493691444397,8429.328796791307,8429.328796791307,0.0,23191.0,1.0,1.0,212.3627067143475,,1449.4375
29,baseline,29,63.411077976226814,8471.392061275592,8471.392061275594,2.1472142835423904e-16,36104.0,1.0,1.00713405678795,422.9829560183529,inf,1504.3333333333333
30,baseline,30,73.6661651134491,8300.292335288888,8300.292335288888,0.0,39931.0,1.0,1.0,752.156311010492,,3327.5833333333335
31,baseline,31,34.113643169403076,8472.780799342136,8472.780799342136,0.0,17604.0,0.9999999880202372,1.000000008626077,605.9064481022414,,17604.0
32,baseline,32,63.0274429321289,8176.089207977811,8176.089207977811,0.0,35832.0,1.0,1.0,938.911818608021,,35832.0
33,baseline,33,54.692622900009155,8349.997774829048,8349.997774829048,0.0,36893.0,1.0,1.0,470.66514905927494,,36893.0
34,baseline,34,73.54475188255309,8228.164027545597,8228.164027545597,0.0,46086.0,1.0,1.0,433.74928744081916,,2425.5789473684213
35,baseline,35,32.710362911224365,8348.576374334334,8348.576374334334,0.0,17965.0,1.0,1.0,544.8268431962766,,17965.0
36,baseline,36,70.7662868499756,8200.622970997243,8200.622970997245,2.2181112459126463e-16,37770.0,1.0,1.0039798679142482,716.773814957293,inf,37770.0
37,baseline,37,36.678386926651,8449.787502150532,8449.787502150532,0.0,20885.0,1.0,1.0,239.07327432143046,,835.4
38,baseline,38,86.8393452167511,8323.602064698229,8323.602064698229,0.0,50488.0,1.0,1.0,791.2621177625805,,50488.0
39,baseline,39,61.66756081581116,8230.716290385615,8230.716290385615,0.0,34925.0,1.0,1.0,310.21299967617745,,1343.2692307692307
40,baseline,40,115.80898809432985,8028.769787381955,8028.769787381955,0.0,69443.0,1.0,1.0059686353257602,962.877706084664,,69443.0
41,baseline,41,59.320027828216546,8214.630250558439,8214.630250558439,0.0,36252.0,1.0,1.0,279.402791487412,,2132.470588235294
42,baseline,42,27.36734485626221,8482.332346423325,8482.332346423327,2.1444448640506927e-16,10937.0,1.0,1.0,296.90942199552,1.0,10937.0
43,baseline,43,42.98321795463562,8350.150643446867,8350.150643446867,0.0,31065.0,1.0,1.0,786.6613272710613,,31065.0
44,baseline,44,64.18663907051085,8325.739376420757,8325.739376420757,0.0,37466.0,1.0,1.0,601.5284689994793,,37466.0
45,baseline,45,63.78522491455078,8320.793172322808,8320.793440026451,3.2172851239710385e-08,38840.0,0.9999999999999998,1.0000000321728513,716.7763545319855,inf,38840.0
46,baseline,46,31.4558629989624,8341.756982876166,8341.756982876166,0.0,16130.0,1.0,1.0,613.653265116279,,16130.0
47,baseline,47,39.20694804191589,8206.985832918781,8206.985832918781,0.0,25335.0,1.0,1.0,280.93451131197753,,25335.0
48,baseline,48,62.641757011413574,8197.315974091358,8197.315974091358,0.0,54514.0,1.0,1.0073005569898068,261.05295308194985,,746.7671232876712
49,baseline,49,49.18351912498474,8090.681320538064,8090.681320538064,0.0,38800.0,1.0,1.0026687299831225,375.8371656618988,,38800.0
50,ml-exact,0,34.49193096160889,8160.106459602758,8160.106459602758,0.0,32951.0,1.0,1.0000000000000002,356.1081397753119,,32951.0
51,ml-exact,1,39.43942403793335,8329.665354500348,8329.665354500348,0.0,33716.0,1.0,1.0090376984767917,198.34454105955817,,749.2444444444444
52,ml-exact,2,54.0330810546875,8247.871141626507,8247.871141626507,0.0,48098.0,1.0,1.0022162274368718,391.2351351957892,,2290.3809523809523
53,ml-exact,3,15.311645030975342,8386.859108879815,8386.859108879815,0.0,10140.0,1.0,1.0,165.37065533668084,,10140.0
54,ml-exact,4,24.112047910690308,8197.045478427175,8197.045478427175,0.0,32151.0,1.0,1.0,93.80763330031203,,1148.25
55,ml-exact,5,33.69559407234192,8184.416683317542,8184.416683317542,0.0,31637.0,1.0,1.0000000000000002,251.89966224345207,,3515.222222222222
56,ml-exact,6,25.395578861236572,8146.291920190363,8146.291920190363,0.0,18684.0,1.0,1.0000000000000002,253.83378293361804,,18684.0
57,ml-exact,7,45.65329885482788,8332.628442208696,8332.628442208696,0.0,34261.0,1.0,1.0048882560944687,481.0667621344588,,34261.0
58,ml-exact,8,45.959444999694824,8189.394992049158,8189.394992049158,0.0,32915.0,1.0,1.0,911.0616203340486,,32915.0
59,ml-exact,9,27.292019844055176,8264.94306032112,8264.94306032112,0.0,22256.0,1.0,1.0,271.551146378204,,22256.0
60,ml-exact,10,33.28360414505005,8225.694775199881,8225.694775199883,2.2113504734336021e-16,32743.0,1.0,1.0023427358501629,261.6000412259086,inf,2976.6363636363635
61,ml-exact,11,13.287060976028442,8380.21322380759,8380.21322380759,0.0,15760.0,1.0,1.0,151.33486759210984,,15760.0
62,ml-exact,12,30.385483980178833,8335.12040209855,8335.12040209855,0.0,26800.0,1.0,1.0019781200731896,258.03791222585767,,1165.2173913043478
63,ml-exact,13,53.78090000152588,8180.950128996085,8180.950128996085,0.0,38849.0,1.0,1.0015658240912468,199.91566748763452,,554.9857142857143
64,ml-exact,14,32.64224600791931,8335.244300219336,8335.244300219336,0.0,30763.0,1.0,1.0,279.53616616406106,,4394.714285714285
65,ml-exact,15,33.97071599960327,8281.242353501702,8281.242353501702,0.0,30903.0,1.0,1.0,847.425075979707,,30903.0
66,ml-exact,16,34.40068793296814,8269.820198565656,8269.820198565656,0.0,25773.0,1.0,1.000403224416854,1150.6411078414953,,25773.0
67,ml-exact,17,29.94601798057556,8349.788875581982,8349.788875581982,0.0,26524.0,1.0,1.0,537.0046516599328,,26524.0
68,ml-exact,18,26.21188998222351,8354.975512102363,8354.975512102363,0.0,23595.0,1.0,1.0,443.3823132050057,,23595.0
69,ml-exact,19,44.91053318977356,8148.698058722395,8148.698058722396,1.1161227170509796e-16,36233.0,1.0,1.0009667891966683,324.6270712662061,inf,1341.962962962963
70,ml-exact,20,24.929107904434204,8254.22546708772,8254.225467087721,2.20370695082023e-16,19171.0,1.0,1.0043848943374123,365.0541051029243,inf,19171.0
71,ml-exact,21,23.808892011642456,8337.747084077018,8337.74708407702,2.1816317827865812e-16,16213.0,1.0,1.0000000000000002,272.0672255399839,inf,16213.0
72,ml-exact,22,29.496449947357178,8372.097133312143,8372.097133312145,2.1726807209488663e-16,23405.0,1.0,1.0000000000000002,256.27938261145164,inf,900.1923076923077
73,ml-exact,23,54.53324818611145,8163.180180623385,8163.180180623385,0.0,44205.0,1.0,1.0036539041416717,913.2247916857979,,44205.0
74,ml-exact,24,35.66223120689392,8251.926305990946,8251.926305990948,2.2043209501583402e-16,30816.0,1.0,1.002542812972684,146.50973902584275,inf,459.94029850746267
75,ml-exact,25,29.14737296104431,8208.77608322561,8208.776083225612,2.2159081757180675e-16,25610.0,1.0,1.0000000000000002,136.9159574646799,inf,582.0454545454545
76,ml-exact,26,27.671333074569702,8263.930518826672,8263.930518826672,0.0,19654.0,1.0,1.0,436.59406398705966,,19654.0
77,ml-exact,27,33.428922176361084,8198.51655526816,8198.51655526816,0.0,34427.0,1.0,1.0,227.5130533834623,,2025.1176470588234
78,ml-exact,28,26.386518955230713,8429.328796791307,8429.328796791307,0.0,21051.0,1.0,1.0,145.45157072019325,,1315.6875
79,ml-exact,29,32.452534914016724,8471.392061275592,8471.392061275594,2.1472142835423904e-16,25241.0,1.0,1.00713405678795,216.47430679803114,inf,1051.7083333333333
80,ml-exact,30,33.65191102027893,8300.292335288888,8300.292335288888,0.0,27290.0,1.0,1.0,343.59732466710483,,2274.1666666666665
81,ml-exact,31,26.8163058757782,8472.780900844042,8472.780900844042,0.0,17085.0,1.0,1.00000002060584,476.29543885799944,,17085.0
82,ml-exact,32,27.298824071884155,8176.089207977811,8176.089207977811,0.0,21127.0,1.0,1.0,406.66711773146375,,21127.0
83,ml-exact,33,27.07152795791626,8349.997774829048,8349.997774829048,0.0,20768.0,1.0,1.0,232.96788608711708,,20768.0
84,ml-exact,34,51.8715980052948,8228.164027545597,8228.164027545597,0.0,42602.0,1.0,1.0,305.92622991159624,,2242.2105263157896
85,ml-exact,35,26.559547185897827,8348.576374334334,8348.576374334334,0.0,15315.0,1.0,1.0,442.37828511067517,,15315.0
86,ml-exact,36,42.17573404312134,8200.622970997243,8200.622970997245,2.2181112459126466e-16,32284.0,1.0,1.0039798679142482,427.1873392594525,inf,32284.0
87,ml-exact,37,20.249451875686646,8449.787502150532,8449.787502150532,0.0,13815.0,1.0,1.0,131.9878862943405,,552.6
88,ml-exact,38,34.309616804122925,8323.602064698229,8323.602064698229,0.0,31546.0,1.0,1.0,312.62211828396147,,31546.0
89,ml-exact,39,28.144772052764893,8230.716290385615,8230.716290385615,0.0,22759.0,1.0,1.0,141.57969032969933,,875.3461538461538
90,ml-exact,40,54.61736702919006,8028.769787381955,8028.769787381955,0.0,46343.0,1.0,1.0059686353257602,454.1084931561159,,46343.0
91,ml-exact,41,30.99381184577942,8214.630250558439,8214.630250558439,0.0,28492.0,1.0,1.0,145.98370677815547,,1676.0
92,ml-exact,42,19.046553134918213,8482.332346423325,8482.332346423327,2.1444448640506932e-16,9292.0,1.0,1.0,206.6368188802036,1.0000000000000002,9292.0
93,ml-exact,43,29.105360984802246,8350.150643446867,8350.150643446867,0.0,24245.0,1.0,1.0,532.674448133975,,24245.0
94,ml-exact,44,28.813607215881348,8325.739376420757,8325.739376420757,0.0,22941.0,1.0,1.0,270.0282377440192,,22941.0
95,ml-exact,45,39.90794801712036,8320.79317232281,8320.79317232281,0.0,33304.0,1.0,1.0,448.459240127851,,33304.0
96,ml-exact,46,23.966022968292236,8341.756982876166,8341.756982876166,0.0,16789.0,1.0,1.0,467.53853953488374,,16789.0
97,ml-exact,47,27.642159938812256,8206.985832918781,8206.985832918781,0.0,24637.0,1.0,1.0,198.0678701569822,,24637.0
98,ml-exact,48,33.94082999229431,8197.315974091358,8197.315974091358,0.0,33963.0,1.0,1.0073005569898068,141.44484960609344,,465.24657534246575
99,ml-exact,49,37.428488969802856,8090.681320538064,8090.681320538064,0.0,39891.0,1.0,1.0026687299831225,286.0107910064622,,39891.0
100,ml-heuristic,0,0.09685802459716797,8160.106459602758,8160.106459602758,0.0,1.0,1.0,1.0000000000000002,1.0,,1.0
101,ml-heuristic,1,0.19884300231933594,8255.05862375065,8255.05862375065,0.0,45.0,0.9910432499296759,1.0,1.0,,1.0
102,ml-heuristic,2,0.1381089687347412,8229.632404496291,8229.632404496293,2.210292409357245e-16,21.0,0.9977886733658864,1.0,1.0,inf,1.0
103,ml-heuristic,3,0.0925898551940918,8386.859108879815,8386.859108879815,0.0,1.0,1.0,1.0,1.0,,1.0
104,ml-heuristic,4,0.2570371627807617,8197.045478427175,8197.045478427175,0.0,28.0,1.0,1.0,1.0,,1.0
105,ml-heuristic,5,0.13376593589782715,8184.416683317542,8184.416683317542,0.0,9.0,1.0,1.0000000000000002,1.0,,1.0
106,ml-heuristic,6,0.10004806518554688,8146.291920190363,8146.291920190363,0.0,1.0,1.0,1.0000000000000002,1.0,,1.0
107,ml-heuristic,7,0.09490013122558594,8292.094560437725,8292.094560437725,0.0,1.0,0.9951355227162599,1.0,1.0,,1.0
108,ml-heuristic,8,0.0504460334777832,8189.394992049158,8189.39499204916,2.22115236267375e-16,1.0,1.0,1.0000000000000002,1.0,inf,1.0
109,ml-heuristic,9,0.10050415992736816,8264.94306032112,8265.728238597903,9.500105095127722e-05,1.0,1.0,1.0000950010509513,1.0,inf,1.0
110,ml-heuristic,10,0.12723088264465332,8206.469185635438,8206.469185635438,0.0,11.0,0.9976627397332555,1.0,1.0,,1.0
111,ml-heuristic,11,0.087799072265625,8380.21322380759,8380.213223807592,2.1705765175261136e-16,1.0,1.0,1.0000000000000002,1.0,inf,1.0
112,ml-heuristic,12,0.11775588989257812,8318.665083714313,8318.665083714313,0.0,23.0,0.9980257851608126,1.0,1.0,,1.0
113,ml-heuristic,13,0.26901793479919434,8168.160226931591,8168.160226931591,0.0,70.0,0.9984366238807443,1.0,1.0,,1.0
114,ml-heuristic,14,0.11677289009094238,8335.244300219336,8335.244300219336,0.0,7.0,1.0,1.0,1.0,,1.0
115,ml-heuristic,15,0.040086984634399414,8281.242353501702,8281.242353501702,0.0,1.0,1.0,1.0,1.0,,1.0
116,ml-heuristic,16,0.029896974563598633,8266.48694918614,8266.48694918614,0.0,1.0,0.9995969381075426,1.0,1.0,,1.0
117,ml-heuristic,17,0.05576491355895996,8349.788875581982,8349.788875581982,0.0,1.0,1.0,1.0,1.0,,1.0
118,ml-heuristic,18,0.059118032455444336,8354.975512102363,8354.975512102363,0.0,1.0,1.0,1.0,1.0,,1.0
119,ml-heuristic,19,0.13834500312805176,8140.8275945520445,8140.8275945520445,0.0,27.0,0.9990341445819156,1.0,1.0,,1.0
120,ml-heuristic,20,0.06828880310058594,8218.189574160206,8218.189574160206,0.0,1.0,0.9956342490193416,1.0,1.0,,1.0
121,ml-heuristic,21,0.08751106262207031,8337.747084077018,8337.747084077018,0.0,1.0,1.0,1.0,1.0,,1.0
122,ml-heuristic,22,0.11509490013122559,8372.097133312143,8372.097133312143,0.0,26.0,1.0,1.0,1.0,,1.0
123,ml-heuristic,23,0.05971503257751465,8133.46129271979,8133.46129271979,0.0,1.0,0.9963593982680748,1.0,1.0,,1.0
124,ml-heuristic,24,0.24341201782226562,8230.99642151221,8230.99642151221,0.0,67.0,0.9974636365252632,1.0,1.0,,1.0
125,ml-heuristic,25,0.21288514137268066,8208.77608322561,8208.77608322561,0.0,44.0,1.0,1.0,1.0,,1.0
126,ml-heuristic,26,0.06338000297546387,8263.930518826672,8263.930518826672,0.0,1.0,1.0,1.0,1.0,,1.0
127,ml-heuristic,27,0.14693188667297363,8198.51655526816,8198.51655526816,0.0,17.0,1.0,1.0,1.0,,1.0
128,ml-heuristic,28,0.1814110279083252,8429.328796791307,8429.328796791307,0.0,16.0,1.0,1.0,1.0,,1.0
129,ml-heuristic,29,0.14991402626037598,8411.384764698932,8411.384764698932,0.0,24.0,0.99291647746408,1.0,1.0,,1.0
130,ml-heuristic,30,0.09793996810913086,8300.292335288888,8300.292335288888,0.0,12.0,1.0,1.0,1.0,,1.0
131,ml-heuristic,31,0.05630183219909668,8472.780726255278,8472.780726255278,0.0,1.0,0.9999999793941604,1.0,1.0,,1.0
132,ml-heuristic,32,0.06712818145751953,8176.089207977811,8176.089207977811,0.0,1.0,1.0,1.0,1.0,,1.0
133,ml-heuristic,33,0.11620283126831055,8349.997774829048,8349.997774829048,0.0,1.0,1.0,1.0,1.0,,1.0
134,ml-heuristic,34,0.1695559024810791,8228.164027545597,8228.164027545597,0.0,19.0,1.0,1.0,1.0,,1.0
135,ml-heuristic,35,0.060038089752197266,8348.576374334334,8348.576374334334,0.0,1.0,1.0,1.0,1.0,,1.0
136,ml-heuristic,36,0.09872889518737793,8168.114952378382,8168.114952378382,0.0,1.0,0.9960359086457419,1.0,1.0,,1.0
137,ml-heuristic,37,0.15341901779174805,8449.787502150532,8449.787502150532,0.0,25.0,1.0,1.0,1.0,,1.0
138,ml-heuristic,38,0.10974788665771484,8323.602064698229,8323.602064698229,0.0,1.0,1.0,1.0,1.0,,1.0
139,ml-heuristic,39,0.1987910270690918,8230.716290385615,8230.716290385615,0.0,26.0,1.0,1.0,1.0,,1.0
140,ml-heuristic,40,0.12027382850646973,7981.13331314949,7981.13331314949,0.0,1.0,0.9940667779131829,1.0,1.0,,1.0
141,ml-heuristic,41,0.2123100757598877,8214.630250558439,8214.630250558439,0.0,17.0,1.0,1.0,1.0,,1.0
142,ml-heuristic,42,0.09217405319213867,8482.332346423325,8482.332346423327,2.1444448640506932e-16,1.0,1.0,1.0,1.0,1.0000000000000002,1.0
143,ml-heuristic,43,0.05464005470275879,8350.150643446867,8350.150643446867,0.0,1.0,1.0,1.0,1.0,,1.0
144,ml-heuristic,44,0.10670590400695801,8325.739376420757,8325.739376420757,0.0,1.0,1.0,1.0,1.0,,1.0
145,ml-heuristic,45,0.0889890193939209,8320.79317232281,8320.79317232281,0.0,1.0,1.0,1.0,1.0,,1.0
146,ml-heuristic,46,0.05125999450683594,8341.756982876166,8341.756982876166,0.0,1.0,1.0,1.0,1.0,,1.0
147,ml-heuristic,47,0.13955903053283691,8206.985832918781,8206.985832918781,0.0,1.0,1.0,1.0,1.0,,1.0
148,ml-heuristic,48,0.2399580478668213,8137.904736782855,8137.904736782855,0.0,73.0,0.9927523548566044,1.0,1.0,,1.0
149,ml-heuristic,49,0.13086390495300293,8069.146946144667,8069.146946144667,0.0,1.0,0.9973383731801755,1.0,1.0,,1.0
1 Solver Instance Wallclock Time Lower Bound Upper Bound Gap Nodes Relative Lower Bound Relative Upper Bound Relative Wallclock Time Relative Gap Relative Nodes
2 0 baseline 0 89.5249240398407 8160.106459602757 8160.106459602757 0.0 50428.0 0.9999999999999999 1.0 924.2902114943435 50428.0
3 1 baseline 1 68.46735715866089 8329.665354500348 8329.665354500348 0.0 36735.0 1.0 1.0090376984767917 344.32872346548237 816.3333333333334
4 2 baseline 2 131.6971151828766 8247.871141626507 8247.871141626507 0.0 77216.0 1.0 1.0022162274368718 953.573952433317 3676.9523809523807
5 3 baseline 3 32.94829607009888 8386.859108879815 8386.859108879815 0.0 17422.0 1.0 1.0 355.8521179348526 17422.0
6 4 baseline 4 80.09613800048828 8197.045478427175 8197.045478427175 0.0 47823.0 1.0 1.0 311.613064562208 1707.9642857142858
7 5 baseline 5 70.24885201454164 8184.416683317541 8184.416683317541 0.0 37633.0 0.9999999999999999 1.0 525.1624903084369 4181.444444444444
8 6 baseline 6 76.99211096763611 8146.291920190362 8146.291920190362 0.0 38061.0 0.9999999999999999 1.0 769.5512234529302 38061.0
9 7 baseline 7 90.94351601600648 8332.628442208696 8332.628442208696 0.0 49185.0 1.0 1.0048882560944687 958.3075896894786 49185.0
10 8 baseline 8 91.29237294197084 8189.394992049158 8189.394992049159 1.1105761813368749e-16 52509.0 1.0 1.0000000000000002 1809.7036902252514 inf 52509.0
11 9 baseline 9 59.57663106918335 8264.94306032112 8264.94306032112 0.0 35568.0 1.0 1.0 592.7777627536799 35568.0
12 10 baseline 10 74.44436907768251 8225.694775199881 8225.694775199881 0.0 38905.0 1.0 1.0023427358501626 585.1124155571589 3536.818181818182
13 11 baseline 11 47.8407769203186 8380.21322380759 8380.21322380759 0.0 32029.0 1.0 1.0 544.8893215589155 32029.0
14 12 baseline 12 40.674242973327644 8335.12040209855 8335.120402098551 2.1823192896984264e-16 31346.0 1.0 1.0019781200731899 345.41153746477056 inf 1362.8695652173913
15 13 baseline 13 82.80278611183168 8180.950128996085 8180.950128996086 1.111722584091263e-16 45396.0 1.0 1.001565824091247 307.7965272971074 inf 648.5142857142857
16 14 baseline 14 89.99744701385498 8335.244300219336 8335.244300219336 0.0 47528.0 1.0 1.0 770.7049722222789 6789.714285714285
17 15 baseline 15 72.18464493751527 8281.242353501702 8281.242353501702 0.0 38504.0 1.0 1.0 1800.7002920237667 38504.0
18 16 baseline 16 42.17434501647949 8269.820198565656 8269.820198565656 0.0 23531.0 1.0 1.000403224416854 1410.6559487069069 23531.0
19 17 baseline 17 65.91456389427185 8349.788875581982 8349.788875581982 0.0 35240.0 1.0 1.0 1182.0078197481776 35240.0
20 18 baseline 18 49.87329697608948 8354.975512102363 8354.975512102363 0.0 31665.0 1.0 1.0 843.6224093499328 31665.0
21 19 baseline 19 80.3313570022583 8148.698058722395 8148.698058722395 0.0 48047.0 1.0 1.000966789196668 580.6596204121938 1779.5185185185185
22 20 baseline 20 34.744563817977905 8254.22546708772 8254.22546708772 0.0 19831.0 1.0 1.004384894337412 508.78858964332596 19831.0
23 21 baseline 21 40.45663404464722 8337.747084077018 8337.747084077018 0.0 18857.0 1.0 1.0 462.30308297552364 18857.0
24 22 baseline 22 59.21903705596924 8372.097133312143 8372.097133312143 0.0 37278.0 1.0 1.0 514.5235539407097 1433.7692307692307
25 23 baseline 23 80.84772300720216 8163.180180623385 8163.180180623385 0.0 50384.0 1.0 1.0036539041416717 1353.8923034540035 50384.0
26 24 baseline 24 79.59622597694397 8251.926305990946 8251.926305990948 2.20432095015834e-16 45222.0 1.0 1.002542812972684 327.00203830964284 inf 674.955223880597
27 25 baseline 25 43.39374899864197 8208.77608322561 8208.77608322561 0.0 28242.0 1.0 1.0 203.83643836690354 641.8636363636364
28 26 baseline 26 73.4040129184723 8263.930518826672 8263.930518826672 0.0 41508.0 1.0 1.0 1158.157296819456 41508.0
29 27 baseline 27 68.43603801727295 8198.51655526816 8198.51655526816 0.0 34134.0 1.0 1.0 465.76709499137564 2007.8823529411766
30 28 baseline 28 38.52493691444397 8429.328796791307 8429.328796791307 0.0 23191.0 1.0 1.0 212.3627067143475 1449.4375
31 29 baseline 29 63.411077976226814 8471.392061275592 8471.392061275594 2.1472142835423904e-16 36104.0 1.0 1.00713405678795 422.9829560183529 inf 1504.3333333333333
32 30 baseline 30 73.6661651134491 8300.292335288888 8300.292335288888 0.0 39931.0 1.0 1.0 752.156311010492 3327.5833333333335
33 31 baseline 31 34.113643169403076 8472.780799342136 8472.780799342136 0.0 17604.0 0.9999999880202372 1.000000008626077 605.9064481022414 17604.0
34 32 baseline 32 63.0274429321289 8176.089207977811 8176.089207977811 0.0 35832.0 1.0 1.0 938.911818608021 35832.0
35 33 baseline 33 54.692622900009155 8349.997774829048 8349.997774829048 0.0 36893.0 1.0 1.0 470.66514905927494 36893.0
36 34 baseline 34 73.54475188255309 8228.164027545597 8228.164027545597 0.0 46086.0 1.0 1.0 433.74928744081916 2425.5789473684213
37 35 baseline 35 32.710362911224365 8348.576374334334 8348.576374334334 0.0 17965.0 1.0 1.0 544.8268431962766 17965.0
38 36 baseline 36 70.7662868499756 8200.622970997243 8200.622970997245 2.2181112459126463e-16 37770.0 1.0 1.0039798679142482 716.773814957293 inf 37770.0
39 37 baseline 37 36.678386926651 8449.787502150532 8449.787502150532 0.0 20885.0 1.0 1.0 239.07327432143046 835.4
40 38 baseline 38 86.8393452167511 8323.602064698229 8323.602064698229 0.0 50488.0 1.0 1.0 791.2621177625805 50488.0
41 39 baseline 39 61.66756081581116 8230.716290385615 8230.716290385615 0.0 34925.0 1.0 1.0 310.21299967617745 1343.2692307692307
42 40 baseline 40 115.80898809432985 8028.769787381955 8028.769787381955 0.0 69443.0 1.0 1.0059686353257602 962.877706084664 69443.0
43 41 baseline 41 59.320027828216546 8214.630250558439 8214.630250558439 0.0 36252.0 1.0 1.0 279.402791487412 2132.470588235294
44 42 baseline 42 27.36734485626221 8482.332346423325 8482.332346423327 2.1444448640506927e-16 10937.0 1.0 1.0 296.90942199552 1.0 10937.0
45 43 baseline 43 42.98321795463562 8350.150643446867 8350.150643446867 0.0 31065.0 1.0 1.0 786.6613272710613 31065.0
46 44 baseline 44 64.18663907051085 8325.739376420757 8325.739376420757 0.0 37466.0 1.0 1.0 601.5284689994793 37466.0
47 45 baseline 45 63.78522491455078 8320.793172322808 8320.793440026451 3.2172851239710385e-08 38840.0 0.9999999999999998 1.0000000321728513 716.7763545319855 inf 38840.0
48 46 baseline 46 31.4558629989624 8341.756982876166 8341.756982876166 0.0 16130.0 1.0 1.0 613.653265116279 16130.0
49 47 baseline 47 39.20694804191589 8206.985832918781 8206.985832918781 0.0 25335.0 1.0 1.0 280.93451131197753 25335.0
50 48 baseline 48 62.641757011413574 8197.315974091358 8197.315974091358 0.0 54514.0 1.0 1.0073005569898068 261.05295308194985 746.7671232876712
51 49 baseline 49 49.18351912498474 8090.681320538064 8090.681320538064 0.0 38800.0 1.0 1.0026687299831225 375.8371656618988 38800.0
52 50 ml-exact 0 34.49193096160889 8160.106459602758 8160.106459602758 0.0 32951.0 1.0 1.0000000000000002 356.1081397753119 32951.0
53 51 ml-exact 1 39.43942403793335 8329.665354500348 8329.665354500348 0.0 33716.0 1.0 1.0090376984767917 198.34454105955817 749.2444444444444
54 52 ml-exact 2 54.0330810546875 8247.871141626507 8247.871141626507 0.0 48098.0 1.0 1.0022162274368718 391.2351351957892 2290.3809523809523
55 53 ml-exact 3 15.311645030975342 8386.859108879815 8386.859108879815 0.0 10140.0 1.0 1.0 165.37065533668084 10140.0
56 54 ml-exact 4 24.112047910690308 8197.045478427175 8197.045478427175 0.0 32151.0 1.0 1.0 93.80763330031203 1148.25
57 55 ml-exact 5 33.69559407234192 8184.416683317542 8184.416683317542 0.0 31637.0 1.0 1.0000000000000002 251.89966224345207 3515.222222222222
58 56 ml-exact 6 25.395578861236572 8146.291920190363 8146.291920190363 0.0 18684.0 1.0 1.0000000000000002 253.83378293361804 18684.0
59 57 ml-exact 7 45.65329885482788 8332.628442208696 8332.628442208696 0.0 34261.0 1.0 1.0048882560944687 481.0667621344588 34261.0
60 58 ml-exact 8 45.959444999694824 8189.394992049158 8189.394992049158 0.0 32915.0 1.0 1.0 911.0616203340486 32915.0
61 59 ml-exact 9 27.292019844055176 8264.94306032112 8264.94306032112 0.0 22256.0 1.0 1.0 271.551146378204 22256.0
62 60 ml-exact 10 33.28360414505005 8225.694775199881 8225.694775199883 2.2113504734336021e-16 32743.0 1.0 1.0023427358501629 261.6000412259086 inf 2976.6363636363635
63 61 ml-exact 11 13.287060976028442 8380.21322380759 8380.21322380759 0.0 15760.0 1.0 1.0 151.33486759210984 15760.0
64 62 ml-exact 12 30.385483980178833 8335.12040209855 8335.12040209855 0.0 26800.0 1.0 1.0019781200731896 258.03791222585767 1165.2173913043478
65 63 ml-exact 13 53.78090000152588 8180.950128996085 8180.950128996085 0.0 38849.0 1.0 1.0015658240912468 199.91566748763452 554.9857142857143
66 64 ml-exact 14 32.64224600791931 8335.244300219336 8335.244300219336 0.0 30763.0 1.0 1.0 279.53616616406106 4394.714285714285
67 65 ml-exact 15 33.97071599960327 8281.242353501702 8281.242353501702 0.0 30903.0 1.0 1.0 847.425075979707 30903.0
68 66 ml-exact 16 34.40068793296814 8269.820198565656 8269.820198565656 0.0 25773.0 1.0 1.000403224416854 1150.6411078414953 25773.0
69 67 ml-exact 17 29.94601798057556 8349.788875581982 8349.788875581982 0.0 26524.0 1.0 1.0 537.0046516599328 26524.0
70 68 ml-exact 18 26.21188998222351 8354.975512102363 8354.975512102363 0.0 23595.0 1.0 1.0 443.3823132050057 23595.0
71 69 ml-exact 19 44.91053318977356 8148.698058722395 8148.698058722396 1.1161227170509796e-16 36233.0 1.0 1.0009667891966683 324.6270712662061 inf 1341.962962962963
72 70 ml-exact 20 24.929107904434204 8254.22546708772 8254.225467087721 2.20370695082023e-16 19171.0 1.0 1.0043848943374123 365.0541051029243 inf 19171.0
73 71 ml-exact 21 23.808892011642456 8337.747084077018 8337.74708407702 2.1816317827865812e-16 16213.0 1.0 1.0000000000000002 272.0672255399839 inf 16213.0
74 72 ml-exact 22 29.496449947357178 8372.097133312143 8372.097133312145 2.1726807209488663e-16 23405.0 1.0 1.0000000000000002 256.27938261145164 inf 900.1923076923077
75 73 ml-exact 23 54.53324818611145 8163.180180623385 8163.180180623385 0.0 44205.0 1.0 1.0036539041416717 913.2247916857979 44205.0
76 74 ml-exact 24 35.66223120689392 8251.926305990946 8251.926305990948 2.2043209501583402e-16 30816.0 1.0 1.002542812972684 146.50973902584275 inf 459.94029850746267
77 75 ml-exact 25 29.14737296104431 8208.77608322561 8208.776083225612 2.2159081757180675e-16 25610.0 1.0 1.0000000000000002 136.9159574646799 inf 582.0454545454545
78 76 ml-exact 26 27.671333074569702 8263.930518826672 8263.930518826672 0.0 19654.0 1.0 1.0 436.59406398705966 19654.0
79 77 ml-exact 27 33.428922176361084 8198.51655526816 8198.51655526816 0.0 34427.0 1.0 1.0 227.5130533834623 2025.1176470588234
80 78 ml-exact 28 26.386518955230713 8429.328796791307 8429.328796791307 0.0 21051.0 1.0 1.0 145.45157072019325 1315.6875
81 79 ml-exact 29 32.452534914016724 8471.392061275592 8471.392061275594 2.1472142835423904e-16 25241.0 1.0 1.00713405678795 216.47430679803114 inf 1051.7083333333333
82 80 ml-exact 30 33.65191102027893 8300.292335288888 8300.292335288888 0.0 27290.0 1.0 1.0 343.59732466710483 2274.1666666666665
83 81 ml-exact 31 26.8163058757782 8472.780900844042 8472.780900844042 0.0 17085.0 1.0 1.00000002060584 476.29543885799944 17085.0
84 82 ml-exact 32 27.298824071884155 8176.089207977811 8176.089207977811 0.0 21127.0 1.0 1.0 406.66711773146375 21127.0
85 83 ml-exact 33 27.07152795791626 8349.997774829048 8349.997774829048 0.0 20768.0 1.0 1.0 232.96788608711708 20768.0
86 84 ml-exact 34 51.8715980052948 8228.164027545597 8228.164027545597 0.0 42602.0 1.0 1.0 305.92622991159624 2242.2105263157896
87 85 ml-exact 35 26.559547185897827 8348.576374334334 8348.576374334334 0.0 15315.0 1.0 1.0 442.37828511067517 15315.0
88 86 ml-exact 36 42.17573404312134 8200.622970997243 8200.622970997245 2.2181112459126466e-16 32284.0 1.0 1.0039798679142482 427.1873392594525 inf 32284.0
89 87 ml-exact 37 20.249451875686646 8449.787502150532 8449.787502150532 0.0 13815.0 1.0 1.0 131.9878862943405 552.6
90 88 ml-exact 38 34.309616804122925 8323.602064698229 8323.602064698229 0.0 31546.0 1.0 1.0 312.62211828396147 31546.0
91 89 ml-exact 39 28.144772052764893 8230.716290385615 8230.716290385615 0.0 22759.0 1.0 1.0 141.57969032969933 875.3461538461538
92 90 ml-exact 40 54.61736702919006 8028.769787381955 8028.769787381955 0.0 46343.0 1.0 1.0059686353257602 454.1084931561159 46343.0
93 91 ml-exact 41 30.99381184577942 8214.630250558439 8214.630250558439 0.0 28492.0 1.0 1.0 145.98370677815547 1676.0
94 92 ml-exact 42 19.046553134918213 8482.332346423325 8482.332346423327 2.1444448640506932e-16 9292.0 1.0 1.0 206.6368188802036 1.0000000000000002 9292.0
95 93 ml-exact 43 29.105360984802246 8350.150643446867 8350.150643446867 0.0 24245.0 1.0 1.0 532.674448133975 24245.0
96 94 ml-exact 44 28.813607215881348 8325.739376420757 8325.739376420757 0.0 22941.0 1.0 1.0 270.0282377440192 22941.0
97 95 ml-exact 45 39.90794801712036 8320.79317232281 8320.79317232281 0.0 33304.0 1.0 1.0 448.459240127851 33304.0
98 96 ml-exact 46 23.966022968292236 8341.756982876166 8341.756982876166 0.0 16789.0 1.0 1.0 467.53853953488374 16789.0
99 97 ml-exact 47 27.642159938812256 8206.985832918781 8206.985832918781 0.0 24637.0 1.0 1.0 198.0678701569822 24637.0
100 98 ml-exact 48 33.94082999229431 8197.315974091358 8197.315974091358 0.0 33963.0 1.0 1.0073005569898068 141.44484960609344 465.24657534246575
101 99 ml-exact 49 37.428488969802856 8090.681320538064 8090.681320538064 0.0 39891.0 1.0 1.0026687299831225 286.0107910064622 39891.0
102 100 ml-heuristic 0 0.09685802459716797 8160.106459602758 8160.106459602758 0.0 1.0 1.0 1.0000000000000002 1.0 1.0
103 101 ml-heuristic 1 0.19884300231933594 8255.05862375065 8255.05862375065 0.0 45.0 0.9910432499296759 1.0 1.0 1.0
104 102 ml-heuristic 2 0.1381089687347412 8229.632404496291 8229.632404496293 2.210292409357245e-16 21.0 0.9977886733658864 1.0 1.0 inf 1.0
105 103 ml-heuristic 3 0.0925898551940918 8386.859108879815 8386.859108879815 0.0 1.0 1.0 1.0 1.0 1.0
106 104 ml-heuristic 4 0.2570371627807617 8197.045478427175 8197.045478427175 0.0 28.0 1.0 1.0 1.0 1.0
107 105 ml-heuristic 5 0.13376593589782715 8184.416683317542 8184.416683317542 0.0 9.0 1.0 1.0000000000000002 1.0 1.0
108 106 ml-heuristic 6 0.10004806518554688 8146.291920190363 8146.291920190363 0.0 1.0 1.0 1.0000000000000002 1.0 1.0
109 107 ml-heuristic 7 0.09490013122558594 8292.094560437725 8292.094560437725 0.0 1.0 0.9951355227162599 1.0 1.0 1.0
110 108 ml-heuristic 8 0.0504460334777832 8189.394992049158 8189.39499204916 2.22115236267375e-16 1.0 1.0 1.0000000000000002 1.0 inf 1.0
111 109 ml-heuristic 9 0.10050415992736816 8264.94306032112 8265.728238597903 9.500105095127722e-05 1.0 1.0 1.0000950010509513 1.0 inf 1.0
112 110 ml-heuristic 10 0.12723088264465332 8206.469185635438 8206.469185635438 0.0 11.0 0.9976627397332555 1.0 1.0 1.0
113 111 ml-heuristic 11 0.087799072265625 8380.21322380759 8380.213223807592 2.1705765175261136e-16 1.0 1.0 1.0000000000000002 1.0 inf 1.0
114 112 ml-heuristic 12 0.11775588989257812 8318.665083714313 8318.665083714313 0.0 23.0 0.9980257851608126 1.0 1.0 1.0
115 113 ml-heuristic 13 0.26901793479919434 8168.160226931591 8168.160226931591 0.0 70.0 0.9984366238807443 1.0 1.0 1.0
116 114 ml-heuristic 14 0.11677289009094238 8335.244300219336 8335.244300219336 0.0 7.0 1.0 1.0 1.0 1.0
117 115 ml-heuristic 15 0.040086984634399414 8281.242353501702 8281.242353501702 0.0 1.0 1.0 1.0 1.0 1.0
118 116 ml-heuristic 16 0.029896974563598633 8266.48694918614 8266.48694918614 0.0 1.0 0.9995969381075426 1.0 1.0 1.0
119 117 ml-heuristic 17 0.05576491355895996 8349.788875581982 8349.788875581982 0.0 1.0 1.0 1.0 1.0 1.0
120 118 ml-heuristic 18 0.059118032455444336 8354.975512102363 8354.975512102363 0.0 1.0 1.0 1.0 1.0 1.0
121 119 ml-heuristic 19 0.13834500312805176 8140.8275945520445 8140.8275945520445 0.0 27.0 0.9990341445819156 1.0 1.0 1.0
122 120 ml-heuristic 20 0.06828880310058594 8218.189574160206 8218.189574160206 0.0 1.0 0.9956342490193416 1.0 1.0 1.0
123 121 ml-heuristic 21 0.08751106262207031 8337.747084077018 8337.747084077018 0.0 1.0 1.0 1.0 1.0 1.0
124 122 ml-heuristic 22 0.11509490013122559 8372.097133312143 8372.097133312143 0.0 26.0 1.0 1.0 1.0 1.0
125 123 ml-heuristic 23 0.05971503257751465 8133.46129271979 8133.46129271979 0.0 1.0 0.9963593982680748 1.0 1.0 1.0
126 124 ml-heuristic 24 0.24341201782226562 8230.99642151221 8230.99642151221 0.0 67.0 0.9974636365252632 1.0 1.0 1.0
127 125 ml-heuristic 25 0.21288514137268066 8208.77608322561 8208.77608322561 0.0 44.0 1.0 1.0 1.0 1.0
128 126 ml-heuristic 26 0.06338000297546387 8263.930518826672 8263.930518826672 0.0 1.0 1.0 1.0 1.0 1.0
129 127 ml-heuristic 27 0.14693188667297363 8198.51655526816 8198.51655526816 0.0 17.0 1.0 1.0 1.0 1.0
130 128 ml-heuristic 28 0.1814110279083252 8429.328796791307 8429.328796791307 0.0 16.0 1.0 1.0 1.0 1.0
131 129 ml-heuristic 29 0.14991402626037598 8411.384764698932 8411.384764698932 0.0 24.0 0.99291647746408 1.0 1.0 1.0
132 130 ml-heuristic 30 0.09793996810913086 8300.292335288888 8300.292335288888 0.0 12.0 1.0 1.0 1.0 1.0
133 131 ml-heuristic 31 0.05630183219909668 8472.780726255278 8472.780726255278 0.0 1.0 0.9999999793941604 1.0 1.0 1.0
134 132 ml-heuristic 32 0.06712818145751953 8176.089207977811 8176.089207977811 0.0 1.0 1.0 1.0 1.0 1.0
135 133 ml-heuristic 33 0.11620283126831055 8349.997774829048 8349.997774829048 0.0 1.0 1.0 1.0 1.0 1.0
136 134 ml-heuristic 34 0.1695559024810791 8228.164027545597 8228.164027545597 0.0 19.0 1.0 1.0 1.0 1.0
137 135 ml-heuristic 35 0.060038089752197266 8348.576374334334 8348.576374334334 0.0 1.0 1.0 1.0 1.0 1.0
138 136 ml-heuristic 36 0.09872889518737793 8168.114952378382 8168.114952378382 0.0 1.0 0.9960359086457419 1.0 1.0 1.0
139 137 ml-heuristic 37 0.15341901779174805 8449.787502150532 8449.787502150532 0.0 25.0 1.0 1.0 1.0 1.0
140 138 ml-heuristic 38 0.10974788665771484 8323.602064698229 8323.602064698229 0.0 1.0 1.0 1.0 1.0 1.0
141 139 ml-heuristic 39 0.1987910270690918 8230.716290385615 8230.716290385615 0.0 26.0 1.0 1.0 1.0 1.0
142 140 ml-heuristic 40 0.12027382850646973 7981.13331314949 7981.13331314949 0.0 1.0 0.9940667779131829 1.0 1.0 1.0
143 141 ml-heuristic 41 0.2123100757598877 8214.630250558439 8214.630250558439 0.0 17.0 1.0 1.0 1.0 1.0
144 142 ml-heuristic 42 0.09217405319213867 8482.332346423325 8482.332346423327 2.1444448640506932e-16 1.0 1.0 1.0 1.0 1.0000000000000002 1.0
145 143 ml-heuristic 43 0.05464005470275879 8350.150643446867 8350.150643446867 0.0 1.0 1.0 1.0 1.0 1.0
146 144 ml-heuristic 44 0.10670590400695801 8325.739376420757 8325.739376420757 0.0 1.0 1.0 1.0 1.0 1.0
147 145 ml-heuristic 45 0.0889890193939209 8320.79317232281 8320.79317232281 0.0 1.0 1.0 1.0 1.0 1.0
148 146 ml-heuristic 46 0.05125999450683594 8341.756982876166 8341.756982876166 0.0 1.0 1.0 1.0 1.0 1.0
149 147 ml-heuristic 47 0.13955903053283691 8206.985832918781 8206.985832918781 0.0 1.0 1.0 1.0 1.0 1.0
150 148 ml-heuristic 48 0.2399580478668213 8137.904736782855 8137.904736782855 0.0 73.0 0.9927523548566044 1.0 1.0 1.0
151 149 ml-heuristic 49 0.13086390495300293 8069.146946144667 8069.146946144667 0.0 1.0 0.9973383731801755 1.0 1.0 1.0

Binary file not shown.

Before

Width:  |  Height:  |  Size: 68 KiB

View File

@@ -1,51 +0,0 @@
,Solver,Instance,Wallclock Time,Lower Bound,Upper Bound,Gap,Nodes,Mode,Sense,Predicted LB,Predicted UB,Relative Lower Bound,Relative Upper Bound,Relative Wallclock Time,Relative Gap,Relative Nodes
0,baseline,0,29.597511053085327,13540.0,13540.0,0.0,1488.0,exact,min,,,1.0,1.0,1.0,,1.0
1,baseline,1,100.47623896598816,13567.0,13567.0,0.0,5209.0,exact,min,,,1.0,1.0,1.0,,1.0
2,baseline,2,95.63535189628601,13562.0,13562.0,0.0,5738.0,exact,min,,,1.0,1.0,1.0,,1.0
3,baseline,3,116.40385484695435,13522.0,13522.0,0.0,4888.0,exact,min,,,1.0,1.0,1.0,,1.0
4,baseline,4,52.82231903076172,13534.0,13534.0,0.0,2432.0,exact,min,,,1.0,1.0,1.0,,1.0
5,baseline,5,130.4400429725647,13532.0,13532.0,0.0,5217.0,exact,min,,,1.0,1.0,1.0,,1.0
6,baseline,6,138.90338110923767,13535.0,13535.0,0.0,5910.0,exact,min,,,1.0,1.0,1.0,,1.0
7,baseline,7,162.50647616386414,13613.0,13613.0,0.0,5152.0,exact,min,,,1.0,1.0,1.0,,1.0
8,baseline,8,135.88944792747498,13579.999997631374,13579.999997631372,-1.3394620057902246e-16,6720.0,exact,min,,,1.0,1.0,1.0,1.0,1.0
9,baseline,9,62.36928915977478,13583.999994506432,13583.999994506432,0.0,3583.0,exact,min,,,1.0,1.0,1.0,,1.0
10,baseline,10,248.86321592330933,13577.0,13578.0,7.365397363187744e-05,13577.0,exact,min,,,1.0,1.0,1.0,1.0,1.0
11,baseline,11,64.44093084335327,13574.999997985586,13574.999997985586,0.0,3149.0,exact,min,,,1.0,1.0,1.0,,1.0
12,baseline,12,74.64304614067078,13544.0,13544.0,0.0,4925.0,exact,min,,,1.0,1.0,1.0,,1.0
13,baseline,13,60.252323150634766,13534.0,13534.0,0.0,4007.0,exact,min,,,1.0,1.0,1.0,,1.0
14,baseline,14,151.05377101898193,13550.0,13551.0,7.380073800738008e-05,5389.0,exact,min,,,1.0,1.0,1.0,1.0,1.0
15,baseline,15,94.33260798454285,13593.0,13594.0,7.356727727506805e-05,4240.0,exact,min,,,1.0,1.0,1.0,1.0,1.0
16,baseline,16,112.65512180328369,13594.0,13594.0,0.0,5678.0,exact,min,,,1.0,1.0,1.0,,1.0
17,baseline,17,94.68812704086304,13543.0,13543.0,0.0,4110.0,exact,min,,,1.0,1.0,1.0,,1.0
18,baseline,18,119.84407782554626,13525.0,13525.0,0.0,4925.0,exact,min,,,1.0,1.0,1.0,,1.0
19,baseline,19,96.70060396194458,13564.0,13564.0,0.0,4242.0,exact,min,,,1.0,1.0,1.0,,1.0
20,baseline,20,206.73002099990845,13569.0,13569.0,0.0,5164.0,exact,min,,,1.0,1.0,1.0,,1.0
21,baseline,21,101.60346388816833,13566.0,13566.0,0.0,3797.0,exact,min,,,1.0,1.0,1.0,,1.0
22,baseline,22,39.24613690376282,13565.0,13565.0,0.0,1434.0,exact,min,,,1.0,1.0,1.0,,1.0
23,baseline,23,89.74621176719666,13580.0,13580.0,0.0,3758.0,exact,min,,,1.0,1.0,1.0,,1.0
24,baseline,24,69.45808696746826,13542.999999999996,13542.999999999998,1.343121467581671e-16,3608.0,exact,min,,,1.0,1.0,1.0,1.0,1.0
25,baseline,25,130.97386503219604,13542.0,13542.0,0.0,6687.0,exact,min,,,1.0,1.0,1.0,,1.0
26,baseline,26,98.3358142375946,13531.999999377458,13531.99999937746,1.3442132749257606e-16,5284.0,exact,min,,,1.0,1.0,1.0,1.0,1.0
27,baseline,27,101.37863302230835,13521.0,13522.0,7.395902669920864e-05,3512.0,exact,min,,,1.0,1.0,1.0,1.0,1.0
28,baseline,28,47.17776012420654,13571.0,13571.0,0.0,2742.0,exact,min,,,1.0,1.0,1.0,,1.0
29,baseline,29,122.19579315185547,13594.0,13594.9999861121,7.356084390904645e-05,5138.0,exact,min,,,1.0,1.0,1.0,1.0,1.0
30,baseline,30,159.65594601631165,13577.0,13577.0,0.0,5170.0,exact,min,,,1.0,1.0,1.0,,1.0
31,baseline,31,64.20995998382568,13582.0,13582.0,0.0,2716.0,exact,min,,,1.0,1.0,1.0,,1.0
32,baseline,32,73.25116801261902,13523.0,13524.0,7.394808844191378e-05,2705.0,exact,min,,,1.0,1.0,1.0,1.0,1.0
33,baseline,33,73.00323796272278,13548.0,13548.0,0.0,3823.0,exact,min,,,1.0,1.0,1.0,,1.0
34,baseline,34,75.30102896690369,13557.0,13557.0,0.0,2495.0,exact,min,,,1.0,1.0,1.0,,1.0
35,baseline,35,95.78053402900696,13567.999997100109,13567.999997100109,0.0,5380.0,exact,min,,,1.0,1.0,1.0,,1.0
36,baseline,36,59.77940106391907,13553.999999666667,13553.999999666667,0.0,2236.0,exact,min,,,1.0,1.0,1.0,,1.0
37,baseline,37,111.62521696090698,13532.0,13532.0,0.0,4730.0,exact,min,,,1.0,1.0,1.0,,1.0
38,baseline,38,101.59809303283691,13514.0,13514.0,0.0,4724.0,exact,min,,,1.0,1.0,1.0,,1.0
39,baseline,39,136.7306661605835,13538.0,13538.0,0.0,5301.0,exact,min,,,1.0,1.0,1.0,,1.0
40,baseline,40,96.18307614326477,13578.0,13578.0,0.0,5286.0,exact,min,,,1.0,1.0,1.0,,1.0
41,baseline,41,193.25571990013123,13526.0,13526.0,0.0,8946.0,exact,min,,,1.0,1.0,1.0,,1.0
42,baseline,42,98.80436420440674,13529.0,13529.0,0.0,2757.0,exact,min,,,1.0,1.0,1.0,,1.0
43,baseline,43,91.02266597747803,13565.0,13565.0,0.0,4119.0,exact,min,,,1.0,1.0,1.0,,1.0
44,baseline,44,44.981120109558105,13553.0,13553.0,0.0,1975.0,exact,min,,,1.0,1.0,1.0,,1.0
45,baseline,45,99.74598288536072,13521.0,13521.0,0.0,5262.0,exact,min,,,1.0,1.0,1.0,,1.0
46,baseline,46,70.65784502029419,13542.99999940547,13542.99999940547,0.0,3270.0,exact,min,,,1.0,1.0,1.0,,1.0
47,baseline,47,62.16441297531128,13564.0,13564.0,0.0,3631.0,exact,min,,,1.0,1.0,1.0,,1.0
48,baseline,48,190.54906916618347,13552.0,13552.0,0.0,9373.0,exact,min,,,1.0,1.0,1.0,,1.0
49,baseline,49,73.46178817749023,13524.0,13524.0,0.0,4053.0,exact,min,,,1.0,1.0,1.0,,1.0
1 Solver Instance Wallclock Time Lower Bound Upper Bound Gap Nodes Mode Sense Predicted LB Predicted UB Relative Lower Bound Relative Upper Bound Relative Wallclock Time Relative Gap Relative Nodes
2 0 baseline 0 29.597511053085327 13540.0 13540.0 0.0 1488.0 exact min 1.0 1.0 1.0 1.0
3 1 baseline 1 100.47623896598816 13567.0 13567.0 0.0 5209.0 exact min 1.0 1.0 1.0 1.0
4 2 baseline 2 95.63535189628601 13562.0 13562.0 0.0 5738.0 exact min 1.0 1.0 1.0 1.0
5 3 baseline 3 116.40385484695435 13522.0 13522.0 0.0 4888.0 exact min 1.0 1.0 1.0 1.0
6 4 baseline 4 52.82231903076172 13534.0 13534.0 0.0 2432.0 exact min 1.0 1.0 1.0 1.0
7 5 baseline 5 130.4400429725647 13532.0 13532.0 0.0 5217.0 exact min 1.0 1.0 1.0 1.0
8 6 baseline 6 138.90338110923767 13535.0 13535.0 0.0 5910.0 exact min 1.0 1.0 1.0 1.0
9 7 baseline 7 162.50647616386414 13613.0 13613.0 0.0 5152.0 exact min 1.0 1.0 1.0 1.0
10 8 baseline 8 135.88944792747498 13579.999997631374 13579.999997631372 -1.3394620057902246e-16 6720.0 exact min 1.0 1.0 1.0 1.0 1.0
11 9 baseline 9 62.36928915977478 13583.999994506432 13583.999994506432 0.0 3583.0 exact min 1.0 1.0 1.0 1.0
12 10 baseline 10 248.86321592330933 13577.0 13578.0 7.365397363187744e-05 13577.0 exact min 1.0 1.0 1.0 1.0 1.0
13 11 baseline 11 64.44093084335327 13574.999997985586 13574.999997985586 0.0 3149.0 exact min 1.0 1.0 1.0 1.0
14 12 baseline 12 74.64304614067078 13544.0 13544.0 0.0 4925.0 exact min 1.0 1.0 1.0 1.0
15 13 baseline 13 60.252323150634766 13534.0 13534.0 0.0 4007.0 exact min 1.0 1.0 1.0 1.0
16 14 baseline 14 151.05377101898193 13550.0 13551.0 7.380073800738008e-05 5389.0 exact min 1.0 1.0 1.0 1.0 1.0
17 15 baseline 15 94.33260798454285 13593.0 13594.0 7.356727727506805e-05 4240.0 exact min 1.0 1.0 1.0 1.0 1.0
18 16 baseline 16 112.65512180328369 13594.0 13594.0 0.0 5678.0 exact min 1.0 1.0 1.0 1.0
19 17 baseline 17 94.68812704086304 13543.0 13543.0 0.0 4110.0 exact min 1.0 1.0 1.0 1.0
20 18 baseline 18 119.84407782554626 13525.0 13525.0 0.0 4925.0 exact min 1.0 1.0 1.0 1.0
21 19 baseline 19 96.70060396194458 13564.0 13564.0 0.0 4242.0 exact min 1.0 1.0 1.0 1.0
22 20 baseline 20 206.73002099990845 13569.0 13569.0 0.0 5164.0 exact min 1.0 1.0 1.0 1.0
23 21 baseline 21 101.60346388816833 13566.0 13566.0 0.0 3797.0 exact min 1.0 1.0 1.0 1.0
24 22 baseline 22 39.24613690376282 13565.0 13565.0 0.0 1434.0 exact min 1.0 1.0 1.0 1.0
25 23 baseline 23 89.74621176719666 13580.0 13580.0 0.0 3758.0 exact min 1.0 1.0 1.0 1.0
26 24 baseline 24 69.45808696746826 13542.999999999996 13542.999999999998 1.343121467581671e-16 3608.0 exact min 1.0 1.0 1.0 1.0 1.0
27 25 baseline 25 130.97386503219604 13542.0 13542.0 0.0 6687.0 exact min 1.0 1.0 1.0 1.0
28 26 baseline 26 98.3358142375946 13531.999999377458 13531.99999937746 1.3442132749257606e-16 5284.0 exact min 1.0 1.0 1.0 1.0 1.0
29 27 baseline 27 101.37863302230835 13521.0 13522.0 7.395902669920864e-05 3512.0 exact min 1.0 1.0 1.0 1.0 1.0
30 28 baseline 28 47.17776012420654 13571.0 13571.0 0.0 2742.0 exact min 1.0 1.0 1.0 1.0
31 29 baseline 29 122.19579315185547 13594.0 13594.9999861121 7.356084390904645e-05 5138.0 exact min 1.0 1.0 1.0 1.0 1.0
32 30 baseline 30 159.65594601631165 13577.0 13577.0 0.0 5170.0 exact min 1.0 1.0 1.0 1.0
33 31 baseline 31 64.20995998382568 13582.0 13582.0 0.0 2716.0 exact min 1.0 1.0 1.0 1.0
34 32 baseline 32 73.25116801261902 13523.0 13524.0 7.394808844191378e-05 2705.0 exact min 1.0 1.0 1.0 1.0 1.0
35 33 baseline 33 73.00323796272278 13548.0 13548.0 0.0 3823.0 exact min 1.0 1.0 1.0 1.0
36 34 baseline 34 75.30102896690369 13557.0 13557.0 0.0 2495.0 exact min 1.0 1.0 1.0 1.0
37 35 baseline 35 95.78053402900696 13567.999997100109 13567.999997100109 0.0 5380.0 exact min 1.0 1.0 1.0 1.0
38 36 baseline 36 59.77940106391907 13553.999999666667 13553.999999666667 0.0 2236.0 exact min 1.0 1.0 1.0 1.0
39 37 baseline 37 111.62521696090698 13532.0 13532.0 0.0 4730.0 exact min 1.0 1.0 1.0 1.0
40 38 baseline 38 101.59809303283691 13514.0 13514.0 0.0 4724.0 exact min 1.0 1.0 1.0 1.0
41 39 baseline 39 136.7306661605835 13538.0 13538.0 0.0 5301.0 exact min 1.0 1.0 1.0 1.0
42 40 baseline 40 96.18307614326477 13578.0 13578.0 0.0 5286.0 exact min 1.0 1.0 1.0 1.0
43 41 baseline 41 193.25571990013123 13526.0 13526.0 0.0 8946.0 exact min 1.0 1.0 1.0 1.0
44 42 baseline 42 98.80436420440674 13529.0 13529.0 0.0 2757.0 exact min 1.0 1.0 1.0 1.0
45 43 baseline 43 91.02266597747803 13565.0 13565.0 0.0 4119.0 exact min 1.0 1.0 1.0 1.0
46 44 baseline 44 44.981120109558105 13553.0 13553.0 0.0 1975.0 exact min 1.0 1.0 1.0 1.0
47 45 baseline 45 99.74598288536072 13521.0 13521.0 0.0 5262.0 exact min 1.0 1.0 1.0 1.0
48 46 baseline 46 70.65784502029419 13542.99999940547 13542.99999940547 0.0 3270.0 exact min 1.0 1.0 1.0 1.0
49 47 baseline 47 62.16441297531128 13564.0 13564.0 0.0 3631.0 exact min 1.0 1.0 1.0 1.0
50 48 baseline 48 190.54906916618347 13552.0 13552.0 0.0 9373.0 exact min 1.0 1.0 1.0 1.0
51 49 baseline 49 73.46178817749023 13524.0 13524.0 0.0 4053.0 exact min 1.0 1.0 1.0 1.0

View File

@@ -1,151 +0,0 @@
,Solver,Instance,Wallclock Time,Lower Bound,Upper Bound,Gap,Nodes,Mode,Sense,Predicted LB,Predicted UB,Relative Lower Bound,Relative Upper Bound,Relative Wallclock Time,Relative Gap,Relative Nodes
0,baseline,0,29.597511053085327,13540.0,13540.0,0.0,1488.0,exact,min,,,1.0,1.0,26.86009340160744,,1488.0
1,baseline,1,100.47623896598816,13567.0,13567.0,0.0,5209.0,exact,min,,,1.0,1.0,199.7436260690364,,5209.0
2,baseline,2,95.635351896286,13562.0,13562.0,0.0,5738.0,exact,min,,,0.9999262699992627,1.0,52.66283965751092,,14.133004926108374
3,baseline,3,116.40385484695436,13522.0,13522.0,0.0,4888.0,exact,min,,,1.0,1.0,32.32019218153368,,4888.0
4,baseline,4,52.82231903076172,13534.0,13534.0,0.0,2432.0,exact,min,,,1.0,1.0,58.92863357290153,,10.57391304347826
5,baseline,5,130.4400429725647,13532.0,13532.0,0.0,5217.0,exact,min,,,1.0,1.0,77.40752944139346,,115.93333333333334
6,baseline,6,138.9033811092377,13535.0,13535.0,0.0,5910.0,exact,min,,,1.0,1.0000000000688192,77.01429912590677,,8.288920056100983
7,baseline,7,162.5064761638641,13613.0,13613.0,0.0,5152.0,exact,min,,,1.0,1.0,99.48098992115096,,20.363636363636363
8,baseline,8,135.88944792747498,13579.999997631374,13579.999997631372,-1.3394620057902246e-16,6720.0,exact,min,,,0.9999999998255799,1.0,59.017368737577044,1.0,11.893805309734514
9,baseline,9,62.36928915977478,13583.999994506434,13583.999994506434,0.0,3583.0,exact,min,,,0.9999999995955855,1.0,10.957478805772942,,13.941634241245136
10,baseline,10,248.86321592330933,13577.0,13578.0,7.365397363187744e-05,13577.0,exact,min,,,0.9999263514508764,1.0,84.90020779976263,inf,20.95216049382716
11,baseline,11,64.44093084335327,13574.999997985586,13574.999997985586,0.0,3149.0,exact,min,,,1.0,1.0,40.90397152451879,,3149.0
12,baseline,12,74.64304614067079,13544.0,13544.0,0.0,4925.0,exact,min,,,1.0,1.0,86.04782361214066,,4925.0
13,baseline,13,60.25232315063477,13534.0,13534.0,0.0,4007.0,exact,min,,,1.0,1.0,25.318911215893348,,4007.0
14,baseline,14,151.0537710189819,13550.0,13551.0,7.380073800738008e-05,5389.0,exact,min,,,0.9999262047081396,1.0,66.91125769399989,inf,74.84722222222223
15,baseline,15,94.33260798454285,13593.0,13594.0,7.356727727506805e-05,4240.0,exact,min,,,0.9999264381344711,1.0,38.51179927436251,inf,12.011331444759207
16,baseline,16,112.65512180328369,13594.0,13594.0,0.0,5678.0,exact,min,,,1.0,1.0,77.31705863554674,,14.159600997506235
17,baseline,17,94.68812704086305,13543.0,13543.0,0.0,4110.0,exact,min,,,1.0,1.0,42.677072694980595,,37.706422018348626
18,baseline,18,119.84407782554626,13525.0,13525.0,0.0,4925.0,exact,min,,,1.0,1.0,277.4617640532422,,4925.0
19,baseline,19,96.70060396194458,13564.0,13564.0,0.0,4242.0,exact,min,,,1.0,1.0,65.06829984584466,,212.1
20,baseline,20,206.73002099990845,13569.0,13569.0,0.0,5164.0,exact,min,,,1.0,1.0,165.1539992327885,,5164.0
21,baseline,21,101.60346388816832,13566.0,13566.0,0.0,3797.0,exact,min,,,1.0,1.0,70.84710384515891,,15.434959349593496
22,baseline,22,39.246136903762824,13565.0,13565.0,0.0,1434.0,exact,min,,,1.0,1.0,30.91644064571905,,1434.0
23,baseline,23,89.74621176719666,13580.0,13580.0,0.0,3758.0,exact,min,,,1.0,1.0,44.49311196559062,,67.10714285714286
24,baseline,24,69.45808696746826,13542.999999999995,13542.999999999998,1.343121467581671e-16,3608.0,exact,min,,,0.9999999999999996,1.0,42.55165506627291,inf,3608.0
25,baseline,25,130.97386503219604,13542.0,13542.0,0.0,6687.0,exact,min,,,1.0,1.0,96.75237348307111,,6687.0
26,baseline,26,98.33581423759459,13531.999999377458,13531.999999377458,1.3442132749257606e-16,5284.0,exact,min,,,0.9999999999539948,1.0,43.94880897162418,inf,5284.0
27,baseline,27,101.37863302230836,13521.0,13522.0,7.395902669920864e-05,3512.0,exact,min,,,0.9999260464428339,1.0,71.28055311506921,inf,3512.0
28,baseline,28,47.17776012420654,13571.0,13571.0,0.0,2742.0,exact,min,,,1.0,1.0,42.99836374991145,,182.8
29,baseline,29,122.19579315185548,13594.0,13594.9999861121,7.356084390904645e-05,5138.0,exact,min,,,0.9999264435454212,1.0,105.57202248105209,inf,5138.0
30,baseline,30,159.65594601631162,13577.0,13577.0,0.0,5170.0,exact,min,,,1.0,1.0,86.70719628520685,,206.8
31,baseline,31,64.20995998382568,13582.0,13582.0,0.0,2716.0,exact,min,,,1.0,1.0,22.238063526379513,,90.53333333333333
32,baseline,32,73.25116801261902,13523.0,13524.0,7.394808844191378e-05,2705.0,exact,min,,,0.9999260573794735,1.0,97.84536245319715,inf,40.984848484848484
33,baseline,33,73.00323796272278,13548.0,13548.0,0.0,3823.0,exact,min,,,1.0,1.0,35.56819949839414,,4.0115424973767055
34,baseline,34,75.30102896690369,13557.0,13557.0,0.0,2495.0,exact,min,,,1.0,1.0,33.9975137664807,,31.1875
35,baseline,35,95.78053402900696,13567.999997100107,13567.999997100107,0.0,5380.0,exact,min,,,0.9999999997862696,1.0,91.7838074273266,,12.089887640449438
36,baseline,36,59.77940106391907,13553.999999666668,13553.999999666668,0.0,2236.0,exact,min,,,0.9999999999754071,1.000000000068143,80.66210177722816,,17.746031746031747
37,baseline,37,111.62521696090698,13532.0,13532.0,0.0,4730.0,exact,min,,,1.0,1.0,44.52758005552085,,44.205607476635514
38,baseline,38,101.59809303283691,13514.0,13514.0,0.0,4724.0,exact,min,,,1.0,1.0,67.8739169651946,,4724.0
39,baseline,39,136.7306661605835,13538.0,13538.0,0.0,5301.0,exact,min,,,1.0,1.0,80.14397099282577,,35.10596026490066
40,baseline,40,96.18307614326477,13578.0,13578.0,0.0,5286.0,exact,min,,,1.0,1.0,51.5351421556022,,5286.0
41,baseline,41,193.25571990013125,13526.0,13526.0,0.0,8946.0,exact,min,,,1.0,1.0,76.43245706873643,,8946.0
42,baseline,42,98.80436420440674,13529.0,13529.0,0.0,2757.0,exact,min,,,0.9999999999999999,1.0,35.10803321142842,,58.659574468085104
43,baseline,43,91.02266597747804,13565.0,13565.0,0.0,4119.0,exact,min,,,1.0,1.0,12.480728782988091,,15.426966292134832
44,baseline,44,44.981120109558105,13553.0,13553.0,0.0,1975.0,exact,min,,,1.0,1.0,25.092447494113404,,1975.0
45,baseline,45,99.74598288536072,13521.0,13521.0,0.0,5262.0,exact,min,,,1.0,1.0,39.85221202580209,,5262.0
46,baseline,46,70.65784502029419,13542.99999940547,13542.99999940547,0.0,3270.0,exact,min,,,0.9999999999561006,1.0,45.453685199539756,,3270.0
47,baseline,47,62.16441297531128,13564.0,13564.0,0.0,3631.0,exact,min,,,1.0,1.0,20.033164659276355,,3631.0
48,baseline,48,190.54906916618347,13552.0,13552.0,0.0,9373.0,exact,min,,,1.0,1.0,103.71179496429484,,9373.0
49,baseline,49,73.46178817749023,13524.0,13524.0,0.0,4053.0,exact,min,,,1.0,1.0,26.241432260088718,,8.966814159292035
50,ml-exact,0,11.3649320602417,13540.0,13540.0,0.0,1.0,exact,min,13534.675569817877,13534.83622755677,1.0,1.0,10.31381105301398,,1.0
51,ml-exact,1,10.329864025115967,13567.0,13567.0,0.0,1.0,exact,min,13566.029921819729,13566.142424385062,1.0,1.0,20.535447170501705,,1.0
52,ml-exact,2,12.315430164337158,13562.0,13562.0,0.0,406.0,exact,min,13545.26825630499,13545.412645404165,0.9999262699992627,1.0,6.78165041689932,,1.0
53,ml-exact,3,12.996630907058716,13522.0,13522.0,0.0,37.0,exact,min,13513.490196843653,13513.683391861978,1.0,1.0,3.6085884714116796,,37.0
54,ml-exact,4,11.032249212265015,13534.0,13534.0,0.0,230.0,exact,min,13552.471283116225,13552.604609540394,1.0,1.0,12.307588595947369,,1.0
55,ml-exact,5,13.653040885925293,13532.0,13532.0,0.0,45.0,exact,min,13557.55577263004,13557.681290107144,1.0,1.0,8.102175836940628,,1.0
56,ml-exact,6,16.461652040481567,13535.0,13535.0,0.0,1805.0,exact,min,13536.370399655816,13536.528454412353,1.0,1.0000000000688192,9.127082323181316,,2.5315568022440393
57,ml-exact,7,13.48779296875,13613.0,13613.0,0.0,253.0,exact,min,13595.689443983643,13595.75639435777,1.0,1.0,8.256772456439219,,1.0
58,ml-exact,8,14.816275835037231,13580.0,13580.0,0.0,565.0,exact,min,13588.910124631891,13588.987486935437,1.0,1.0000000001744203,6.434771997460219,-0.0,1.0
59,ml-exact,9,14.60462999343872,13584.0,13584.0,0.0,257.0,exact,min,13569.84328895509,13569.949934810124,1.0,1.0000000004044145,2.565844917829724,,1.0
60,ml-exact,10,14.660763025283813,13578.0,13578.0,0.0,648.0,exact,min,13568.148459117152,13568.25770795454,1.0,1.0,5.0015500391718986,,1.0
61,ml-exact,11,10.747740983963013,13574.0,13574.999999323794,7.36702021360194e-05,1.0,exact,min,13564.758799441275,13564.873254243374,0.9999263353233345,1.000000000098579,6.822143712194244,inf,1.0
62,ml-exact,12,11.216827154159546,13544.0,13544.0,0.0,1.0,exact,min,13538.912644412721,13539.06679469573,1.0,1.0,12.930656160923881,,1.0
63,ml-exact,13,10.66540789604187,13534.0,13534.0,0.0,1.0,exact,min,13559.674309927463,13559.796573676624,1.0,1.0,4.4817610588402195,,1.0
64,ml-exact,14,12.637185096740723,13551.0,13551.0,0.0,72.0,exact,min,13548.657915980866,13548.797099115332,1.0,1.0,5.597807607388606,,1.0
65,ml-exact,15,15.559112071990967,13594.0,13594.0,0.0,353.0,exact,min,13560.52172484643,13560.642687104417,1.0,1.0,6.352091962749635,,1.0
66,ml-exact,16,14.185301065444946,13594.0,13594.0,0.0,500.0,exact,min,13552.89499057571,13553.027666254291,1.0,1.0,9.735604885812853,,1.2468827930174564
67,ml-exact,17,12.099143028259277,13543.0,13543.0,0.0,109.0,exact,min,13535.522984736846,13535.682340984562,1.0,1.0,5.453228643345678,,1.0
68,ml-exact,18,9.592709064483643,13525.0,13525.0,0.0,1.0,exact,min,13525.777713168703,13525.952036564957,1.0,1.0,22.208940377976713,,1.0
69,ml-exact,19,15.68299388885498,13564.0,13564.0,0.0,20.0,exact,min,13560.098017386947,13560.21963039052,1.0,1.0,10.55283738705663,,1.0
70,ml-exact,20,11.181609153747559,13569.0,13569.0,0.0,1.0,exact,min,13549.92903835932,13550.06626925702,1.0,1.0,8.932846137524376,,1.0
71,ml-exact,21,12.961982011795044,13566.0,13566.0,0.0,246.0,exact,min,13553.742405494679,13553.873779682082,1.0,1.0,9.038263563922284,,1.0
72,ml-exact,22,10.162704944610596,13564.0,13565.0,7.372456502506635e-05,1.0,exact,min,13551.200160737772,13551.335439398708,0.9999262808698858,1.0,8.005747546324356,inf,1.0
73,ml-exact,23,14.439340114593506,13580.0,13580.0,0.0,56.0,exact,min,13560.945432305916,13561.065743818312,1.0,1.0,7.158532530536033,,1.0
74,ml-exact,24,8.9430251121521,13543.0,13543.0,0.0,1.0,exact,min,13545.691963764473,13545.835702118062,1.0,1.0000000000000002,5.478707180627428,,1.0
75,ml-exact,25,11.13078498840332,13542.0,13542.0,0.0,1.0,exact,min,13528.31995792561,13528.490376848333,1.0,1.0,8.222479088427512,,1.0
76,ml-exact,26,10.45563006401062,13532.0,13532.0,0.0,1.0,exact,min,13538.488936953237,13538.643737981833,1.0,1.0000000000460052,4.67289046136253,,1.0
77,ml-exact,27,12.658456087112427,13522.0,13522.0,0.0,1.0,exact,min,13537.641522034268,13537.797624554041,1.0,1.0,8.900314835312852,,1.0
78,ml-exact,28,11.49683690071106,13571.0,13571.0,0.0,15.0,exact,min,13560.098017386947,13560.21963039052,1.0,1.0,10.478351955003774,,1.0
79,ml-exact,29,10.038163900375366,13594.0,13595.0,7.356186552890981e-05,1.0,exact,min,13571.961826252513,13572.065218379603,0.9999264435454212,1.0000000010215446,8.672551138007995,inf,1.0
80,ml-exact,30,10.994755983352661,13576.0,13577.0,7.365939893930465e-05,25.0,exact,min,13559.250602467977,13559.373516962729,0.9999263460263681,1.0,5.971117825195893,inf,1.0
81,ml-exact,31,12.409696102142334,13581.0,13582.0,7.363228039172373e-05,30.0,exact,min,13583.401927658593,13583.48774965479,0.9999263731409218,1.0,4.297894132499397,inf,1.0
82,ml-exact,32,11.40560007095337,13524.0,13524.0,0.0,98.0,exact,min,13535.522984736846,13535.682340984562,1.0,1.0,15.235048166691241,,1.4848484848484849
83,ml-exact,33,14.968575954437256,13548.0,13548.0,0.0,953.0,exact,min,13552.89499057571,13553.027666254291,1.0,1.0,7.292899748174851,,1.0
84,ml-exact,34,10.275269031524658,13557.0,13557.0,0.0,80.0,exact,min,13543.573426467052,13543.720418548583,1.0,1.0,4.63916104661852,,1.0
85,ml-exact,35,13.136114120483398,13568.0,13568.0,0.0,445.0,exact,min,13544.42084138602,13544.566531976374,1.0,1.0000000002137304,12.587970833538003,,1.0
86,ml-exact,36,11.606173992156982,13553.999998743056,13553.999998743056,0.0,164.0,exact,min,13525.354005709218,13525.528979851062,0.9999999999072641,1.0,15.660551479908223,,1.3015873015873016
87,ml-exact,37,15.051767110824585,13532.0,13532.0,0.0,107.0,exact,min,13539.336351872207,13539.489851409624,1.0,1.0,6.004187792432415,,1.0
88,ml-exact,38,10.445327997207642,13514.0,13514.0,0.0,1.0,exact,min,13529.591080304062,13529.75954699002,1.0,1.0,6.978136144027363,,1.0
89,ml-exact,39,12.747802019119263,13538.0,13538.0,0.0,151.0,exact,min,13537.217814574784,13537.374567840145,1.0,1.0,7.472058053477855,,1.0
90,ml-exact,40,14.315036058425903,13578.0,13578.0,0.0,1.0,exact,min,13566.877336738698,13566.988537812853,1.0,1.0,7.670033521642671,,1.0
91,ml-exact,41,10.27357292175293,13525.0,13526.0,7.393715341959335e-05,1.0,exact,min,13521.540638573859,13521.721469426,0.9999260683128789,1.0,4.063188513593281,inf,1.0
92,ml-exact,42,12.76089596748352,13529.0,13529.0,0.0,47.0,exact,min,13530.014787763548,13530.182603703915,0.9999999999999999,1.0,4.534313469262858,,1.0
93,ml-exact,43,16.610208988189697,13565.0,13565.0,0.0,267.0,exact,min,13560.945432305916,13561.065743818312,1.0,1.0,2.2775372615612164,,1.0
94,ml-exact,44,9.052951097488403,13553.0,13553.0,0.0,1.0,exact,min,13571.114411333543,13571.219104951811,1.0,1.0,5.050134356975125,,1.0
95,ml-exact,45,12.605960130691528,13521.0,13521.0,0.0,1.0,exact,min,13518.998393816952,13519.183129142624,1.0,1.0,5.036547652194804,,1.0
96,ml-exact,46,12.235252141952515,13543.0,13543.0,0.0,1.0,exact,min,13543.149719007566,13543.297361834688,1.0,1.0000000000438993,7.870849996027641,,1.0
97,ml-exact,47,11.854049921035767,13564.0,13564.0,0.0,1.0,exact,min,13567.301044198182,13567.41159452675,1.0,1.0,3.8200977469489614,,1.0
98,ml-exact,48,11.03400993347168,13551.999999999978,13552.000000000004,1.8791212846548136e-15,1.0,exact,min,13558.403187549009,13558.527403534938,0.9999999999999983,1.0000000000000002,6.005576310930073,inf,1.0
99,ml-exact,49,10.517628908157349,13524.0,13524.0,0.0,547.0,exact,min,13501.626387978087,13501.837803872895,1.0,1.0,3.757023254910798,,1.2101769911504425
100,ml-heuristic,0,1.1019139289855957,13540.0,13540.0,0.0,787.0,heuristic,min,13534.675569817877,13534.83622755677,1.0,1.0,1.0,,787.0
101,ml-heuristic,1,0.503026008605957,13567.0,13567.0,0.0,142.0,heuristic,min,13566.029921819729,13566.142424385062,1.0,1.0,1.0,,142.0
102,ml-heuristic,2,1.815993070602417,13563.0,13563.0,0.0,1640.0,heuristic,min,13545.26825630499,13545.412645404165,1.0,1.000073735437251,1.0,,4.039408866995074
103,ml-heuristic,3,3.6015830039978027,13522.0,13522.0,0.0,1.0,heuristic,min,13513.490196843653,13513.683391861978,1.0,1.0,1.0,,1.0
104,ml-heuristic,4,0.8963778018951416,13534.0,13534.0,0.0,261.0,heuristic,min,13552.471283116225,13552.604609540394,1.0,1.0,1.0,,1.1347826086956523
105,ml-heuristic,5,1.685107946395874,13532.0,13532.0,0.0,265.0,heuristic,min,13557.55577263004,13557.681290107144,1.0,1.0,1.0,,5.888888888888889
106,ml-heuristic,6,1.803605079650879,13534.999999068532,13534.999999068534,1.343915333336563e-16,713.0,heuristic,min,13536.370399655816,13536.528454412353,0.9999999999311808,1.0,1.0,inf,1.0
107,ml-heuristic,7,1.6335430145263672,13613.0,13613.0,0.0,519.0,heuristic,min,13595.689443983643,13595.75639435777,1.0,1.0,1.0,,2.0513833992094863
108,ml-heuristic,8,2.3025331497192383,13580.0,13580.0,0.0,1442.0,heuristic,min,13588.910124631891,13588.987486935437,1.0,1.0000000001744203,1.0,-0.0,2.552212389380531
109,ml-heuristic,9,5.6919379234313965,13584.0,13584.0,0.0,1142.0,heuristic,min,13569.84328895509,13569.949934810124,1.0,1.0000000004044145,1.0,,4.443579766536965
110,ml-heuristic,10,2.931243896484375,13577.0,13578.0,7.365397363187744e-05,1123.0,heuristic,min,13568.148459117152,13568.25770795454,0.9999263514508764,1.0,1.0,inf,1.7330246913580247
111,ml-heuristic,11,1.5754199028015137,13574.0,13574.999998324447,7.367012851385044e-05,3.0,heuristic,min,13564.758799441275,13564.873254243374,0.9999263353233345,1.0000000000249623,1.0,inf,3.0
112,ml-heuristic,12,0.8674600124359131,13544.0,13544.0,0.0,200.0,heuristic,min,13538.912644412721,13539.06679469573,1.0,1.0,1.0,,200.0
113,ml-heuristic,13,2.3797359466552734,13534.0,13534.0,0.0,39.0,heuristic,min,13559.674309927463,13559.796573676624,1.0,1.0,1.0,,39.0
114,ml-heuristic,14,2.257524013519287,13551.0,13551.0,0.0,690.0,heuristic,min,13548.657915980866,13548.797099115332,1.0,1.0,1.0,,9.583333333333334
115,ml-heuristic,15,2.4494469165802,13593.0,13594.0,7.356727727506805e-05,1161.0,heuristic,min,13560.52172484643,13560.642687104417,0.9999264381344711,1.0,1.0,inf,3.2889518413597734
116,ml-heuristic,16,1.4570538997650146,13594.0,13594.0,0.0,401.0,heuristic,min,13552.89499057571,13553.027666254291,1.0,1.0,1.0,,1.0
117,ml-heuristic,17,2.2187118530273438,13543.0,13543.0,0.0,234.0,heuristic,min,13535.522984736846,13535.682340984562,1.0,1.0,1.0,,2.146788990825688
118,ml-heuristic,18,0.4319300651550293,13525.0,13525.0,0.0,1.0,heuristic,min,13525.777713168703,13525.952036564957,1.0,1.0,1.0,,1.0
119,ml-heuristic,19,1.4861400127410889,13564.0,13564.0,0.0,466.0,heuristic,min,13560.098017386947,13560.21963039052,1.0,1.0,1.0,,23.3
120,ml-heuristic,20,1.2517409324645996,13569.0,13569.0,0.0,274.0,heuristic,min,13549.92903835932,13550.06626925702,1.0,1.0,1.0,,274.0
121,ml-heuristic,21,1.4341230392456055,13566.0,13566.0,0.0,476.0,heuristic,min,13553.742405494679,13553.873779682082,1.0,1.0,1.0,,1.934959349593496
122,ml-heuristic,22,1.2694261074066162,13564.0,13565.0,7.372456502506635e-05,22.0,heuristic,min,13551.200160737772,13551.335439398708,0.9999262808698858,1.0,1.0,inf,22.0
123,ml-heuristic,23,2.0170810222625732,13580.0,13580.0,0.0,306.0,heuristic,min,13560.945432305916,13561.065743818312,1.0,1.0,1.0,,5.464285714285714
124,ml-heuristic,24,1.632323980331421,13543.0,13543.0,0.0,328.0,heuristic,min,13545.691963764473,13545.835702118062,1.0,1.0000000000000002,1.0,,328.0
125,ml-heuristic,25,1.3537018299102783,13542.0,13542.0,0.0,153.0,heuristic,min,13528.31995792561,13528.490376848333,1.0,1.0,1.0,,153.0
126,ml-heuristic,26,2.2375080585479736,13532.0,13532.0,0.0,1.0,heuristic,min,13538.488936953237,13538.643737981833,1.0,1.0000000000460052,1.0,,1.0
127,ml-heuristic,27,1.422248125076294,13522.0,13522.0,0.0,258.0,heuristic,min,13537.641522034268,13537.797624554041,1.0,1.0,1.0,,258.0
128,ml-heuristic,28,1.0971989631652832,13570.0,13571.0,7.369196757553427e-05,130.0,heuristic,min,13560.098017386947,13560.21963039052,0.9999263134625304,1.0,1.0,inf,8.666666666666666
129,ml-heuristic,29,1.157463788986206,13595.0,13595.0,0.0,1.0,heuristic,min,13571.961826252513,13572.065218379603,1.0,1.0000000010215446,1.0,,1.0
130,ml-heuristic,30,1.841322898864746,13577.0,13577.0,0.0,207.0,heuristic,min,13559.250602467977,13559.373516962729,1.0,1.0,1.0,,8.28
131,ml-heuristic,31,2.887389898300171,13582.0,13582.0,0.0,1061.0,heuristic,min,13583.401927658593,13583.48774965479,1.0,1.0,1.0,,35.36666666666667
132,ml-heuristic,32,0.7486422061920166,13523.0,13524.0,7.394808844191378e-05,66.0,heuristic,min,13535.522984736846,13535.682340984562,0.9999260573794735,1.0,1.0,inf,1.0
133,ml-heuristic,33,2.0524861812591553,13548.0,13548.0,0.0,1437.0,heuristic,min,13552.89499057571,13553.027666254291,1.0,1.0,1.0,,1.5078698845750262
134,ml-heuristic,34,2.214898109436035,13557.0,13557.0,0.0,373.0,heuristic,min,13543.573426467052,13543.720418548583,1.0,1.0,1.0,,4.6625
135,ml-heuristic,35,1.0435450077056885,13568.0,13568.0,0.0,623.0,heuristic,min,13544.42084138602,13544.566531976374,1.0,1.0000000002137304,1.0,,1.4
136,ml-heuristic,36,0.7411088943481445,13554.0,13554.0,0.0,126.0,heuristic,min,13525.354005709218,13525.528979851062,1.0,1.000000000092736,1.0,,1.0
137,ml-heuristic,37,2.506878137588501,13532.0,13532.0,0.0,733.0,heuristic,min,13539.336351872207,13539.489851409624,1.0,1.0,1.0,,6.850467289719626
138,ml-heuristic,38,1.4968650341033936,13514.0,13514.0,0.0,87.0,heuristic,min,13529.591080304062,13529.75954699002,1.0,1.0,1.0,,87.0
139,ml-heuristic,39,1.7060630321502686,13538.0,13538.0,0.0,235.0,heuristic,min,13537.217814574784,13537.374567840145,1.0,1.0,1.0,,1.5562913907284768
140,ml-heuristic,40,1.866358995437622,13577.0,13578.000000000002,7.365397363201142e-05,15.0,heuristic,min,13566.877336738698,13566.988537812853,0.9999263514508764,1.0000000000000002,1.0,inf,15.0
141,ml-heuristic,41,2.5284509658813477,13526.0,13526.0,0.0,217.0,heuristic,min,13521.540638573859,13521.721469426,1.0,1.0,1.0,,217.0
142,ml-heuristic,42,2.8142950534820557,13529.000000000002,13529.000000000002,0.0,201.0,heuristic,min,13530.014787763548,13530.182603703915,1.0,1.0000000000000002,1.0,,4.276595744680851
143,ml-heuristic,43,7.293056964874268,13565.0,13565.0,0.0,1485.0,heuristic,min,13560.945432305916,13561.065743818312,1.0,1.0,1.0,,5.561797752808989
144,ml-heuristic,44,1.7926158905029297,13553.0,13553.0,0.0,1.0,heuristic,min,13571.114411333543,13571.219104951811,1.0,1.0,1.0,,1.0
145,ml-heuristic,45,2.502897024154663,13521.0,13521.0,0.0,68.0,heuristic,min,13518.998393816952,13519.183129142624,1.0,1.0,1.0,,68.0
146,ml-heuristic,46,1.554502010345459,13543.0,13543.0,0.0,157.0,heuristic,min,13543.149719007566,13543.297361834688,1.0,1.0000000000438993,1.0,,157.0
147,ml-heuristic,47,3.1030750274658203,13564.0,13564.0,0.0,137.0,heuristic,min,13567.301044198182,13567.41159452675,1.0,1.0,1.0,,137.0
148,ml-heuristic,48,1.837294101715088,13552.0,13552.0,0.0,48.0,heuristic,min,13558.403187549009,13558.527403534938,1.0,1.0,1.0,,48.0
149,ml-heuristic,49,2.7994580268859863,13524.0,13524.0,0.0,452.0,heuristic,min,13501.626387978087,13501.837803872895,1.0,1.0,1.0,,1.0
1 Solver Instance Wallclock Time Lower Bound Upper Bound Gap Nodes Mode Sense Predicted LB Predicted UB Relative Lower Bound Relative Upper Bound Relative Wallclock Time Relative Gap Relative Nodes
2 0 baseline 0 29.597511053085327 13540.0 13540.0 0.0 1488.0 exact min 1.0 1.0 26.86009340160744 1488.0
3 1 baseline 1 100.47623896598816 13567.0 13567.0 0.0 5209.0 exact min 1.0 1.0 199.7436260690364 5209.0
4 2 baseline 2 95.635351896286 13562.0 13562.0 0.0 5738.0 exact min 0.9999262699992627 1.0 52.66283965751092 14.133004926108374
5 3 baseline 3 116.40385484695436 13522.0 13522.0 0.0 4888.0 exact min 1.0 1.0 32.32019218153368 4888.0
6 4 baseline 4 52.82231903076172 13534.0 13534.0 0.0 2432.0 exact min 1.0 1.0 58.92863357290153 10.57391304347826
7 5 baseline 5 130.4400429725647 13532.0 13532.0 0.0 5217.0 exact min 1.0 1.0 77.40752944139346 115.93333333333334
8 6 baseline 6 138.9033811092377 13535.0 13535.0 0.0 5910.0 exact min 1.0 1.0000000000688192 77.01429912590677 8.288920056100983
9 7 baseline 7 162.5064761638641 13613.0 13613.0 0.0 5152.0 exact min 1.0 1.0 99.48098992115096 20.363636363636363
10 8 baseline 8 135.88944792747498 13579.999997631374 13579.999997631372 -1.3394620057902246e-16 6720.0 exact min 0.9999999998255799 1.0 59.017368737577044 1.0 11.893805309734514
11 9 baseline 9 62.36928915977478 13583.999994506434 13583.999994506434 0.0 3583.0 exact min 0.9999999995955855 1.0 10.957478805772942 13.941634241245136
12 10 baseline 10 248.86321592330933 13577.0 13578.0 7.365397363187744e-05 13577.0 exact min 0.9999263514508764 1.0 84.90020779976263 inf 20.95216049382716
13 11 baseline 11 64.44093084335327 13574.999997985586 13574.999997985586 0.0 3149.0 exact min 1.0 1.0 40.90397152451879 3149.0
14 12 baseline 12 74.64304614067079 13544.0 13544.0 0.0 4925.0 exact min 1.0 1.0 86.04782361214066 4925.0
15 13 baseline 13 60.25232315063477 13534.0 13534.0 0.0 4007.0 exact min 1.0 1.0 25.318911215893348 4007.0
16 14 baseline 14 151.0537710189819 13550.0 13551.0 7.380073800738008e-05 5389.0 exact min 0.9999262047081396 1.0 66.91125769399989 inf 74.84722222222223
17 15 baseline 15 94.33260798454285 13593.0 13594.0 7.356727727506805e-05 4240.0 exact min 0.9999264381344711 1.0 38.51179927436251 inf 12.011331444759207
18 16 baseline 16 112.65512180328369 13594.0 13594.0 0.0 5678.0 exact min 1.0 1.0 77.31705863554674 14.159600997506235
19 17 baseline 17 94.68812704086305 13543.0 13543.0 0.0 4110.0 exact min 1.0 1.0 42.677072694980595 37.706422018348626
20 18 baseline 18 119.84407782554626 13525.0 13525.0 0.0 4925.0 exact min 1.0 1.0 277.4617640532422 4925.0
21 19 baseline 19 96.70060396194458 13564.0 13564.0 0.0 4242.0 exact min 1.0 1.0 65.06829984584466 212.1
22 20 baseline 20 206.73002099990845 13569.0 13569.0 0.0 5164.0 exact min 1.0 1.0 165.1539992327885 5164.0
23 21 baseline 21 101.60346388816832 13566.0 13566.0 0.0 3797.0 exact min 1.0 1.0 70.84710384515891 15.434959349593496
24 22 baseline 22 39.246136903762824 13565.0 13565.0 0.0 1434.0 exact min 1.0 1.0 30.91644064571905 1434.0
25 23 baseline 23 89.74621176719666 13580.0 13580.0 0.0 3758.0 exact min 1.0 1.0 44.49311196559062 67.10714285714286
26 24 baseline 24 69.45808696746826 13542.999999999995 13542.999999999998 1.343121467581671e-16 3608.0 exact min 0.9999999999999996 1.0 42.55165506627291 inf 3608.0
27 25 baseline 25 130.97386503219604 13542.0 13542.0 0.0 6687.0 exact min 1.0 1.0 96.75237348307111 6687.0
28 26 baseline 26 98.33581423759459 13531.999999377458 13531.999999377458 1.3442132749257606e-16 5284.0 exact min 0.9999999999539948 1.0 43.94880897162418 inf 5284.0
29 27 baseline 27 101.37863302230836 13521.0 13522.0 7.395902669920864e-05 3512.0 exact min 0.9999260464428339 1.0 71.28055311506921 inf 3512.0
30 28 baseline 28 47.17776012420654 13571.0 13571.0 0.0 2742.0 exact min 1.0 1.0 42.99836374991145 182.8
31 29 baseline 29 122.19579315185548 13594.0 13594.9999861121 7.356084390904645e-05 5138.0 exact min 0.9999264435454212 1.0 105.57202248105209 inf 5138.0
32 30 baseline 30 159.65594601631162 13577.0 13577.0 0.0 5170.0 exact min 1.0 1.0 86.70719628520685 206.8
33 31 baseline 31 64.20995998382568 13582.0 13582.0 0.0 2716.0 exact min 1.0 1.0 22.238063526379513 90.53333333333333
34 32 baseline 32 73.25116801261902 13523.0 13524.0 7.394808844191378e-05 2705.0 exact min 0.9999260573794735 1.0 97.84536245319715 inf 40.984848484848484
35 33 baseline 33 73.00323796272278 13548.0 13548.0 0.0 3823.0 exact min 1.0 1.0 35.56819949839414 4.0115424973767055
36 34 baseline 34 75.30102896690369 13557.0 13557.0 0.0 2495.0 exact min 1.0 1.0 33.9975137664807 31.1875
37 35 baseline 35 95.78053402900696 13567.999997100107 13567.999997100107 0.0 5380.0 exact min 0.9999999997862696 1.0 91.7838074273266 12.089887640449438
38 36 baseline 36 59.77940106391907 13553.999999666668 13553.999999666668 0.0 2236.0 exact min 0.9999999999754071 1.000000000068143 80.66210177722816 17.746031746031747
39 37 baseline 37 111.62521696090698 13532.0 13532.0 0.0 4730.0 exact min 1.0 1.0 44.52758005552085 44.205607476635514
40 38 baseline 38 101.59809303283691 13514.0 13514.0 0.0 4724.0 exact min 1.0 1.0 67.8739169651946 4724.0
41 39 baseline 39 136.7306661605835 13538.0 13538.0 0.0 5301.0 exact min 1.0 1.0 80.14397099282577 35.10596026490066
42 40 baseline 40 96.18307614326477 13578.0 13578.0 0.0 5286.0 exact min 1.0 1.0 51.5351421556022 5286.0
43 41 baseline 41 193.25571990013125 13526.0 13526.0 0.0 8946.0 exact min 1.0 1.0 76.43245706873643 8946.0
44 42 baseline 42 98.80436420440674 13529.0 13529.0 0.0 2757.0 exact min 0.9999999999999999 1.0 35.10803321142842 58.659574468085104
45 43 baseline 43 91.02266597747804 13565.0 13565.0 0.0 4119.0 exact min 1.0 1.0 12.480728782988091 15.426966292134832
46 44 baseline 44 44.981120109558105 13553.0 13553.0 0.0 1975.0 exact min 1.0 1.0 25.092447494113404 1975.0
47 45 baseline 45 99.74598288536072 13521.0 13521.0 0.0 5262.0 exact min 1.0 1.0 39.85221202580209 5262.0
48 46 baseline 46 70.65784502029419 13542.99999940547 13542.99999940547 0.0 3270.0 exact min 0.9999999999561006 1.0 45.453685199539756 3270.0
49 47 baseline 47 62.16441297531128 13564.0 13564.0 0.0 3631.0 exact min 1.0 1.0 20.033164659276355 3631.0
50 48 baseline 48 190.54906916618347 13552.0 13552.0 0.0 9373.0 exact min 1.0 1.0 103.71179496429484 9373.0
51 49 baseline 49 73.46178817749023 13524.0 13524.0 0.0 4053.0 exact min 1.0 1.0 26.241432260088718 8.966814159292035
52 50 ml-exact 0 11.3649320602417 13540.0 13540.0 0.0 1.0 exact min 13534.675569817877 13534.83622755677 1.0 1.0 10.31381105301398 1.0
53 51 ml-exact 1 10.329864025115967 13567.0 13567.0 0.0 1.0 exact min 13566.029921819729 13566.142424385062 1.0 1.0 20.535447170501705 1.0
54 52 ml-exact 2 12.315430164337158 13562.0 13562.0 0.0 406.0 exact min 13545.26825630499 13545.412645404165 0.9999262699992627 1.0 6.78165041689932 1.0
55 53 ml-exact 3 12.996630907058716 13522.0 13522.0 0.0 37.0 exact min 13513.490196843653 13513.683391861978 1.0 1.0 3.6085884714116796 37.0
56 54 ml-exact 4 11.032249212265015 13534.0 13534.0 0.0 230.0 exact min 13552.471283116225 13552.604609540394 1.0 1.0 12.307588595947369 1.0
57 55 ml-exact 5 13.653040885925293 13532.0 13532.0 0.0 45.0 exact min 13557.55577263004 13557.681290107144 1.0 1.0 8.102175836940628 1.0
58 56 ml-exact 6 16.461652040481567 13535.0 13535.0 0.0 1805.0 exact min 13536.370399655816 13536.528454412353 1.0 1.0000000000688192 9.127082323181316 2.5315568022440393
59 57 ml-exact 7 13.48779296875 13613.0 13613.0 0.0 253.0 exact min 13595.689443983643 13595.75639435777 1.0 1.0 8.256772456439219 1.0
60 58 ml-exact 8 14.816275835037231 13580.0 13580.0 0.0 565.0 exact min 13588.910124631891 13588.987486935437 1.0 1.0000000001744203 6.434771997460219 -0.0 1.0
61 59 ml-exact 9 14.60462999343872 13584.0 13584.0 0.0 257.0 exact min 13569.84328895509 13569.949934810124 1.0 1.0000000004044145 2.565844917829724 1.0
62 60 ml-exact 10 14.660763025283813 13578.0 13578.0 0.0 648.0 exact min 13568.148459117152 13568.25770795454 1.0 1.0 5.0015500391718986 1.0
63 61 ml-exact 11 10.747740983963013 13574.0 13574.999999323794 7.36702021360194e-05 1.0 exact min 13564.758799441275 13564.873254243374 0.9999263353233345 1.000000000098579 6.822143712194244 inf 1.0
64 62 ml-exact 12 11.216827154159546 13544.0 13544.0 0.0 1.0 exact min 13538.912644412721 13539.06679469573 1.0 1.0 12.930656160923881 1.0
65 63 ml-exact 13 10.66540789604187 13534.0 13534.0 0.0 1.0 exact min 13559.674309927463 13559.796573676624 1.0 1.0 4.4817610588402195 1.0
66 64 ml-exact 14 12.637185096740723 13551.0 13551.0 0.0 72.0 exact min 13548.657915980866 13548.797099115332 1.0 1.0 5.597807607388606 1.0
67 65 ml-exact 15 15.559112071990967 13594.0 13594.0 0.0 353.0 exact min 13560.52172484643 13560.642687104417 1.0 1.0 6.352091962749635 1.0
68 66 ml-exact 16 14.185301065444946 13594.0 13594.0 0.0 500.0 exact min 13552.89499057571 13553.027666254291 1.0 1.0 9.735604885812853 1.2468827930174564
69 67 ml-exact 17 12.099143028259277 13543.0 13543.0 0.0 109.0 exact min 13535.522984736846 13535.682340984562 1.0 1.0 5.453228643345678 1.0
70 68 ml-exact 18 9.592709064483643 13525.0 13525.0 0.0 1.0 exact min 13525.777713168703 13525.952036564957 1.0 1.0 22.208940377976713 1.0
71 69 ml-exact 19 15.68299388885498 13564.0 13564.0 0.0 20.0 exact min 13560.098017386947 13560.21963039052 1.0 1.0 10.55283738705663 1.0
72 70 ml-exact 20 11.181609153747559 13569.0 13569.0 0.0 1.0 exact min 13549.92903835932 13550.06626925702 1.0 1.0 8.932846137524376 1.0
73 71 ml-exact 21 12.961982011795044 13566.0 13566.0 0.0 246.0 exact min 13553.742405494679 13553.873779682082 1.0 1.0 9.038263563922284 1.0
74 72 ml-exact 22 10.162704944610596 13564.0 13565.0 7.372456502506635e-05 1.0 exact min 13551.200160737772 13551.335439398708 0.9999262808698858 1.0 8.005747546324356 inf 1.0
75 73 ml-exact 23 14.439340114593506 13580.0 13580.0 0.0 56.0 exact min 13560.945432305916 13561.065743818312 1.0 1.0 7.158532530536033 1.0
76 74 ml-exact 24 8.9430251121521 13543.0 13543.0 0.0 1.0 exact min 13545.691963764473 13545.835702118062 1.0 1.0000000000000002 5.478707180627428 1.0
77 75 ml-exact 25 11.13078498840332 13542.0 13542.0 0.0 1.0 exact min 13528.31995792561 13528.490376848333 1.0 1.0 8.222479088427512 1.0
78 76 ml-exact 26 10.45563006401062 13532.0 13532.0 0.0 1.0 exact min 13538.488936953237 13538.643737981833 1.0 1.0000000000460052 4.67289046136253 1.0
79 77 ml-exact 27 12.658456087112427 13522.0 13522.0 0.0 1.0 exact min 13537.641522034268 13537.797624554041 1.0 1.0 8.900314835312852 1.0
80 78 ml-exact 28 11.49683690071106 13571.0 13571.0 0.0 15.0 exact min 13560.098017386947 13560.21963039052 1.0 1.0 10.478351955003774 1.0
81 79 ml-exact 29 10.038163900375366 13594.0 13595.0 7.356186552890981e-05 1.0 exact min 13571.961826252513 13572.065218379603 0.9999264435454212 1.0000000010215446 8.672551138007995 inf 1.0
82 80 ml-exact 30 10.994755983352661 13576.0 13577.0 7.365939893930465e-05 25.0 exact min 13559.250602467977 13559.373516962729 0.9999263460263681 1.0 5.971117825195893 inf 1.0
83 81 ml-exact 31 12.409696102142334 13581.0 13582.0 7.363228039172373e-05 30.0 exact min 13583.401927658593 13583.48774965479 0.9999263731409218 1.0 4.297894132499397 inf 1.0
84 82 ml-exact 32 11.40560007095337 13524.0 13524.0 0.0 98.0 exact min 13535.522984736846 13535.682340984562 1.0 1.0 15.235048166691241 1.4848484848484849
85 83 ml-exact 33 14.968575954437256 13548.0 13548.0 0.0 953.0 exact min 13552.89499057571 13553.027666254291 1.0 1.0 7.292899748174851 1.0
86 84 ml-exact 34 10.275269031524658 13557.0 13557.0 0.0 80.0 exact min 13543.573426467052 13543.720418548583 1.0 1.0 4.63916104661852 1.0
87 85 ml-exact 35 13.136114120483398 13568.0 13568.0 0.0 445.0 exact min 13544.42084138602 13544.566531976374 1.0 1.0000000002137304 12.587970833538003 1.0
88 86 ml-exact 36 11.606173992156982 13553.999998743056 13553.999998743056 0.0 164.0 exact min 13525.354005709218 13525.528979851062 0.9999999999072641 1.0 15.660551479908223 1.3015873015873016
89 87 ml-exact 37 15.051767110824585 13532.0 13532.0 0.0 107.0 exact min 13539.336351872207 13539.489851409624 1.0 1.0 6.004187792432415 1.0
90 88 ml-exact 38 10.445327997207642 13514.0 13514.0 0.0 1.0 exact min 13529.591080304062 13529.75954699002 1.0 1.0 6.978136144027363 1.0
91 89 ml-exact 39 12.747802019119263 13538.0 13538.0 0.0 151.0 exact min 13537.217814574784 13537.374567840145 1.0 1.0 7.472058053477855 1.0
92 90 ml-exact 40 14.315036058425903 13578.0 13578.0 0.0 1.0 exact min 13566.877336738698 13566.988537812853 1.0 1.0 7.670033521642671 1.0
93 91 ml-exact 41 10.27357292175293 13525.0 13526.0 7.393715341959335e-05 1.0 exact min 13521.540638573859 13521.721469426 0.9999260683128789 1.0 4.063188513593281 inf 1.0
94 92 ml-exact 42 12.76089596748352 13529.0 13529.0 0.0 47.0 exact min 13530.014787763548 13530.182603703915 0.9999999999999999 1.0 4.534313469262858 1.0
95 93 ml-exact 43 16.610208988189697 13565.0 13565.0 0.0 267.0 exact min 13560.945432305916 13561.065743818312 1.0 1.0 2.2775372615612164 1.0
96 94 ml-exact 44 9.052951097488403 13553.0 13553.0 0.0 1.0 exact min 13571.114411333543 13571.219104951811 1.0 1.0 5.050134356975125 1.0
97 95 ml-exact 45 12.605960130691528 13521.0 13521.0 0.0 1.0 exact min 13518.998393816952 13519.183129142624 1.0 1.0 5.036547652194804 1.0
98 96 ml-exact 46 12.235252141952515 13543.0 13543.0 0.0 1.0 exact min 13543.149719007566 13543.297361834688 1.0 1.0000000000438993 7.870849996027641 1.0
99 97 ml-exact 47 11.854049921035767 13564.0 13564.0 0.0 1.0 exact min 13567.301044198182 13567.41159452675 1.0 1.0 3.8200977469489614 1.0
100 98 ml-exact 48 11.03400993347168 13551.999999999978 13552.000000000004 1.8791212846548136e-15 1.0 exact min 13558.403187549009 13558.527403534938 0.9999999999999983 1.0000000000000002 6.005576310930073 inf 1.0
101 99 ml-exact 49 10.517628908157349 13524.0 13524.0 0.0 547.0 exact min 13501.626387978087 13501.837803872895 1.0 1.0 3.757023254910798 1.2101769911504425
102 100 ml-heuristic 0 1.1019139289855957 13540.0 13540.0 0.0 787.0 heuristic min 13534.675569817877 13534.83622755677 1.0 1.0 1.0 787.0
103 101 ml-heuristic 1 0.503026008605957 13567.0 13567.0 0.0 142.0 heuristic min 13566.029921819729 13566.142424385062 1.0 1.0 1.0 142.0
104 102 ml-heuristic 2 1.815993070602417 13563.0 13563.0 0.0 1640.0 heuristic min 13545.26825630499 13545.412645404165 1.0 1.000073735437251 1.0 4.039408866995074
105 103 ml-heuristic 3 3.6015830039978027 13522.0 13522.0 0.0 1.0 heuristic min 13513.490196843653 13513.683391861978 1.0 1.0 1.0 1.0
106 104 ml-heuristic 4 0.8963778018951416 13534.0 13534.0 0.0 261.0 heuristic min 13552.471283116225 13552.604609540394 1.0 1.0 1.0 1.1347826086956523
107 105 ml-heuristic 5 1.685107946395874 13532.0 13532.0 0.0 265.0 heuristic min 13557.55577263004 13557.681290107144 1.0 1.0 1.0 5.888888888888889
108 106 ml-heuristic 6 1.803605079650879 13534.999999068532 13534.999999068534 1.343915333336563e-16 713.0 heuristic min 13536.370399655816 13536.528454412353 0.9999999999311808 1.0 1.0 inf 1.0
109 107 ml-heuristic 7 1.6335430145263672 13613.0 13613.0 0.0 519.0 heuristic min 13595.689443983643 13595.75639435777 1.0 1.0 1.0 2.0513833992094863
110 108 ml-heuristic 8 2.3025331497192383 13580.0 13580.0 0.0 1442.0 heuristic min 13588.910124631891 13588.987486935437 1.0 1.0000000001744203 1.0 -0.0 2.552212389380531
111 109 ml-heuristic 9 5.6919379234313965 13584.0 13584.0 0.0 1142.0 heuristic min 13569.84328895509 13569.949934810124 1.0 1.0000000004044145 1.0 4.443579766536965
112 110 ml-heuristic 10 2.931243896484375 13577.0 13578.0 7.365397363187744e-05 1123.0 heuristic min 13568.148459117152 13568.25770795454 0.9999263514508764 1.0 1.0 inf 1.7330246913580247
113 111 ml-heuristic 11 1.5754199028015137 13574.0 13574.999998324447 7.367012851385044e-05 3.0 heuristic min 13564.758799441275 13564.873254243374 0.9999263353233345 1.0000000000249623 1.0 inf 3.0
114 112 ml-heuristic 12 0.8674600124359131 13544.0 13544.0 0.0 200.0 heuristic min 13538.912644412721 13539.06679469573 1.0 1.0 1.0 200.0
115 113 ml-heuristic 13 2.3797359466552734 13534.0 13534.0 0.0 39.0 heuristic min 13559.674309927463 13559.796573676624 1.0 1.0 1.0 39.0
116 114 ml-heuristic 14 2.257524013519287 13551.0 13551.0 0.0 690.0 heuristic min 13548.657915980866 13548.797099115332 1.0 1.0 1.0 9.583333333333334
117 115 ml-heuristic 15 2.4494469165802 13593.0 13594.0 7.356727727506805e-05 1161.0 heuristic min 13560.52172484643 13560.642687104417 0.9999264381344711 1.0 1.0 inf 3.2889518413597734
118 116 ml-heuristic 16 1.4570538997650146 13594.0 13594.0 0.0 401.0 heuristic min 13552.89499057571 13553.027666254291 1.0 1.0 1.0 1.0
119 117 ml-heuristic 17 2.2187118530273438 13543.0 13543.0 0.0 234.0 heuristic min 13535.522984736846 13535.682340984562 1.0 1.0 1.0 2.146788990825688
120 118 ml-heuristic 18 0.4319300651550293 13525.0 13525.0 0.0 1.0 heuristic min 13525.777713168703 13525.952036564957 1.0 1.0 1.0 1.0
121 119 ml-heuristic 19 1.4861400127410889 13564.0 13564.0 0.0 466.0 heuristic min 13560.098017386947 13560.21963039052 1.0 1.0 1.0 23.3
122 120 ml-heuristic 20 1.2517409324645996 13569.0 13569.0 0.0 274.0 heuristic min 13549.92903835932 13550.06626925702 1.0 1.0 1.0 274.0
123 121 ml-heuristic 21 1.4341230392456055 13566.0 13566.0 0.0 476.0 heuristic min 13553.742405494679 13553.873779682082 1.0 1.0 1.0 1.934959349593496
124 122 ml-heuristic 22 1.2694261074066162 13564.0 13565.0 7.372456502506635e-05 22.0 heuristic min 13551.200160737772 13551.335439398708 0.9999262808698858 1.0 1.0 inf 22.0
125 123 ml-heuristic 23 2.0170810222625732 13580.0 13580.0 0.0 306.0 heuristic min 13560.945432305916 13561.065743818312 1.0 1.0 1.0 5.464285714285714
126 124 ml-heuristic 24 1.632323980331421 13543.0 13543.0 0.0 328.0 heuristic min 13545.691963764473 13545.835702118062 1.0 1.0000000000000002 1.0 328.0
127 125 ml-heuristic 25 1.3537018299102783 13542.0 13542.0 0.0 153.0 heuristic min 13528.31995792561 13528.490376848333 1.0 1.0 1.0 153.0
128 126 ml-heuristic 26 2.2375080585479736 13532.0 13532.0 0.0 1.0 heuristic min 13538.488936953237 13538.643737981833 1.0 1.0000000000460052 1.0 1.0
129 127 ml-heuristic 27 1.422248125076294 13522.0 13522.0 0.0 258.0 heuristic min 13537.641522034268 13537.797624554041 1.0 1.0 1.0 258.0
130 128 ml-heuristic 28 1.0971989631652832 13570.0 13571.0 7.369196757553427e-05 130.0 heuristic min 13560.098017386947 13560.21963039052 0.9999263134625304 1.0 1.0 inf 8.666666666666666
131 129 ml-heuristic 29 1.157463788986206 13595.0 13595.0 0.0 1.0 heuristic min 13571.961826252513 13572.065218379603 1.0 1.0000000010215446 1.0 1.0
132 130 ml-heuristic 30 1.841322898864746 13577.0 13577.0 0.0 207.0 heuristic min 13559.250602467977 13559.373516962729 1.0 1.0 1.0 8.28
133 131 ml-heuristic 31 2.887389898300171 13582.0 13582.0 0.0 1061.0 heuristic min 13583.401927658593 13583.48774965479 1.0 1.0 1.0 35.36666666666667
134 132 ml-heuristic 32 0.7486422061920166 13523.0 13524.0 7.394808844191378e-05 66.0 heuristic min 13535.522984736846 13535.682340984562 0.9999260573794735 1.0 1.0 inf 1.0
135 133 ml-heuristic 33 2.0524861812591553 13548.0 13548.0 0.0 1437.0 heuristic min 13552.89499057571 13553.027666254291 1.0 1.0 1.0 1.5078698845750262
136 134 ml-heuristic 34 2.214898109436035 13557.0 13557.0 0.0 373.0 heuristic min 13543.573426467052 13543.720418548583 1.0 1.0 1.0 4.6625
137 135 ml-heuristic 35 1.0435450077056885 13568.0 13568.0 0.0 623.0 heuristic min 13544.42084138602 13544.566531976374 1.0 1.0000000002137304 1.0 1.4
138 136 ml-heuristic 36 0.7411088943481445 13554.0 13554.0 0.0 126.0 heuristic min 13525.354005709218 13525.528979851062 1.0 1.000000000092736 1.0 1.0
139 137 ml-heuristic 37 2.506878137588501 13532.0 13532.0 0.0 733.0 heuristic min 13539.336351872207 13539.489851409624 1.0 1.0 1.0 6.850467289719626
140 138 ml-heuristic 38 1.4968650341033936 13514.0 13514.0 0.0 87.0 heuristic min 13529.591080304062 13529.75954699002 1.0 1.0 1.0 87.0
141 139 ml-heuristic 39 1.7060630321502686 13538.0 13538.0 0.0 235.0 heuristic min 13537.217814574784 13537.374567840145 1.0 1.0 1.0 1.5562913907284768
142 140 ml-heuristic 40 1.866358995437622 13577.0 13578.000000000002 7.365397363201142e-05 15.0 heuristic min 13566.877336738698 13566.988537812853 0.9999263514508764 1.0000000000000002 1.0 inf 15.0
143 141 ml-heuristic 41 2.5284509658813477 13526.0 13526.0 0.0 217.0 heuristic min 13521.540638573859 13521.721469426 1.0 1.0 1.0 217.0
144 142 ml-heuristic 42 2.8142950534820557 13529.000000000002 13529.000000000002 0.0 201.0 heuristic min 13530.014787763548 13530.182603703915 1.0 1.0000000000000002 1.0 4.276595744680851
145 143 ml-heuristic 43 7.293056964874268 13565.0 13565.0 0.0 1485.0 heuristic min 13560.945432305916 13561.065743818312 1.0 1.0 1.0 5.561797752808989
146 144 ml-heuristic 44 1.7926158905029297 13553.0 13553.0 0.0 1.0 heuristic min 13571.114411333543 13571.219104951811 1.0 1.0 1.0 1.0
147 145 ml-heuristic 45 2.502897024154663 13521.0 13521.0 0.0 68.0 heuristic min 13518.998393816952 13519.183129142624 1.0 1.0 1.0 68.0
148 146 ml-heuristic 46 1.554502010345459 13543.0 13543.0 0.0 157.0 heuristic min 13543.149719007566 13543.297361834688 1.0 1.0000000000438993 1.0 157.0
149 147 ml-heuristic 47 3.1030750274658203 13564.0 13564.0 0.0 137.0 heuristic min 13567.301044198182 13567.41159452675 1.0 1.0 1.0 137.0
150 148 ml-heuristic 48 1.837294101715088 13552.0 13552.0 0.0 48.0 heuristic min 13558.403187549009 13558.527403534938 1.0 1.0 1.0 48.0
151 149 ml-heuristic 49 2.7994580268859863 13524.0 13524.0 0.0 452.0 heuristic min 13501.626387978087 13501.837803872895 1.0 1.0 1.0 1.0

Binary file not shown.

Before

Width:  |  Height:  |  Size: 95 KiB

14
docs/Makefile Normal file
View File

@@ -0,0 +1,14 @@
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

7
docs/_static/custom.css vendored Normal file
View File

@@ -0,0 +1,7 @@
h1.site-logo {
font-size: 30px !important;
}
h1.site-logo small {
font-size: 20px !important;
}

View File

@@ -1,19 +1,34 @@
```{sectnum}
---
start: 4
depth: 2
suffix: .
---
```
# About
### Authors
## Authors
* **Alinson S. Xavier,** Argonne National Laboratory <<axavier@anl.gov>>
* **Feng Qiu,** Argonne National Laboratory <<fqiu@anl.gov>>
### Acknowledgments
## Acknowledgments
* Based upon work supported by Laboratory Directed Research and Development (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357.
* Based upon work supported by **Laboratory Directed Research and Development** (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357, and the **U.S. Department of Energy Advanced Grid Modeling Program** under Grant DE-OE0000875.
### References
## References
* **Learning to Solve Large-Scale Security-Constrained Unit Commitment Problems.** *Alinson S. Xavier, Feng Qiu, Shabbir Ahmed*. INFORMS Journal on Computing (to appear). [ArXiv:1902:01696](https://arxiv.org/abs/1902.01697)
### License
If you use MIPLearn in your research, or the included problem generators, we kindly request that you cite the package as follows:
* **Alinson S. Xavier, Feng Qiu.** *MIPLearn: An Extensible Framework for Learning-Enhanced Optimization*. Zenodo (2020). DOI: [10.5281/zenodo.4287567](https://doi.org/10.5281/zenodo.4287567)
If you use MIPLearn in the field of power systems optimization, we kindly request that you cite the reference below, in which the main techniques implemented in MIPLearn were first developed:
* **Alinson S. Xavier, Feng Qiu, Shabbir Ahmed.** *Learning to Solve Large-Scale Unit Commitment Problems.* INFORMS Journal on Computing (2020). DOI: [10.1287/ijoc.2020.0976](https://doi.org/10.1287/ijoc.2020.0976)
## License
```text
MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization

View File

@@ -1,61 +1,177 @@
# Benchmarks Utilities
### Using `BenchmarkRunner`
MIPLearn provides the utility class `BenchmarkRunner`, which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage:
```python
from miplearn import BenchmarkRunner, LearningSolver
# Create train and test instances
train_instances = [...]
test_instances = [...]
# Training phase...
training_solver = LearningSolver(...)
training_solver.parallel_solve(train_instances, n_jobs=10)
# Test phase...
test_solvers = {
"Baseline": LearningSolver(...), # each solver may have different parameters
"Strategy A": LearningSolver(...),
"Strategy B": LearningSolver(...),
"Strategy C": LearningSolver(...),
}
benchmark = BenchmarkRunner(test_solvers)
benchmark.fit(train_instances)
benchmark.parallel_solve(test_instances, n_jobs=2)
print(benchmark.raw_results())
```{sectnum}
---
start: 2
depth: 2
suffix: .
---
```
The method `fit` trains the ML models for each individual solver. The method `parallel_solve` solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, `raw_results` produces a table of results (Pandas DataFrame) with the following columns:
# Benchmarks
* **Solver,** the name of the solver.
* **Instance,** the sequence number identifying the instance.
* **Wallclock Time,** the wallclock running time (in seconds) spent by the solver;
* **Lower Bound,** the best lower bound obtained by the solver;
* **Upper Bound,** the best upper bound obtained by the solver;
* **Gap,** the relative MIP integrality gap at the end of the optimization;
* **Nodes,** the number of explored branch-and-bound nodes.
MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, that can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. In this page, we describe these problems, the included instance generators, and we present some benchmark results for `LearningSolver` with default parameters.
In addition to the above, there is also a "Relative" version of most columns, where the raw number is compared to the solver which provided the best performance. The *Relative Wallclock Time* for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance. For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2.
## Preliminaries
### Benchmark challenges
When evaluating the performance of a conventional MIP solver, *benchmark sets*, such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques is typically measured as the average (or total) running time the solver takes to solve the entire benchmark set. For Learning-Enhanced MIP solvers, it is also necessary to specify which instances the solver should be trained on (the *training instances*) before solving the actual set of instances we are interested in (the *test instances*). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger performance benefits.
In MIPLearn, each optimization problem comes with a set of **benchmark challenges**, which specify how the training and test instances should be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from.
### Baseline results
To illustrate the performance of `LearningSolver`, and to set a baseline for newly proposed techniques, we present in this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. For more detailed computational studies, see [references](about.md#references). We compare three solvers:
* **baseline:** Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver)
* **ml-exact:** `LearningSolver` with default settings, using Gurobi 9.0 as internal MIP solver
* **ml-heuristic:** Same as above, but with `mode="heuristic"`
All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz). All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously at a time.
### Saving and loading benchmark results
When iteratively exploring new formulations, encoding and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. `BenchmarkRunner` provides the methods `save_results` and `load_results`, which can be used to avoid this repetition, as the next example shows:
## Maximum Weight Stable Set Problem
### Problem definition
Given a simple undirected graph $G=(V,E)$ and weights $w \in \mathbb{R}^V$, the problem is to find a stable set $S \subseteq V$ that maximizes $\sum_{v \in S} w_v$. We recall that a subset $S \subseteq V$ is a *stable set* if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems.
### Random instance generator
The class `MaxWeightStableSetGenerator` can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter `fix_graph=True` is provided, one random Erdős-Rényi graph $G_{n,p}$ is generated during the constructor, where $n$ and $p$ are sampled from user-provided probability distributions `n` and `p`. To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution `w`. When `fix_graph=False`, a new random graph is generated for each instance, while the remaining parameters are sampled in the same way.
### Challenge A
* Fixed random Erdős-Rényi graph $G_{n,p}$ with $n=200$ and $p=5\%$
* Random vertex weights $w_v \sim U(100, 150)$
* 500 training instances, 50 test instances
```python
# Benchmark baseline solvers and save results to a file.
benchmark = BenchmarkRunner(baseline_solvers)
benchmark.parallel_solve(test_instances)
benchmark.save_results("baseline_results.csv")
# Benchmark remaining solvers, loading baseline results from file.
benchmark = BenchmarkRunner(alternative_solvers)
benchmark.load_results("baseline_results.csv")
benchmark.fit(training_instances)
benchmark.parallel_solve(test_instances)
MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.),
n=randint(low=200, high=201),
p=uniform(loc=0.05, scale=0.0),
fix_graph=True)
```
![alt](figures/benchmark_stab_a.png)
## Traveling Salesman Problem
### Problem definition
Given a list of cities and the distance between each pair of cities, the problem asks for the
shortest route starting at the first city, visiting each other city exactly once, then returning
to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's
21 NP-complete problems.
### Random problem generator
The class `TravelingSalesmanGenerator` can be used to generate random instances of this
problem. Initially, the generator creates $n$ cities $(x_1,y_1),\ldots,(x_n,y_n) \in \mathbb{R}^2$,
where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions `n`,
`x` and `y`. For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to:
$$
d_{i,j} = \gamma_{i,j} \sqrt{(x_i-x_j)^2 + (y_i - y_j)^2}
$$
where $\gamma_{i,j}$ is sampled from the distribution `gamma`.
If `fix_cities=True` is provided, the list of cities is kept the same for all generated instances.
The $\gamma$ values, and therefore also the distances, are still different.
By default, all distances $d_{i,j}$ are rounded to the nearest integer. If `round=False`
is provided, this rounding will be disabled.
### Challenge A
* Fixed list of 350 cities in the $[0, 1000]^2$ square
* $\gamma_{i,j} \sim U(0.95, 1.05)$
* 500 training instances, 50 test instances
```python
TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
y=uniform(loc=0.0, scale=1000.0),
n=randint(low=350, high=351),
gamma=uniform(loc=0.95, scale=0.1),
fix_cities=True,
round=True,
)
```
![alt](figures/benchmark_tsp_a.png)
## Multidimensional 0-1 Knapsack Problem
### Problem definition
Given a set of $n$ items and $m$ types of resources (also called *knapsacks*), the problem is to find a subset of items that maximizes profit without consuming more resources than it is available. More precisely, the problem is:
$$
\begin{align*}
\text{maximize}
& \sum_{j=1}^n p_j x_j
\\
\text{subject to}
& \sum_{j=1}^n w_{ij} x_j \leq b_i
& \forall i=1,\ldots,m \\
& x_j \in \{0,1\}
& \forall j=1,\ldots,n
\end{align*}
$$
### Random instance generator
The class `MultiKnapsackGenerator` can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions `n` and `m`. The weights $w_{ij}$ are sampled independently from the provided distribution `w`. The capacity of knapsack $i$ is set to
$$
b_i = \alpha_i \sum_{j=1}^n w_{ij}
$$
where $\alpha_i$, the tightness ratio, is sampled from the provided probability
distribution `alpha`. To make the instances more challenging, the costs of the items
are linearly correlated to their average weights. More specifically, the price of each
item $j$ is set to:
$$
p_j = \sum_{i=1}^m \frac{w_{ij}}{m} + K u_j,
$$
where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled
from the provided probability distributions `K` and `u`.
If `fix_w=True` is provided, then $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as `u` and `K` are not constants, the generated instances will still not be completely identical.
If a probability distribution `w_jitter` is provided, then item weights will be set to $w_{ij} \gamma_{ij}$ where $\gamma_{ij}$ is sampled from `w_jitter`. When combined with `fix_w=True`, this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead.
By default, all generated prices, weights and capacities are rounded to the nearest integer number. If `round=False` is provided, this rounding will be disabled.
!!! note "References"
* Freville, Arnaud, and Gérard Plateau. *An efficient preprocessing procedure for the multidimensional 0-1 knapsack problem.* Discrete Applied Mathematics 49.1-3 (1994): 189-212.
* Fréville, Arnaud. *The multidimensional 0-1 knapsack problem: An overview.* European Journal of Operational Research 155.1 (2004): 1-21.
### Challenge A
* 250 variables, 10 constraints, fixed weights
* $w \sim U(0, 1000), \gamma \sim U(0.95, 1.05)$
* $K = 500, u \sim U(0, 1), \alpha = 0.25$
* 500 training instances, 50 test instances
```python
MultiKnapsackGenerator(n=randint(low=250, high=251),
m=randint(low=10, high=11),
w=uniform(loc=0.0, scale=1000.0),
K=uniform(loc=500.0, scale=0.0),
u=uniform(loc=0.0, scale=1.0),
alpha=uniform(loc=0.25, scale=0.0),
fix_w=True,
w_jitter=uniform(loc=0.95, scale=0.1),
)
```
![alt](figures/benchmark_knapsack_a.png)

16
docs/conf.py Normal file
View File

@@ -0,0 +1,16 @@
# Sphinx configuration for the MIPLearn documentation site (docs/conf.py).
# -- Project information -----------------------------------------------------
project = "MIPLearn"
copyright = "2020-2021, UChicago Argonne, LLC"
author = ""
# Version string shown alongside the logo (interpolated into html_title below).
release = "0.2.0"
# -- General configuration ---------------------------------------------------
# myst_parser lets Sphinx build Markdown (.md) sources alongside reStructuredText.
extensions = ["myst_parser"]
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- HTML output --------------------------------------------------------------
html_theme = "sphinx_book_theme"
html_static_path = ["_static"]
# custom.css adjusts the site-logo font sizes (see docs/_static/custom.css).
html_css_files = ["custom.css"]
html_theme_options = {
# Adds a "view repository" button linking to the project's GitHub page.
"repository_url": "https://github.com/ANL-CEEESA/MIPLearn/",
"use_repository_button": True,
"extra_navbar": "",
}
html_title = f"MIPLearn<br/><small>{release}</small>"

View File

@@ -1,28 +0,0 @@
.navbar-default {
border-bottom: 0px;
background-color: #fff;
box-shadow: 0px 0px 15px rgba(0, 0, 0, 0.2);
}
a, .navbar-default a {
color: #06a !important;
font-weight: normal;
}
.disabled > a {
color: #999 !important;
}
.navbar-default a:hover,
.navbar-default .active,
.active > a {
background-color: #f0f0f0 !important;
}
.icon-bar {
background-color: #666 !important;
}
.navbar-collapse {
border-color: #fff !important;
}

View File

@@ -1,18 +1,47 @@
```{sectnum}
---
start: 3
depth: 2
suffix: .
---
```
# Customization
## Customizing solver parameters
### Selecting the internal MIP solver
By default, `LearningSolver` uses [Gurobi](https://www.gurobi.com/) as its internal MIP solver. Another supported solver is [IBM ILOG CPLEX](https://www.ibm.com/products/ilog-cplex-optimization-studio). To switch between solvers, use the `solver` constructor argument, as shown below. It is also possible to specify a time limit (in seconds) and a relative MIP gap tolerance.
By default, `LearningSolver` uses [Gurobi](https://www.gurobi.com/) as its internal MIP solver, and expects models to be provided using the Pyomo modeling language. Supported solvers and modeling languages include:
* `GurobiPyomoSolver`: Gurobi with Pyomo (default).
* `CplexPyomoSolver`: [IBM ILOG CPLEX](https://www.ibm.com/products/ilog-cplex-optimization-studio) with Pyomo.
* `XpressPyomoSolver`: [FICO XPRESS Solver](https://www.fico.com/en/products/fico-xpress-solver) with Pyomo.
* `GurobiSolver`: Gurobi without any modeling language.
To switch between solvers, provide the desired class using the `solver` argument:
```python
from miplearn import LearningSolver
solver = LearningSolver(solver="cplex",
time_limit=300,
gap_tolerance=1e-3)
from miplearn import LearningSolver, CplexPyomoSolver
solver = LearningSolver(solver=CplexPyomoSolver)
```
To configure a particular solver, use the `params` constructor argument, as shown below.
```python
from miplearn import LearningSolver, GurobiPyomoSolver
solver = LearningSolver(
solver=lambda: GurobiPyomoSolver(
params={
"TimeLimit": 900,
"MIPGap": 1e-3,
"NodeLimit": 1000,
}
),
)
```
## Customizing solver components
`LearningSolver` is composed by a number of individual machine-learning components, each targeting a different part of the solution process. Each component can be individually enabled, disabled or customized. The following components are enabled by default:
@@ -40,30 +69,26 @@ solver2 = LearningSolver(components=[
])
```
It is also possible to add components to an existing solver using the `solver.add` method, as shown below. If the solver already holds another component of that type, the new component will replace the previous one.
```python
# Create solver with default components
solver = LearningSolver()
# Replace the default LazyConstraintComponent by one with custom parameters
solver.add(LazyConstraintComponent(...))
```
### Adjusting component aggressiveness
The aggressiveness of classification components (such as `PrimalSolutionComponent` and `LazyConstraintComponent`) can
be adjusted through the `threshold` constructor argument. Internally, these components ask the ML models how confident
they are on each prediction (through the `predict_proba` method in the sklearn API), and only take into account
predictions which have probabilities above the threshold. Lowering a component's threshold increases its aggressiveness,
while raising a component's threshold makes it more conservative.
The aggressiveness of classification components, such as `PrimalSolutionComponent` and `LazyConstraintComponent`, can be adjusted through the `threshold` constructor argument. Internally, these components ask the machine learning models how confident they are in each prediction they make, then automatically discard all predictions that have low confidence. The `threshold` argument specifies how confident the ML models should be for a prediction to be considered trustworthy. Lowering a component's threshold increases its aggressiveness, while raising a component's threshold makes it more conservative.
MIPLearn also includes `MinPrecisionThreshold`, a dynamic threshold which adjusts itself automatically during training
to achieve a minimum desired true positive rate (also known as precision). The example below shows how to initialize
a `PrimalSolutionComponent` which achieves 95% precision, possibly at the cost of a lower recall. To make the component
more aggressive, this precision may be lowered.
For example, if the ML model predicts that a certain binary variable will assume value `1.0` in the optimal solution with 75% confidence, and if the `PrimalSolutionComponent` is configured to discard all predictions with less than 90% confidence, then this variable will not be included in the predicted MIP start.
MIPLearn currently provides two types of thresholds:
* `MinProbabilityThreshold(p: List[float])` A threshold which indicates that a prediction is trustworthy if its probability of being correct, as computed by the machine learning model, is above a fixed value.
* `MinPrecisionThreshold(p: List[float])` A dynamic threshold which automatically adjusts itself during training to ensure that the component achieves at least a given precision on the training data set. Note that increasing a component's precision may reduce its recall.
The example below shows how to build a `PrimalSolutionComponent` which fixes variables to zero with at least 80% precision, and to one with at least 95% precision. Other components are configured similarly.
```python
PrimalSolutionComponent(threshold=MinPrecisionThreshold(0.95))
from miplearn import PrimalSolutionComponent, MinPrecisionThreshold
PrimalSolutionComponent(
mode="heuristic",
threshold=MinPrecisionThreshold([0.80, 0.95]),
)
```
### Evaluating component performance
@@ -140,25 +165,18 @@ dtype: float64
### Using customized ML classifiers and regressors
By default, given a training set of instances, MIPLearn trains a fixed set of ML classifiers and regressors, then
selects the best one based on cross-validation performance. Alternatively, the user may specify which ML model a component
should use through the `classifier` or `regressor` constructor parameters. The provided classifiers and regressors must
follow the sklearn API. In particular, classifiers must provide the methods `fit`, `predict_proba` and `predict`,
while regressors must provide the methods `fit` and `predict`.
By default, given a training set of instances, MIPLearn trains a fixed set of ML classifiers and regressors, then selects the best one based on cross-validation performance. Alternatively, the user may specify which ML model a component should use through the `classifier` or `regressor` constructor parameters. Scikit-learn classifiers and regressors are currently supported. A future version of the package will add compatibility with Keras models.
!!! danger
MIPLearn must be able to generate a copy of any custom ML classifiers and regressors through
the standard `copy.deepcopy` method. This currently makes it incompatible with Keras and TensorFlow
predictors. This is a known limitation, which will be addressed in a future version.
The example below shows how to construct a `PrimalSolutionComponent` which internally uses
sklearn's `KNeighborsClassifiers`. Any other sklearn classifier or pipeline can be used.
The example below shows how to construct a `PrimalSolutionComponent` which internally uses scikit-learn's `KNeighborsClassifiers`. Any other scikit-learn classifier or pipeline can be used. It needs to be wrapped in `ScikitLearnClassifier` to ensure that all the proper data transformations are applied.
```python
from miplearn import PrimalSolutionComponent
from miplearn import PrimalSolutionComponent, ScikitLearnClassifier
from sklearn.neighbors import KNeighborsClassifier
comp = PrimalSolutionComponent(classifier=KNeighborsClassifier(n_neighbors=5))
comp = PrimalSolutionComponent(
classifier=ScikitLearnClassifier(
KNeighborsClassifier(n_neighbors=5),
),
)
comp.fit(train_instances)
```

View File

@@ -1 +0,0 @@
../../benchmark/knapsack/ChallengeA/performance.png

View File

@@ -1 +0,0 @@
../../benchmark/stab/ChallengeA/performance.png

View File

@@ -1 +0,0 @@
../../benchmark/tsp/ChallengeA/performance.png

View File

@@ -1,10 +1,13 @@
# MIPLearn
**MIPLearn** is an extensible framework for **Learning-Enhanced Mixed-Integer Optimization**, an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data.
**MIPLearn** is an extensible framework for solving discrete optimization problems using a combination of Mixed-Integer Linear Programming (MIP) and Machine Learning (ML). The framework uses ML methods to automatically identify patterns in previously solved instances of the problem, then uses these patterns to accelerate the performance of conventional state-of-the-art MIP solvers (such as CPLEX, Gurobi or XPRESS).
The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see [benchmark results](problems.md) and [references](about.md#references) for more details).
Unlike pure ML methods, MIPLearn is not only able to find high-quality solutions to discrete optimization problems, but it can also prove the optimality and feasibility of these solutions.
Unlike conventional MIP solvers, MIPLearn can take full advantage of very specific observations that happen to be true in a particular family of instances (such as the observation that a particular constraint is typically redundant, or that a particular variable typically assumes a certain value).
### Features
For certain classes of problems, this approach has been shown to provide significant performance benefits (see [benchmarks](benchmark.md) and [references](about.md)).
## Features
* **MIPLearn proposes a flexible problem specification format,** which allows users to describe their particular optimization problems to a Learning-Enhanced MIP solver, both from the MIP perspective and from the ML perspective, without making any assumptions on the problem being modeled, the mathematical formulation of the problem, or ML encoding. While the format is very flexible, some constraints are enforced to ensure that it is usable by an actual solver.
@@ -14,15 +17,19 @@ The package uses Machine Learning (ML) to automatically identify patterns in pre
* **MIPLearn is customizable and extensible**. For MIP and ML researchers exploring new techniques to accelerate MIP performance based on historical data, each component of the reference solver can be individually replaced, extended or customized.
### Documentation
## Site contents
* [Installation and typical usage](usage.md)
* [Benchmark utilities](benchmark.md)
* [Benchmark problems, challenges and results](problems.md)
* [Customizing the solver](customization.md)
* [License, authors, references and acknowledgments](about.md)
```{toctree}
---
maxdepth: 2
---
usage.md
benchmark.md
customization.md
about.md
```
### Source Code
## Source Code
* [https://github.com/ANL-CEEESA/MIPLearn](https://github.com/ANL-CEEESA/MIPLearn)

View File

@@ -1,8 +0,0 @@
MathJax.Hub.Config({
"tex2jax": { inlineMath: [ [ '$', '$' ] ] }
});
MathJax.Hub.Config({
config: ["MMLorHTML.js"],
jax: ["input/TeX", "output/HTML-CSS", "output/NativeMML"],
extensions: ["MathMenu.js", "MathZoom.js"]
});

View File

@@ -1,167 +0,0 @@
# Benchmark Problems, Challenges and Results
MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, that can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. In this page, we describe these problems, the included instance generators, and we present some benchmark results for `LearningSolver` with default parameters.
## Preliminaries
### Benchmark challenges
When evaluating the performance of a conventional MIP solver, *benchmark sets*, such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques are typically measured as the average (or total) running time the solver takes to solve the entire benchmark set. For Learning-Enhanced MIP solvers, it is also necessary to specify what instances the solver should be trained on (the *training instances*) before solving the actual set of instances we are interested in (the *test instances*). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger performance benefits.
In MIPLearn, each optimization problem comes with a set of **benchmark challenges**, which specify how the training and test instances should be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from.
### Baseline results
To illustrate the performance of `LearningSolver`, and to set a baseline for newly proposed techniques, we present in this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. For more detailed computational studies, see [references](about.md#references). We compare three solvers:
* **baseline:** Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver)
* **ml-exact:** `LearningSolver` with default settings, using Gurobi 9.0 as internal MIP solver
* **ml-heuristic:** Same as above, but with `mode="heuristic"`
All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz). All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously at a time.
## Maximum Weight Stable Set Problem
### Problem definition
Given a simple undirected graph $G=(V,E)$ and weights $w \in \mathbb{R}^V$, the problem is to find a stable set $S \subseteq V$ that maximizes $ \sum_{v \in V} w_v$. We recall that a subset $S \subseteq V$ is a *stable set* if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems.
### Random instance generator
The class `MaxWeightStableSetGenerator` can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter `fix_graph=True` is provided, one random Erdős-Rényi graph $G_{n,p}$ is generated during the constructor, where $n$ and $p$ are sampled from user-provided probability distributions `n` and `p`. To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution `w`. When `fix_graph=False`, a new random graph is generated for each instance, while the remaining parameters are sampled in the same way.
### Challenge A
* Fixed random Erdős-Rényi graph $G_{n,p}$ with $n=200$ and $p=5\%$
* Random vertex weights $w_v \sim U(100, 150)$
* 500 training instances, 50 test instances
```python
MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.),
n=randint(low=200, high=201),
p=uniform(loc=0.05, scale=0.0),
fix_graph=True)
```
![alt](figures/benchmark_stab_a.png)
## Traveling Salesman Problem
### Problem definition
Given a list of cities and the distance between each pair of cities, the problem asks for the
shortest route starting at the first city, visiting each other city exactly once, then returning
to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's
21 NP-complete problems.
### Random problem generator
The class `TravelingSalesmanGenerator` can be used to generate random instances of this
problem. Initially, the generator creates $n$ cities $(x_1,y_1),\ldots,(x_n,y_n) \in \mathbb{R}^2$,
where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions `n`,
`x` and `y`. For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to:
$$
d_{i,j} = \gamma_{i,j} \sqrt{(x_i-x_j)^2 + (y_i - y_j)^2}
$$
where $\gamma_{i,j}$ is sampled from the distribution `gamma`.
If `fix_cities=True` is provided, the list of cities is kept the same for all generated instances.
The $\gamma$ values, and therefore also the distances, are still different.
By default, all distances $d_{i,j}$ are rounded to the nearest integer. If `round=False`
is provided, this rounding will be disabled.
### Challenge A
* Fixed list of 350 cities in the $[0, 1000]^2$ square
* $\gamma_{i,j} \sim U(0.95, 1.05)$
* 500 training instances, 50 test instances
```python
TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
y=uniform(loc=0.0, scale=1000.0),
n=randint(low=350, high=351),
gamma=uniform(loc=0.95, scale=0.1),
fix_cities=True,
round=True,
)
```
![alt](figures/benchmark_tsp_a.png)
## Multidimensional 0-1 Knapsack Problem
### Problem definition
Given a set of $n$ items and $m$ types of resources (also called *knapsacks*), the problem is to find a subset of items that maximizes profit without consuming more resources than are available. More precisely, the problem is:
\begin{align*}
\text{maximize}
& \sum_{j=1}^n p_j x_j
\\
\text{subject to}
& \sum_{j=1}^n w_{ij} x_j \leq b_i
& \forall i=1,\ldots,m \\
& x_j \in \{0,1\}
& \forall j=1,\ldots,n
\end{align*}
### Random instance generator
The class `MultiKnapsackGenerator` can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions `n` and `m`. The weights $w_{ij}$ are sampled independently from the provided distribution `w`. The capacity of knapsack $i$ is set to
$$
b_i = \alpha_i \sum_{j=1}^n w_{ij}
$$
where $\alpha_i$, the tightness ratio, is sampled from the provided probability
distribution `alpha`. To make the instances more challenging, the costs of the items
are linearly correlated to their average weights. More specifically, the price of each
item $j$ is set to:
$$
p_j = \sum_{i=1}^m \frac{w_{ij}}{m} + K u_j,
$$
where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled
from the provided probability distributions `K` and `u`.
If `fix_w=True` is provided, then $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as `u` and `K` are not constants, the generated instances will still not be completely identical.
If a probability distribution `w_jitter` is provided, then item weights will be set to $w_{ij} \gamma_{ij}$ where $\gamma_{ij}$ is sampled from `w_jitter`. When combined with `fix_w=True`, this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead.
By default, all generated prices, weights and capacities are rounded to the nearest integer number. If `round=False` is provided, this rounding will be disabled.
!!! note "References"
* Freville, Arnaud, and Gérard Plateau. *An efficient preprocessing procedure for the multidimensional 0-1 knapsack problem.* Discrete Applied Mathematics 49.1-3 (1994): 189-212.
* Fréville, Arnaud. *The multidimensional 0-1 knapsack problem: An overview.* European Journal of Operational Research 155.1 (2004): 1-21.
### Challenge A
* 250 variables, 10 constraints, fixed weights
* $w \sim U(0, 1000), \gamma \sim U(0.95, 1.05)$
* $K = 500, u \sim U(0, 1), \alpha = 0.25$
* 500 training instances, 50 test instances
```python
MultiKnapsackGenerator(n=randint(low=250, high=251),
m=randint(low=10, high=11),
w=uniform(loc=0.0, scale=1000.0),
K=uniform(loc=500.0, scale=0.0),
u=uniform(loc=0.0, scale=1.0),
alpha=uniform(loc=0.25, scale=0.0),
fix_w=True,
w_jitter=uniform(loc=0.95, scale=0.1),
)
```
![alt](figures/benchmark_knapsack_a.png)

View File

@@ -1,13 +1,21 @@
# Usage
```{sectnum}
---
start: 1
depth: 2
suffix: .
---
```
## 1. Installation
# Using MIPLearn
In these docs, we describe the Python/Pyomo version of the package, although a [Julia/JuMP version](https://github.com/ANL-CEEESA/MIPLearn.jl) is also available. A mixed-integer solver is also required and its Python bindings must be properly installed. Supported solvers are currently CPLEX and Gurobi.
## Installation
In these docs, we describe the Python/Pyomo version of the package, although a [Julia/JuMP version](https://github.com/ANL-CEEESA/MIPLearn.jl) is also available. A mixed-integer solver is also required and its Python bindings must be properly installed. Supported solvers are currently CPLEX, Gurobi and XPRESS.
To install MIPLearn, run:
```bash
pip3 install miplearn
pip3 install --upgrade miplearn==0.2.*
```
After installation, the package `miplearn` should become available to Python. It can be imported
@@ -17,7 +25,7 @@ as follows:
import miplearn
```
## 2. Using `LearningSolver`
## Using `LearningSolver`
The main class provided by this package is `LearningSolver`, a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage:
@@ -46,7 +54,7 @@ for instance in test_instances:
In this example, we have two lists of user-provided instances: `training_instances` and `test_instances`. We start by solving all training instances. Since there is no historical information available at this point, the instances will be processed from scratch, with no ML acceleration. After solving each instance, the solver stores within each `instance` object the optimal solution, the optimal objective value, and other information that can be used to accelerate future solves. After all training instances are solved, we call `solver.fit(training_instances)`. This instructs the solver to train all its internal machine-learning models based on the solutions of the (solved) trained instances. Subsequent calls to `solver.solve(instance)` will automatically use the trained Machine Learning models to accelerate the solution process.
## 3. Describing problem instances
## Describing problem instances
Instances to be solved by `LearningSolver` must derive from the abstract class `miplearn.Instance`. The following three abstract methods must be implemented:
@@ -61,13 +69,13 @@ An optional method which can be implemented is `instance.get_variable_category(v
It is not necessary to have a one-to-one correspondence between features and problem instances. One important (and deliberate) limitation of MIPLearn, however, is that `get_instance_features()` must always return arrays of same length for all relevant instances of the problem. Similarly, `get_variable_features(var_name, index)` must also always return arrays of same length for all variables in each category. It is up to the user to decide how to encode variable-length characteristics of the problem into fixed-length vectors. In graph problems, for example, graph embeddings can be used to reduce the (variable-length) lists of nodes and edges into a fixed-length structure that still preserves some properties of the graph. Different instance encodings may have significant impact on performance.
## 4. Describing lazy constraints
## Describing lazy constraints
For many MIP formulations, it is not desirable to add all constraints up-front, either because the total number of constraints is very large, or because some of the constraints, even in relatively small numbers, can still cause significant performance impact when added to the formulation. In these situations, it may be desirable to generate and add constraints incrementally, during the solution process itself. Conventional MIP solvers typically start by solving the problem without any lazy constraints. Whenever a candidate solution is found, the solver finds all violated lazy constraints and adds them to the formulation. MIPLearn significantly accelerates this process by using ML to predict which lazy constraints should be enforced from the very beginning of the optimization process, even before a candidate solution is available.
MIPLearn supports two types of lazy constraints: through constraint annotations and through callbacks.
### 4.1 Adding lazy constraints through annotations
### Adding lazy constraints through annotations
The easiest way to create lazy constraints in MIPLearn is to add them to the model (just like any regular constraints), then annotate them as lazy, as described below. Just before the optimization starts, MIPLearn removes all lazy constraints from the model and places them in a lazy constraint pool. If any trained ML models are available, MIPLearn queries these models to decide which of these constraints should be moved back into the formulation. After this step, the optimization starts, and lazy constraints from the pool are added to the model in the conventional fashion.
@@ -84,7 +92,7 @@ An additional method that can be implemented is `get_lazy_constraint_category(ci
!!! warning
If two lazy constraints belong to the same category, their feature vectors should have the same length.
### 4.2 Adding lazy constraints through callbacks
### Adding lazy constraints through callbacks
Although convenient, the method described in the previous subsection still requires the generation of all lazy constraints ahead of time, which can be prohibitively expensive. An alternative method is through lazy constraint callbacks, described below. During the solution process, MIPLearn will repeatedly call a user-provided function to identify any violated lazy constraints. If violated constraints are identified, MIPLearn will additionally call another user-provided function to generate the constraint and add it to the formulation.
@@ -101,28 +109,29 @@ Assuming that trained ML models are available, immediately after calling `solver
After the optimization process starts, MIPLearn will periodically call `find_violated_lazy_constraints` to verify if the current solution violates any lazy constraints. If any violated lazy constraints are found, MIPLearn will call the method `build_violated_lazy_constraints` and add the returned constraints to the formulation.
!!! note
When implementing `find_violated_lazy_constraints(self, model)`, the current solution may be accessed through `self.solution[var_name][index]`.
```{tip}
When implementing `find_violated_lazy_constraints(self, model)`, the current solution may be accessed through `self.solution[var_name][index]`.
```
## 5. Obtaining heuristic solutions
## Obtaining heuristic solutions
By default, `LearningSolver` uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver. In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts.
For more significant performance benefits, `LearningSolver` can also be configured to place additional trust in the Machine Learning predictors, by using the `mode="heuristic"` constructor argument. When operating in this mode, if a ML model is statistically shown (through *stratified k-fold cross validation*) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see [references](about.md#references) and [benchmark results](benchmark.md)).
!!! danger
The `heuristic` mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible.
```{danger}
The `heuristic` mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible.
```
## Scaling Up
## 6. Saving and loading solver state
### Saving and loading solver state
After solving a large number of training instances, it may be desirable to save the current state of `LearningSolver` to disk, so that the solver can still use the acquired knowledge after the application restarts. This can be accomplished by using the standard `pickle` module, as the following example illustrates:
After solving a large number of training instances, it may be desirable to save the current state of `LearningSolver` to disk, so that the solver can still use the acquired knowledge after the application restarts. This can be accomplished by using the utility functions `write_pickle_gz` and `read_pickle_gz`, as the following example illustrates:
```python
from miplearn import LearningSolver
import pickle
from miplearn import LearningSolver, write_pickle_gz, read_pickle_gz
# Solve training instances
training_instances = [...]
@@ -134,12 +143,12 @@ for instance in training_instances:
solver.fit(training_instances)
# Save trained solver to disk
pickle.dump(solver, open("solver.pickle", "wb"))
write_pickle_gz(solver, "solver.pkl.gz")
# Application restarts...
# Load trained solver from disk
solver = pickle.load(open("solver.pickle", "rb"))
solver = read_pickle_gz("solver.pkl.gz")
# Solve additional instances
test_instances = [...]
@@ -148,9 +157,9 @@ for instance in test_instances:
```
## 7. Solving training instances in parallel
### Solving instances in parallel
In many situations, training and test instances can be solved in parallel to accelerate the training process. `LearningSolver` provides the method `parallel_solve(instances)` to easily achieve this:
In many situations, instances can be solved in parallel to accelerate the training process. `LearningSolver` provides the method `parallel_solve(instances)` to easily achieve this:
```python
from miplearn import LearningSolver
@@ -166,6 +175,72 @@ solver.parallel_solve(test_instances)
```
## 8. Current Limitations
### Solving instances from the disk
* Only binary and continuous decision variables are currently supported.
In all examples above, we have assumed that instances are available as Python objects, stored in memory. When problem instances are very large, or when there is a large number of problem instances, this approach may require an excessive amount of memory. To reduce memory requirements, MIPLearn can also operate on instances that are stored on disk, through the `PickleGzInstance` class, as the next example illustrates.
```python
import pickle
from miplearn import (
LearningSolver,
PickleGzInstance,
write_pickle_gz,
)
# Construct and pickle 600 problem instances
for i in range(600):
instance = MyProblemInstance([...])
write_pickle_gz(instance, "instance_%03d.pkl" % i)
# Split instances into training and test
test_instances = [PickleGzInstance("instance_%03d.pkl" % i) for i in range(500)]
train_instances = [PickleGzInstance("instance_%03d.pkl" % i) for i in range(500, 600)]
# Create solver
solver = LearningSolver([...])
# Solve training instances
solver.parallel_solve(train_instances, n_jobs=4)
# Train ML models
solver.fit(train_instances)
# Solve test instances
solver.parallel_solve(test_instances, n_jobs=4)
```
By default, `solve` and `parallel_solve` modify files in place. That is, after the instances are loaded from disk and solved, MIPLearn writes them back to the disk, overwriting the original files. To discard the modifications instead, use `LearningSolver(..., discard_outputs=True)`. This can be useful, for example, during benchmarks.
## Running benchmarks
MIPLearn provides the utility class `BenchmarkRunner`, which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage:
```python
from miplearn import BenchmarkRunner, LearningSolver
# Create train and test instances
train_instances = [...]
test_instances = [...]
# Training phase...
training_solver = LearningSolver(...)
training_solver.parallel_solve(train_instances, n_jobs=10)
# Test phase...
benchmark = BenchmarkRunner({
"Baseline": LearningSolver(...),
"Strategy A": LearningSolver(...),
"Strategy B": LearningSolver(...),
"Strategy C": LearningSolver(...),
})
benchmark.fit(train_instances)
benchmark.parallel_solve(test_instances, n_jobs=5)
benchmark.write_csv("results.csv")
```
The method `fit` trains the ML models for each individual solver. The method `parallel_solve` solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, `write_csv` produces a table of results. The columns in the CSV file depend on the components added to the solver.
## Current Limitations
* Only binary and continuous decision variables are currently supported. General integer variables are not currently supported by some solver components.

View File

@@ -1,32 +1,29 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from .extractors import (SolutionExtractor,
InstanceFeaturesExtractor,
ObjectiveValueExtractor,
VariableFeaturesExtractor)
from .components.component import Component
from .components.objective import ObjectiveValueComponent
from .components.lazy_dynamic import DynamicLazyConstraintsComponent
from .components.lazy_static import StaticLazyConstraintsComponent
from .components.cuts import UserCutsComponent
from .components.primal import PrimalSolutionComponent
from .components.relaxation import RelaxationComponent
from .classifiers.adaptive import AdaptiveClassifier
from .classifiers.threshold import MinPrecisionThreshold
from .benchmark import BenchmarkRunner
from .instance import Instance
from .solvers.pyomo.base import BasePyomoSolver
from .solvers.pyomo.cplex import CplexPyomoSolver
from .solvers.pyomo.gurobi import GurobiPyomoSolver
from .classifiers import Classifier, Regressor
from .classifiers.adaptive import AdaptiveClassifier
from .classifiers.sklearn import ScikitLearnRegressor, ScikitLearnClassifier
from .classifiers.threshold import MinPrecisionThreshold
from .components.component import Component
from .components.dynamic_lazy import DynamicLazyConstraintsComponent
from .components.dynamic_user_cuts import UserCutsComponent
from .components.objective import ObjectiveValueComponent
from .components.primal import PrimalSolutionComponent
from .components.static_lazy import StaticLazyConstraintsComponent
from .instance.base import Instance
from .instance.picklegz import (
PickleGzInstance,
write_pickle_gz,
read_pickle_gz,
write_pickle_gz_multiple,
)
from .log import setup_logger
from .solvers.gurobi import GurobiSolver
from .solvers.internal import InternalSolver
from .solvers.learning import LearningSolver
from .log import setup_logger
from .solvers.pyomo.base import BasePyomoSolver
from .solvers.pyomo.cplex import CplexPyomoSolver
from .solvers.pyomo.gurobi import GurobiPyomoSolver

View File

@@ -1,203 +1,129 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from copy import deepcopy
import logging
import os
from typing import Dict, List
import pandas as pd
import numpy as np
import logging
from tqdm.auto import tqdm
from .solvers.learning import LearningSolver
from miplearn.components.component import Component
from miplearn.instance.base import Instance
from miplearn.solvers.learning import LearningSolver
logger = logging.getLogger(__name__)
class BenchmarkRunner:
def __init__(self, solvers):
assert isinstance(solvers, dict)
for solver in solvers.values():
assert isinstance(solver, LearningSolver)
self.solvers = solvers
self.results = None
"""
Utility class that simplifies the task of comparing the performance of different
solvers.
def solve(self, instances, tee=False):
for (solver_name, solver) in self.solvers.items():
for i in tqdm(range(len((instances)))):
results = solver.solve(deepcopy(instances[i]), tee=tee)
self._push_result(results, solver=solver, solver_name=solver_name, instance=i)
Example
-------
```python
benchmark = BenchmarkRunner({
"Baseline": LearningSolver(...),
"Strategy A": LearningSolver(...),
"Strategy B": LearningSolver(...),
"Strategy C": LearningSolver(...),
})
benchmark.fit(train_instances)
benchmark.parallel_solve(test_instances, n_jobs=5)
benchmark.save_results("result.csv")
```
def parallel_solve(self,
instances,
n_jobs=1,
n_trials=1,
index_offset=0,
):
Parameters
----------
solvers: Dict[str, LearningSolver]
Dictionary containing the solvers to compare. Solvers may have different
arguments and components. The key should be the name of the solver. It
appears in the exported tables of results.
"""
def __init__(self, solvers: Dict[str, LearningSolver]) -> None:
self.solvers: Dict[str, LearningSolver] = solvers
self.results = pd.DataFrame(
columns=[
"Solver",
"Instance",
]
)
def parallel_solve(
self,
instances: List[Instance],
n_jobs: int = 1,
n_trials: int = 3,
) -> None:
"""
Solves the given instances in parallel and collect benchmark statistics.
Parameters
----------
instances: List[Instance]
List of instances to solve. This can either be a list of instances
already loaded in memory, or a list of filenames pointing to pickled (and
optionally gzipped) files.
n_jobs: int
List of instances to solve in parallel at a time.
n_trials: int
How many times each instance should be solved.
"""
self._silence_miplearn_logger()
trials = instances * n_trials
for (solver_name, solver) in self.solvers.items():
results = solver.parallel_solve(trials,
results = solver.parallel_solve(
trials,
n_jobs=n_jobs,
label="Solve (%s)" % solver_name)
label="Solve (%s)" % solver_name,
discard_outputs=True,
)
for i in range(len(trials)):
idx = (i % len(instances)) + index_offset
self._push_result(results[i],
solver=solver,
solver_name=solver_name,
instance=idx)
idx = i % len(instances)
results[i]["Solver"] = solver_name
results[i]["Instance"] = idx
self.results = self.results.append(pd.DataFrame([results[i]]))
self._restore_miplearn_logger()
def raw_results(self):
return self.results
def write_csv(self, filename: str) -> None:
"""
Writes the collected results to a CSV file.
def save_results(self, filename):
Parameters
----------
filename: str
The name of the file.
"""
os.makedirs(os.path.dirname(filename), exist_ok=True)
self.results.to_csv(filename)
def load_results(self, filename):
self.results = pd.read_csv(filename, index_col=0)
def fit(self, instances: List[Instance], n_jobs: int = 1) -> None:
"""
Trains all solvers with the provided training instances.
def load_state(self, filename):
for (solver_name, solver) in self.solvers.items():
solver.load_state(filename)
def fit(self, training_instances):
for (solver_name, solver) in self.solvers.items():
solver.fit(training_instances)
def _push_result(self, result, solver, solver_name, instance):
if self.results is None:
self.results = pd.DataFrame(columns=["Solver",
"Instance",
"Wallclock Time",
"Lower Bound",
"Upper Bound",
"Gap",
"Nodes",
"Mode",
"Sense",
"Predicted LB",
"Predicted UB",
])
lb = result["Lower bound"]
ub = result["Upper bound"]
gap = (ub - lb) / lb
if "Predicted LB" not in result:
result["Predicted LB"] = float("nan")
result["Predicted UB"] = float("nan")
self.results = self.results.append({
"Solver": solver_name,
"Instance": instance,
"Wallclock Time": result["Wallclock time"],
"Lower Bound": lb,
"Upper Bound": ub,
"Gap": gap,
"Nodes": result["Nodes"],
"Mode": solver.mode,
"Sense": result["Sense"],
"Predicted LB": result["Predicted LB"],
"Predicted UB": result["Predicted UB"],
}, ignore_index=True)
groups = self.results.groupby("Instance")
best_lower_bound = groups["Lower Bound"].transform("max")
best_upper_bound = groups["Upper Bound"].transform("min")
best_gap = groups["Gap"].transform("min")
best_nodes = np.maximum(1, groups["Nodes"].transform("min"))
best_wallclock_time = groups["Wallclock Time"].transform("min")
self.results["Relative Lower Bound"] = \
self.results["Lower Bound"] / best_lower_bound
self.results["Relative Upper Bound"] = \
self.results["Upper Bound"] / best_upper_bound
self.results["Relative Wallclock Time"] = \
self.results["Wallclock Time"] / best_wallclock_time
self.results["Relative Gap"] = \
self.results["Gap"] / best_gap
self.results["Relative Nodes"] = \
self.results["Nodes"] / best_nodes
def save_chart(self, filename):
import matplotlib.pyplot as plt
import seaborn as sns
from numpy import median
sns.set_style("whitegrid")
sns.set_palette("Blues_r")
results = self.raw_results()
results["Gap (%)"] = results["Gap"] * 100.0
sense = results.loc[0, "Sense"]
if sense == "min":
primal_column = "Relative Upper Bound"
obj_column = "Upper Bound"
predicted_obj_column = "Predicted UB"
else:
primal_column = "Relative Lower Bound"
obj_column = "Lower Bound"
predicted_obj_column = "Predicted LB"
fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1,
ncols=4,
figsize=(12,4),
gridspec_kw={'width_ratios': [2, 1, 1, 2]})
# Figure 1: Solver x Wallclock Time
sns.stripplot(x="Solver",
y="Wallclock Time",
data=results,
ax=ax1,
jitter=0.25,
size=4.0,
)
sns.barplot(x="Solver",
y="Wallclock Time",
data=results,
ax=ax1,
errwidth=0.,
alpha=0.4,
estimator=median,
)
ax1.set(ylabel='Wallclock Time (s)')
# Figure 2: Solver x Gap (%)
ax2.set_ylim(-0.5, 5.5)
sns.stripplot(x="Solver",
y="Gap (%)",
jitter=0.25,
data=results[results["Mode"] != "heuristic"],
ax=ax2,
size=4.0,
Parameters
----------
instances: List[Instance]
List of training instances.
n_jobs: int
Number of parallel processes to use.
"""
components: List[Component] = []
for solver in self.solvers.values():
components += solver.components.values()
Component.fit_multiple(
components,
instances,
n_jobs=n_jobs,
)
# Figure 3: Solver x Primal Value
ax3.set_ylim(0.95,1.05)
sns.stripplot(x="Solver",
y=primal_column,
jitter=0.25,
data=results[results["Mode"] == "heuristic"],
ax=ax3,
)
# Figure 4: Predicted vs Actual Objective Value
sns.scatterplot(x=obj_column,
y=predicted_obj_column,
hue="Solver",
data=results[results["Mode"] != "heuristic"],
ax=ax4,
)
xlim, ylim = ax4.get_xlim(), ax4.get_ylim()
ax4.plot([-1e10, 1e10], [-1e10, 1e10], ls='-', color="#cccccc")
ax4.set_xlim(xlim)
ax4.set_ylim(ylim)
ax4.get_legend().remove()
fig.tight_layout()
plt.savefig(filename, bbox_inches='tight', dpi=150)
def _silence_miplearn_logger(self):
def _silence_miplearn_logger(self) -> None:
miplearn_logger = logging.getLogger("miplearn")
self.prev_log_level = miplearn_logger.getEffectiveLevel()
miplearn_logger.setLevel(logging.WARNING)
def _restore_miplearn_logger(self):
def _restore_miplearn_logger(self) -> None:
miplearn_logger = logging.getLogger("miplearn")
miplearn_logger.setLevel(self.prev_log_level)

View File

@@ -1,33 +1,163 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from abc import ABC, abstractmethod
from typing import Optional
import numpy as np
class Classifier(ABC):
@abstractmethod
def fit(self, x_train, y_train):
pass
"""
A Classifier decides which class each sample belongs to, based on historical
data.
"""
def __init__(self) -> None:
self.n_features: Optional[int] = None
self.n_classes: Optional[int] = None
@abstractmethod
def predict_proba(self, x_test):
pass
def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
"""
Trains the classifier.
def predict(self, x_test):
proba = self.predict_proba(x_test)
assert isinstance(proba, np.ndarray)
assert proba.shape == (x_test.shape[0], 2)
return (proba[:, 1] > 0.5).astype(float)
Parameters
----------
x_train: np.ndarray
An array of features with shape (`n_samples`, `n_features`). Each entry
must be a float.
y_train: np.ndarray
An array of labels with shape (`n_samples`, `n_classes`). Each entry must be
a bool, and there must be exactly one True element in each row.
"""
assert isinstance(x_train, np.ndarray)
assert isinstance(y_train, np.ndarray)
assert x_train.dtype in [
np.float16,
np.float32,
np.float64,
], f"x_train.dtype shoule be float. Found {x_train.dtype} instead."
assert y_train.dtype == np.bool8
assert len(x_train.shape) == 2
assert len(y_train.shape) == 2
(n_samples_x, n_features) = x_train.shape
(n_samples_y, n_classes) = y_train.shape
assert n_samples_y == n_samples_x
self.n_features = n_features
self.n_classes = n_classes
@abstractmethod
def predict_proba(self, x_test: np.ndarray) -> np.ndarray:
"""
Predicts the probability of each sample belonging to each class. Must be called
after fit.
Parameters
----------
x_test: np.ndarray
An array of features with shape (`n_samples`, `n_features`). The number of
features in `x_test` must match the number of features in `x_train` provided
to `fit`.
Returns
-------
np.ndarray
An array of predicted probabilities with shape (`n_samples`, `n_classes`),
where `n_classes` is the number of columns in `y_train` provided to `fit`.
"""
assert self.n_features is not None
assert isinstance(x_test, np.ndarray)
assert len(x_test.shape) == 2
(n_samples, n_features_x) = x_test.shape
assert n_features_x == self.n_features, (
f"Test and training data have different number of "
f"features: {n_features_x} != {self.n_features}"
)
return np.ndarray([])
@abstractmethod
def clone(self) -> "Classifier":
"""
Returns an unfitted copy of this classifier with the same hyperparameters.
"""
pass
class Regressor(ABC):
@abstractmethod
def fit(self, x_train, y_train):
pass
"""
A Regressor tries to predict the values of some continous variables, given the
values of other variables.
"""
def __init__(self) -> None:
self.n_inputs: Optional[int] = None
@abstractmethod
def predict(self):
def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
"""
Trains the regressor.
Parameters
----------
x_train: np.ndarray
An array of inputs with shape (`n_samples`, `n_inputs`). Each entry must be
a float.
y_train: np.ndarray
An array of outputs with shape (`n_samples`, `n_outputs`). Each entry must
be a float.
"""
assert isinstance(x_train, np.ndarray)
assert isinstance(y_train, np.ndarray)
assert x_train.dtype in [np.float16, np.float32, np.float64]
assert y_train.dtype in [np.float16, np.float32, np.float64]
assert len(x_train.shape) == 2, (
f"Parameter x_train should be a square matrix. "
f"Found {x_train.shape} ndarray instead."
)
assert len(y_train.shape) == 2, (
f"Parameter y_train should be a square matrix. "
f"Found {y_train.shape} ndarray instead."
)
(n_samples_x, n_inputs) = x_train.shape
(n_samples_y, n_outputs) = y_train.shape
assert n_samples_y == n_samples_x
self.n_inputs = n_inputs
@abstractmethod
def predict(self, x_test: np.ndarray) -> np.ndarray:
"""
Predicts the values of the output variables. Must be called after fit.
Parameters
----------
x_test: np.ndarray
An array of inputs with shape (`n_samples`, `n_inputs`), where `n_inputs`
must match the number of columns in `x_train` provided to `fit`.
Returns
-------
np.ndarray
An array of outputs with shape (`n_samples`, `n_outputs`), where
`n_outputs` is the number of columns in `y_train` provided to `fit`.
"""
assert self.n_inputs is not None
assert isinstance(x_test, np.ndarray), (
f"Parameter x_train must be np.ndarray. "
f"Found {x_test.__class__.__name__} instead."
)
assert len(x_test.shape) == 2
(n_samples, n_inputs_x) = x_test.shape
assert n_inputs_x == self.n_inputs, (
f"Test and training data have different number of "
f"inputs: {n_inputs_x} != {self.n_inputs}"
)
return np.ndarray([])
@abstractmethod
def clone(self) -> "Regressor":
"""
Returns an unfitted copy of this regressor with the same hyperparameters.
"""
pass

View File

@@ -1,66 +1,120 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from copy import deepcopy
from typing import Dict, Optional
from miplearn.classifiers import Classifier
from miplearn.classifiers.counting import CountingClassifier
from miplearn.classifiers.evaluator import ClassifierEvaluator
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from miplearn.classifiers import Classifier
from miplearn.classifiers.counting import CountingClassifier
from miplearn.classifiers.sklearn import ScikitLearnClassifier
logger = logging.getLogger(__name__)
class CandidateClassifierSpecs:
"""
Specifications describing how to construct a certain classifier, and under
which circumstances it can be used.
Parameters
----------
min_samples: int
Minimum number of samples for this classifier to be considered.
classifier: Callable[[], Classifier]
Callable that constructs the classifier.
"""
def __init__(
self,
classifier: Classifier,
min_samples: int = 0,
) -> None:
self.min_samples = min_samples
self.classifier = classifier
class AdaptiveClassifier(Classifier):
"""
A meta-classifier which dynamically selects what actual classifier to use
based on its cross-validation score on a particular training data set.
Parameters
----------
candidates: Dict[str, CandidateClassifierSpecs]
A dictionary of candidate classifiers to consider, mapping the name of the
candidate to its specs, which describes how to construct it and under what
scenarios. If no candidates are provided, uses a fixed set of defaults,
which includes `CountingClassifier`, `KNeighborsClassifier` and
`LogisticRegression`.
"""
def __init__(self,
candidates=None,
evaluator=ClassifierEvaluator()):
"""
Initializes the meta-classifier.
"""
def __init__(
self,
candidates: Optional[Dict[str, CandidateClassifierSpecs]] = None,
) -> None:
super().__init__()
if candidates is None:
candidates = {
"knn(100)": {
"classifier": KNeighborsClassifier(n_neighbors=100),
"min samples": 100,
},
"logistic": {
"classifier": make_pipeline(StandardScaler(),
LogisticRegression()),
"min samples": 30,
},
"counting": {
"classifier": CountingClassifier(),
"min samples": 0,
}
"knn(100)": CandidateClassifierSpecs(
classifier=ScikitLearnClassifier(
KNeighborsClassifier(n_neighbors=100)
),
min_samples=100,
),
"logistic": CandidateClassifierSpecs(
classifier=ScikitLearnClassifier(
make_pipeline(
StandardScaler(),
LogisticRegression(),
)
),
min_samples=30,
),
"counting": CandidateClassifierSpecs(
classifier=CountingClassifier(),
),
}
self.candidates = candidates
self.evaluator = evaluator
self.classifier = None
self.classifier: Optional[Classifier] = None
def fit(self, x_train, y_train):
best_name, best_clf, best_score = None, None, -float("inf")
def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
super().fit(x_train, y_train)
n_samples = x_train.shape[0]
for (name, clf_dict) in self.candidates.items():
if n_samples < clf_dict["min samples"]:
assert y_train.shape == (n_samples, 2)
# If almost all samples belong to the same class, return a fixed prediction and
# skip all the other steps.
if y_train[:, 0].mean() > 0.999 or y_train[:, 1].mean() > 0.999:
self.classifier = CountingClassifier()
self.classifier.fit(x_train, y_train)
return
best_name, best_clf, best_score = None, None, -float("inf")
for (name, specs) in self.candidates.items():
if n_samples < specs.min_samples:
continue
clf = deepcopy(clf_dict["classifier"])
clf = specs.classifier.clone()
clf.fit(x_train, y_train)
score = self.evaluator.evaluate(clf, x_train, y_train)
proba = clf.predict_proba(x_train)
# FIXME: Switch to k-fold cross validation
score = roc_auc_score(y_train[:, 1], proba[:, 1])
if score > best_score:
best_name, best_clf, best_score = name, clf, score
logger.debug("Best classifier: %s (score=%.3f)" % (best_name, best_score))
self.classifier = best_clf
def predict_proba(self, x_test):
def predict_proba(self, x_test: np.ndarray) -> np.ndarray:
super().predict_proba(x_test)
assert self.classifier is not None
return self.classifier.predict_proba(x_test)
def clone(self) -> "AdaptiveClassifier":
return AdaptiveClassifier(self.candidates)

View File

@@ -1,28 +1,45 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from typing import Optional, cast
import numpy as np
from miplearn.classifiers import Classifier
import numpy as np
class CountingClassifier(Classifier):
"""
A classifier that generates constant predictions, based only on the
frequency of the training labels. For example, if y_train is [1.0, 0.0, 0.0]
this classifier always returns [0.66 0.33] for any x_test. It essentially
counts how many times each label appeared, hence the name.
A classifier that generates constant predictions, based only on the frequency of
the training labels. For example, suppose `y_train` is given by:
```python
y_train = np.array([
[True, False],
[False, True],
[False, True],
])
```
Then `predict_proba` always returns `[0.33 0.66]` for every sample, regardless of
`x_train`. It essentially counts how many times each label appeared, hence the name.
"""
def __init__(self):
self.mean = None
def __init__(self) -> None:
super().__init__()
self.mean: Optional[np.ndarray] = None
def fit(self, x_train, y_train):
self.mean = np.mean(y_train)
def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
super().fit(x_train, y_train)
self.mean = cast(np.ndarray, np.mean(y_train, axis=0))
def predict_proba(self, x_test):
return np.array([[1 - self.mean, self.mean]
for _ in range(x_test.shape[0])])
def predict_proba(self, x_test: np.ndarray) -> np.ndarray:
super().predict_proba(x_test)
n_samples = x_test.shape[0]
return np.array([self.mean for _ in range(n_samples)])
def __repr__(self):
def __repr__(self) -> str:
return "CountingClassifier(mean=%s)" % self.mean
def clone(self) -> "CountingClassifier":
return CountingClassifier()

View File

@@ -1,71 +1,132 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from copy import deepcopy
import logging
from typing import Optional, List
import numpy as np
from miplearn.classifiers import Classifier
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
import logging
from miplearn.classifiers import Classifier
from miplearn.classifiers.sklearn import ScikitLearnClassifier
logger = logging.getLogger(__name__)
class CrossValidatedClassifier(Classifier):
"""
A meta-classifier that, upon training, evaluates the performance of another
classifier on the training data set using k-fold cross validation, then
either adopts the other classifier it if the cv-score is high enough, or
returns a constant label for every x_test otherwise.
candidate classifier on the training data set, using k-fold cross validation,
then either adopts it, if its cv-score is high enough, or returns constant
predictions for every x_test, otherwise.
The threshold is specified in comparison to a dummy classifier trained
on the same dataset. For example, a threshold of 0.0 indicates that any
classifier as good as the dummy predictor is acceptable. A threshold of 1.0
indicates that only classifier with a perfect cross-validation score are
acceptable. Other numbers are a linear interpolation of these two extremes.
Parameters
----------
classifier: Callable[[], ScikitLearnClassifier]
A callable that constructs the candidate classifier.
threshold: float
Number from zero to one indicating how well must the candidate classifier
perform to be adopted. The threshold is specified in comparison to a dummy
classifier trained on the same dataset. For example, a threshold of 0.0
indicates that any classifier as good as the dummy predictor is acceptable. A
threshold of 1.0 indicates that only classifiers with perfect
cross-validation scores are acceptable. Other numbers are a linear
interpolation of these two extremes.
constant: Optional[List[bool]]
If the candidate classifier fails to meet the threshold, use a dummy classifier
which always returns this prediction instead. The list should have exactly as
many elements as the number of columns of `x_train` provided to `fit`.
cv: int
Number of folds.
scoring: str
Scoring function.
"""
def __init__(self,
classifier=LogisticRegression(),
threshold=0.75,
constant=0.0,
cv=5,
scoring='accuracy'):
self.classifier = None
def __init__(
self,
classifier: ScikitLearnClassifier = ScikitLearnClassifier(LogisticRegression()),
threshold: float = 0.75,
constant: Optional[List[bool]] = None,
cv: int = 5,
scoring: str = "accuracy",
):
super().__init__()
if constant is None:
constant = [True, False]
self.n_classes = len(constant)
self.classifier: Optional[ScikitLearnClassifier] = None
self.classifier_prototype = classifier
self.constant = constant
self.constant: List[bool] = constant
self.threshold = threshold
self.cv = cv
self.scoring = scoring
def fit(self, x_train, y_train):
def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
super().fit(x_train, y_train)
(n_samples, n_classes) = x_train.shape
assert n_classes == self.n_classes
# Calculate dummy score and absolute score threshold
y_train_avg = np.average(y_train)
dummy_score = max(y_train_avg, 1 - y_train_avg)
absolute_threshold = 1. * self.threshold + dummy_score * (1 - self.threshold)
absolute_threshold = 1.0 * self.threshold + dummy_score * (1 - self.threshold)
# Calculate cross validation score and decide which classifier to use
clf = deepcopy(self.classifier_prototype)
cv_score = float(np.mean(cross_val_score(clf,
clf = self.classifier_prototype.clone()
assert clf is not None
assert isinstance(clf, ScikitLearnClassifier), (
f"The provided classifier callable must return a ScikitLearnClassifier. "
f"Found {clf.__class__.__name__} instead. If this is a scikit-learn "
f"classifier, you must wrap it with ScikitLearnClassifier."
)
cv_score = float(
np.mean(
cross_val_score(
clf.inner_clf,
x_train,
y_train,
y_train[:, 1],
cv=self.cv,
scoring=self.scoring)))
scoring=self.scoring,
)
)
)
if cv_score >= absolute_threshold:
logger.debug("cv_score is above threshold (%.2f >= %.2f); keeping" %
(cv_score, absolute_threshold))
logger.debug(
"cv_score is above threshold (%.2f >= %.2f); keeping"
% (cv_score, absolute_threshold)
)
self.classifier = clf
else:
logger.debug("cv_score is below threshold (%.2f < %.2f); discarding" %
(cv_score, absolute_threshold))
self.classifier = DummyClassifier(strategy="constant",
constant=self.constant)
logger.debug(
"cv_score is below threshold (%.2f < %.2f); discarding"
% (cv_score, absolute_threshold)
)
self.classifier = ScikitLearnClassifier(
DummyClassifier(
strategy="constant",
constant=self.constant[1],
)
)
# Train chosen classifier
assert self.classifier is not None
assert isinstance(self.classifier, ScikitLearnClassifier)
self.classifier.fit(x_train, y_train)
def predict_proba(self, x_test):
def predict_proba(self, x_test: np.ndarray) -> np.ndarray:
super().predict_proba(x_test)
assert self.classifier is not None
return self.classifier.predict_proba(x_test)
def clone(self) -> "CrossValidatedClassifier":
return CrossValidatedClassifier(
classifier=self.classifier_prototype,
threshold=self.threshold,
constant=self.constant,
cv=self.cv,
scoring=self.scoring,
)

View File

@@ -1,15 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from sklearn.metrics import roc_auc_score
class ClassifierEvaluator:
def __init__(self):
pass
def evaluate(self, clf, x_train, y_train):
# FIXME: use cross-validation
proba = clf.predict_proba(x_train)
return roc_auc_score(y_train, proba[:, 1])

View File

@@ -0,0 +1,93 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from typing import Optional, Any, cast
import numpy as np
import sklearn
from miplearn.classifiers import Classifier, Regressor
class ScikitLearnClassifier(Classifier):
"""
Wrapper for ScikitLearn classifiers, which makes sure inputs and outputs have the
correct dimensions and types.
"""
def __init__(self, clf: Any) -> None:
super().__init__()
self.inner_clf = clf
self.constant: Optional[np.ndarray] = None
def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
super().fit(x_train, y_train)
(n_samples, n_classes) = y_train.shape
assert n_classes == 2, (
f"Scikit-learn classifiers must have exactly two classes. "
f"{n_classes} classes were provided instead."
)
# When all samples belong to the same class, sklearn's predict_proba returns
# an array with a single column. The following check avoid this strange
# behavior.
mean = cast(np.ndarray, y_train.astype(float).mean(axis=0))
if mean.max() == 1.0:
self.constant = mean
return
self.inner_clf.fit(x_train, y_train[:, 1])
def predict_proba(self, x_test: np.ndarray) -> np.ndarray:
super().predict_proba(x_test)
n_samples = x_test.shape[0]
if self.constant is not None:
return np.array([self.constant for n in range(n_samples)])
sklearn_proba = self.inner_clf.predict_proba(x_test)
if isinstance(sklearn_proba, list):
assert len(sklearn_proba) == self.n_classes
for pb in sklearn_proba:
assert isinstance(pb, np.ndarray)
assert pb.dtype in [np.float16, np.float32, np.float64]
assert pb.shape == (n_samples, 2)
proba = np.hstack([pb[:, [1]] for pb in sklearn_proba])
assert proba.shape == (n_samples, self.n_classes)
return proba
else:
assert isinstance(sklearn_proba, np.ndarray)
assert sklearn_proba.shape == (n_samples, 2)
return sklearn_proba
def clone(self) -> "ScikitLearnClassifier":
return ScikitLearnClassifier(
clf=sklearn.base.clone(self.inner_clf),
)
class ScikitLearnRegressor(Regressor):
"""
Wrapper for ScikitLearn regressors, which makes sure inputs and outputs have the
correct dimensions and types.
"""
def __init__(self, reg: Any) -> None:
super().__init__()
self.inner_reg = reg
def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
super().fit(x_train, y_train)
self.inner_reg.fit(x_train, y_train)
def predict(self, x_test: np.ndarray) -> np.ndarray:
super().predict(x_test)
n_samples = x_test.shape[0]
sklearn_pred = self.inner_reg.predict(x_test)
assert isinstance(sklearn_pred, np.ndarray)
assert sklearn_pred.shape[0] == n_samples
return sklearn_pred
def clone(self) -> "ScikitLearnRegressor":
return ScikitLearnRegressor(
reg=sklearn.base.clone(self.inner_reg),
)

View File

@@ -1,18 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn.classifiers.counting import CountingClassifier
import numpy as np
from numpy.linalg import norm
E = 0.1
def test_counting():
clf = CountingClassifier()
clf.fit(np.zeros((8, 25)), [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0])
expected_proba = np.array([[0.375, 0.625],
[0.375, 0.625]])
actual_proba = clf.predict_proba(np.zeros((2, 25)))
assert norm(actual_proba - expected_proba) < E

View File

@@ -1,46 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import numpy as np
from miplearn.classifiers.cv import CrossValidatedClassifier
from numpy.linalg import norm
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
E = 0.1
def test_cv():
# Training set: label is true if point is inside a 2D circle
x_train = np.array([[x1, x2]
for x1 in range(-10, 11)
for x2 in range(-10, 11)])
x_train = StandardScaler().fit_transform(x_train)
n_samples = x_train.shape[0]
y_train = np.array([1.0 if x1*x1 + x2*x2 <= 100 else 0.0
for x1 in range(-10, 11)
for x2 in range(-10, 11)])
# Support vector machines with linear kernels do not perform well on this
# data set, so predictor should return the given constant.
clf = CrossValidatedClassifier(classifier=SVC(probability=True,
random_state=42),
threshold=0.90,
constant=0.0,
cv=30)
clf.fit(x_train, y_train)
assert norm(np.zeros(n_samples) - clf.predict(x_train)) < E
# Support vector machines with quadratic kernels perform almost perfectly
# on this data set, so predictor should return their prediction.
clf = CrossValidatedClassifier(classifier=SVC(probability=True,
kernel='poly',
degree=2,
random_state=42),
threshold=0.90,
cv=30)
clf.fit(x_train, y_train)
print(y_train - clf.predict(x_train))
assert norm(y_train - clf.predict(x_train)) < E

View File

@@ -1,20 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import numpy as np
from miplearn.classifiers.evaluator import ClassifierEvaluator
from sklearn.neighbors import KNeighborsClassifier
def test_evaluator():
clf_a = KNeighborsClassifier(n_neighbors=1)
clf_b = KNeighborsClassifier(n_neighbors=2)
x_train = np.array([[0, 0], [1, 0]])
y_train = np.array([0, 1])
clf_a.fit(x_train, y_train)
clf_b.fit(x_train, y_train)
ev = ClassifierEvaluator()
assert ev.evaluate(clf_a, x_train, y_train) == 1.0
assert ev.evaluate(clf_b, x_train, y_train) == 0.5

View File

@@ -1,34 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from unittest.mock import Mock
import numpy as np
from miplearn.classifiers import Classifier
from miplearn.classifiers.threshold import MinPrecisionThreshold
def test_threshold_dynamic():
clf = Mock(spec=Classifier)
clf.predict_proba = Mock(return_value=np.array([
[0.10, 0.90],
[0.10, 0.90],
[0.20, 0.80],
[0.30, 0.70],
]))
x_train = np.array([0, 1, 2, 3])
y_train = np.array([1, 1, 0, 0])
threshold = MinPrecisionThreshold(min_precision=1.0)
assert threshold.find(clf, x_train, y_train) == 0.90
threshold = MinPrecisionThreshold(min_precision=0.65)
assert threshold.find(clf, x_train, y_train) == 0.80
threshold = MinPrecisionThreshold(min_precision=0.50)
assert threshold.find(clf, x_train, y_train) == 0.70
threshold = MinPrecisionThreshold(min_precision=0.00)
assert threshold.find(clf, x_train, y_train) == 0.70

View File

@@ -1,45 +1,128 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from abc import abstractmethod, ABC
from typing import Optional, List
import numpy as np
from sklearn.metrics._ranking import _binary_clf_curve
from miplearn.classifiers import Classifier
class Threshold(ABC):
    """
    Solver components ask the machine learning models how confident are they on each
    prediction they make, then automatically discard all predictions that have low
    confidence. A Threshold specifies how confident should the ML models be for a
    prediction to be considered trustworthy.

    To model dynamic thresholds, which automatically adjust themselves during
    training to reach some desired target (such as minimum precision, or minimum
    recall), thresholds behave somewhat similar to ML models themselves, with `fit`
    and `predict` methods.
    """

    @abstractmethod
    def fit(
        self,
        clf: Classifier,
        x_train: np.ndarray,
        y_train: np.ndarray,
    ) -> None:
        """
        Given a trained binary classifier `clf`, calibrates itself based on the
        classifier's performance on the given training data set.

        Subclasses should call this base implementation first: it validates the
        argument types and checks that x_train and y_train have the same number
        of rows.
        """
        assert isinstance(clf, Classifier)
        assert isinstance(x_train, np.ndarray)
        assert isinstance(y_train, np.ndarray)
        n_samples = x_train.shape[0]
        assert y_train.shape[0] == n_samples

    @abstractmethod
    def predict(self, x_test: np.ndarray) -> List[float]:
        """
        Returns the minimum probability for a machine learning prediction to be
        considered trustworthy. There is one value for each label.
        """
        pass

    @abstractmethod
    def clone(self) -> "Threshold":
        """
        Returns an unfitted copy of this threshold with the same hyperparameters.
        """
        pass
class MinProbabilityThreshold(Threshold):
    """
    A threshold which considers predictions trustworthy if their probability of being
    correct, as computed by the machine learning models, are above a fixed value.
    """

    def __init__(self, min_probability: List[float]):
        # One fixed cutoff per label; never adjusted during fit.
        self.min_probability = min_probability

    def fit(self, clf: Classifier, x_train: np.ndarray, y_train: np.ndarray) -> None:
        # Static threshold: nothing to calibrate.
        pass

    def predict(self, x_test: np.ndarray) -> List[float]:
        return self.min_probability

    def clone(self) -> "MinProbabilityThreshold":
        return MinProbabilityThreshold(self.min_probability)
class MinPrecisionThreshold(Threshold):
    """
    A dynamic threshold which automatically adjusts itself during training to ensure
    that the component achieves at least a given precision `p` on the training data
    set. Note that increasing a component's minimum precision may reduce its recall.
    """

    def __init__(self, min_precision: List[float]) -> None:
        # One minimum-precision target per label.
        self.min_precision = min_precision
        self._computed_threshold: Optional[List[float]] = None

    def fit(
        self,
        clf: Classifier,
        x_train: np.ndarray,
        y_train: np.ndarray,
    ) -> None:
        """
        Computes, for each label, the smallest probability cutoff that reaches
        the required precision on the training data.
        """
        super().fit(clf, x_train, y_train)
        (n_samples, n_classes) = y_train.shape
        proba = clf.predict_proba(x_train)
        self._computed_threshold = [
            self._compute(
                y_train[:, i],
                proba[:, i],
                self.min_precision[i],
            )
            for i in range(n_classes)
        ]

    def predict(self, x_test: np.ndarray) -> List[float]:
        assert self._computed_threshold is not None
        return self._computed_threshold

    @staticmethod
    def _compute(
        y_actual: np.ndarray,
        y_prob: np.ndarray,
        min_precision: float,
    ) -> float:
        """
        Returns the smallest probability cutoff whose precision on
        (y_actual, y_prob) is at least `min_precision`, or infinity if no
        cutoff achieves it (i.e. every prediction is discarded).
        """
        fps, tps, thresholds = _binary_clf_curve(y_actual, y_prob)
        precision = tps / (tps + fps)
        # Scan from the lowest cutoff upwards; the first (last-indexed) one
        # meeting the target is the smallest acceptable threshold.
        for k in reversed(range(len(precision))):
            if precision[k] >= min_precision:
                return thresholds[k]
        return float("inf")

    def clone(self) -> "MinPrecisionThreshold":
        return MinPrecisionThreshold(
            min_precision=self.min_precision,
        )

View File

@@ -1,12 +1,18 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from typing import Dict
def classifier_evaluation_dict(tp, tn, fp, fn):
def classifier_evaluation_dict(
tp: int,
tn: int,
fp: int,
fn: int,
) -> Dict[str, float]:
p = tp + fn
n = fp + tn
d = {
d: Dict = {
"Predicted positive": fp + tp,
"Predicted negative": fn + tn,
"Condition positive": p,

View File

@@ -1,29 +1,254 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from abc import ABC, abstractmethod
from typing import Any, List, TYPE_CHECKING, Tuple, Dict, Optional
import numpy as np
from p_tqdm import p_umap
from miplearn.features.sample import Sample
from miplearn.instance.base import Instance
from miplearn.types import LearningSolveStats, Category
if TYPE_CHECKING:
from miplearn.solvers.learning import LearningSolver
# noinspection PyMethodMayBeStatic
class Component:
    """
    A Component is an object which adds functionality to a LearningSolver.

    For better code maintainability, LearningSolver simply delegates most of its
    functionality to Components. Each Component is responsible for exactly one ML
    strategy.
    """

    def after_solve_lp(
        self,
        solver: "LearningSolver",
        instance: Instance,
        model: Any,
        stats: LearningSolveStats,
        sample: Sample,
    ) -> None:
        """
        Method called by LearningSolver after the root LP relaxation is solved.
        See before_solve_lp for a description of the parameters.
        """
        return

    def after_solve_mip(
        self,
        solver: "LearningSolver",
        instance: Instance,
        model: Any,
        stats: LearningSolveStats,
        sample: Sample,
    ) -> None:
        """
        Method called by LearningSolver after the MIP is solved.
        See before_solve_lp for a description of the parameters.
        """
        return

    def before_solve_lp(
        self,
        solver: "LearningSolver",
        instance: Instance,
        model: Any,
        stats: LearningSolveStats,
        sample: Sample,
    ) -> None:
        """
        Method called by LearningSolver before the root LP relaxation is solved.

        Parameters
        ----------
        solver: LearningSolver
            The solver calling this method.
        instance: Instance
            The instance being solved.
        model
            The concrete optimization model being solved.
        stats: LearningSolveStats
            A dictionary containing statistics about the solution process, such as
            number of nodes explored and running time. Components are free to add
            their own statistics here. For example, PrimalSolutionComponent adds
            statistics regarding the number of predicted variables. All statistics in
            this dictionary are exported to the benchmark CSV file.
        sample: miplearn.features.Sample
            An object containing data that may be useful for training machine
            learning models and accelerating the solution process. Components are
            free to add their own training data here.
        """
        return

    def before_solve_mip(
        self,
        solver: "LearningSolver",
        instance: Instance,
        model: Any,
        stats: LearningSolveStats,
        sample: Sample,
    ) -> None:
        """
        Method called by LearningSolver before the MIP is solved.
        See before_solve_lp for a description of the parameters.
        """
        return

    def fit_xy(
        self,
        x: Dict[Category, np.ndarray],
        y: Dict[Category, np.ndarray],
    ) -> None:
        """
        Given two dictionaries x and y, mapping the name of the category to matrices
        of features and targets, this function does two things. First, for each
        category, it creates a clone of the prototype regressor/classifier. Second,
        it passes (x[category], y[category]) to the clone's fit method.
        """
        return

    def iteration_cb(
        self,
        solver: "LearningSolver",
        instance: Instance,
        model: Any,
    ) -> bool:
        """
        Method called by LearningSolver at the end of each iteration.

        After solving the MIP, LearningSolver calls `iteration_cb` of each component,
        giving them a chance to modify the problem and resolve it before the solution
        process ends. For example, the lazy constraint component uses `iteration_cb`
        to check that all lazy constraints are satisfied.

        If `iteration_cb` returns False for all components, the solution process
        ends. If it returns True for any component, the MIP is solved again.

        Parameters
        ----------
        solver: LearningSolver
            The solver calling this method.
        instance: Instance
            The instance being solved.
        model: Any
            The concrete optimization model being solved.
        """
        return False

    def lazy_cb(
        self,
        solver: "LearningSolver",
        instance: Instance,
        model: Any,
    ) -> None:
        """
        Method called by the solver from within the lazy-constraint callback.
        Default implementation does nothing.
        """
        return

    def sample_evaluate(
        self,
        instance: Optional[Instance],
        sample: Sample,
    ) -> Dict[str, Dict[str, float]]:
        """
        Returns classification/regression metrics for this component on the
        given sample. Default implementation reports nothing.
        """
        return {}

    def sample_xy(
        self,
        instance: Optional[Instance],
        sample: Sample,
    ) -> Tuple[Dict, Dict]:
        """
        Returns a pair of x and y dictionaries containing, respectively, the matrices
        of ML features and the labels for the sample. If the training sample does not
        include label information, returns (x, {}).
        """
        pass

    def pre_fit(self, pre: List[Any]) -> None:
        # Receives the list of values produced by pre_sample_xy over all
        # training samples; default implementation ignores them.
        pass

    def user_cut_cb(
        self,
        solver: "LearningSolver",
        instance: Instance,
        model: Any,
    ) -> None:
        """
        Method called by the solver from within the user-cut callback.
        Default implementation does nothing.
        """
        return

    def pre_sample_xy(self, instance: Instance, sample: Sample) -> Any:
        # Per-sample data gathered before fitting (see fit_multiple, Part I).
        pass

    @staticmethod
    def fit_multiple(
        components: List["Component"],
        instances: List[Instance],
        n_jobs: int = 1,
    ) -> None:
        """
        Fits all `components` on all `instances`, optionally in parallel.

        The process has two passes over the data: a pre-fit pass, which lets
        each component gather global information (e.g. the set of all known
        constraint names), and a fit pass, which assembles the (x, y) training
        matrices per component and category, then calls fit_xy.
        """

        # Part I: Pre-fit
        def _pre_sample_xy(instance: Instance) -> Dict:
            # Collect pre_sample_xy results for every (component, sample) pair;
            # keyed by component index so results can be merged across processes.
            pre_instance: Dict = {}
            for (cidx, comp) in enumerate(components):
                pre_instance[cidx] = []
            instance.load()
            for sample in instance.get_samples():
                for (cidx, comp) in enumerate(components):
                    pre_instance[cidx].append(comp.pre_sample_xy(instance, sample))
            instance.free()
            return pre_instance

        if n_jobs == 1:
            pre = [_pre_sample_xy(instance) for instance in instances]
        else:
            pre = p_umap(_pre_sample_xy, instances, num_cpus=n_jobs)
        pre_combined: Dict = {}
        for (cidx, comp) in enumerate(components):
            pre_combined[cidx] = []
            for p in pre:
                pre_combined[cidx].extend(p[cidx])
        for (cidx, comp) in enumerate(components):
            comp.pre_fit(pre_combined[cidx])

        # Part II: Fit
        def _sample_xy(instance: Instance) -> Tuple[Dict, Dict]:
            x_instance: Dict = {}
            y_instance: Dict = {}
            for (cidx, comp) in enumerate(components):
                x_instance[cidx] = {}
                y_instance[cidx] = {}
            instance.load()
            for sample in instance.get_samples():
                for (cidx, comp) in enumerate(components):
                    x = x_instance[cidx]
                    y = y_instance[cidx]
                    x_sample, y_sample = comp.sample_xy(instance, sample)
                    for cat in x_sample.keys():
                        if cat not in x:
                            x[cat] = []
                            y[cat] = []
                        x[cat] += x_sample[cat]
                        y[cat] += y_sample[cat]
            instance.free()
            return x_instance, y_instance

        if n_jobs == 1:
            xy = [_sample_xy(instance) for instance in instances]
        else:
            # Fix: previously num_cpus was not forwarded here, so this pass
            # ignored n_jobs (unlike the pre-fit pass above).
            xy = p_umap(_sample_xy, instances, num_cpus=n_jobs)
        for (cidx, comp) in enumerate(components):
            x_comp: Dict = {}
            y_comp: Dict = {}
            for (x, y) in xy:
                for cat in x[cidx].keys():
                    if cat not in x_comp:
                        x_comp[cat] = []
                        y_comp[cat] = []
                    x_comp[cat].extend(x[cidx][cat])
                    y_comp[cat].extend(y[cidx][cat])
            for cat in x_comp.keys():
                # Half-precision-friendly float32 features; labels keep their dtype.
                x_comp[cat] = np.array(x_comp[cat], dtype=np.float32)
                y_comp[cat] = np.array(y_comp[cat])
            comp.fit_xy(x_comp, y_comp)

View File

@@ -1,96 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import sys
from copy import deepcopy
from miplearn.classifiers.counting import CountingClassifier
from miplearn.components import classifier_evaluation_dict
from .component import Component
from ..extractors import *
logger = logging.getLogger(__name__)
class UserCutsComponent(Component):
    """
    A component that predicts which user cuts to enforce.
    """

    def __init__(self,
                 classifier=CountingClassifier(),
                 threshold=0.05):
        # NOTE(review): `classifier` default is instantiated once at import
        # time and shared; `fit` deep-copies it per violation, so this is safe.
        self.violations = set()
        self.count = {}
        self.n_samples = 0
        self.threshold = threshold  # min predicted probability to enforce a cut
        self.classifier_prototype = classifier
        self.classifiers = {}  # one classifier per known violation

    def before_solve(self, solver, instance, model):
        """Predicts likely-violated user cuts and adds them before solving."""
        instance.found_violated_user_cuts = []
        logger.info("Predicting violated user cuts...")
        violations = self.predict(instance)
        logger.info("Enforcing %d user cuts..." % len(violations))
        for v in violations:
            cut = instance.build_user_cut(model, v)
            solver.internal_solver.add_constraint(cut)

    def after_solve(self, solver, instance, model, results):
        # Nothing to record after the solve.
        pass

    def fit(self, training_instances):
        """Trains one classifier per distinct user cut seen in training data."""
        logger.debug("Fitting...")
        features = InstanceFeaturesExtractor().extract(training_instances)
        self.classifiers = {}
        violation_to_instance_idx = {}
        for (idx, instance) in enumerate(training_instances):
            # Skip instances that were never solved and therefore carry no
            # violation data.
            if not hasattr(instance, "found_violated_user_cuts"):
                continue
            for v in instance.found_violated_user_cuts:
                if v not in self.classifiers:
                    self.classifiers[v] = deepcopy(self.classifier_prototype)
                    violation_to_instance_idx[v] = []
                violation_to_instance_idx[v] += [idx]
        for (v, classifier) in tqdm(self.classifiers.items(),
                                    desc="Fit (user cuts)",
                                    disable=not sys.stdout.isatty(),
                                    ):
            logger.debug("Training: %s" % (str(v)))
            # Binary label: 1.0 on instances where this particular cut was violated.
            label = np.zeros(len(training_instances))
            label[violation_to_instance_idx[v]] = 1.0
            classifier.fit(features, label)

    def predict(self, instance):
        """Returns the list of user cuts predicted to be violated by `instance`."""
        violations = []
        features = InstanceFeaturesExtractor().extract([instance])
        for (v, classifier) in self.classifiers.items():
            proba = classifier.predict_proba(features)
            # proba[0][1] is P(violated) for the single extracted feature row.
            if proba[0][1] > self.threshold:
                violations += [v]
        return violations

    def evaluate(self, instances):
        """Computes per-instance tp/tn/fp/fn metrics against recorded violations."""
        results = {}
        all_violations = set()
        for instance in instances:
            all_violations |= set(instance.found_violated_user_cuts)
        for idx in tqdm(range(len(instances)),
                        desc="Evaluate (lazy)",
                        disable=not sys.stdout.isatty(),
                        ):
            instance = instances[idx]
            condition_positive = set(instance.found_violated_user_cuts)
            condition_negative = all_violations - condition_positive
            # Only violations ever seen in the data set are counted as predictions.
            pred_positive = set(self.predict(instance)) & all_violations
            pred_negative = all_violations - pred_positive
            tp = len(pred_positive & condition_positive)
            tn = len(pred_negative & condition_negative)
            fp = len(pred_positive & condition_negative)
            fn = len(pred_negative & condition_positive)
            results[idx] = classifier_evaluation_dict(tp, tn, fp, fn)
        return results

View File

@@ -0,0 +1,171 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from typing import Dict, List, Tuple, Optional, Any, Set
import numpy as np
from overrides import overrides
from miplearn.features.extractor import FeaturesExtractor
from miplearn.classifiers import Classifier
from miplearn.classifiers.threshold import Threshold
from miplearn.components import classifier_evaluation_dict
from miplearn.components.component import Component
from miplearn.features.sample import Sample
from miplearn.instance.base import Instance
from miplearn.types import ConstraintCategory, ConstraintName
logger = logging.getLogger(__name__)
class DynamicConstraintsComponent(Component):
    """
    Base component used by both DynamicLazyConstraintsComponent and UserCutsComponent.
    """

    def __init__(
        self,
        attr: str,
        classifier: Classifier,
        threshold: Threshold,
    ):
        # `attr` names the sample array recording which constraints were
        # enforced (e.g. "mip_constr_lazy_enforced").
        assert isinstance(classifier, Classifier)
        self.threshold_prototype: Threshold = threshold
        self.classifier_prototype: Classifier = classifier
        # One classifier/threshold pair per constraint category, cloned from
        # the prototypes above in fit_xy.
        self.classifiers: Dict[ConstraintCategory, Classifier] = {}
        self.thresholds: Dict[ConstraintCategory, Threshold] = {}
        # Names of all constraints seen across training data (see pre_fit).
        self.known_cids: List[ConstraintName] = []
        self.attr = attr

    def sample_xy_with_cids(
        self,
        instance: Optional[Instance],
        sample: Sample,
    ) -> Tuple[
        Dict[ConstraintCategory, List[List[float]]],
        Dict[ConstraintCategory, List[List[bool]]],
        Dict[ConstraintCategory, List[ConstraintName]],
    ]:
        """
        Builds features (x), labels (y) and constraint names (cids) for one
        training sample, grouped by constraint category.

        Labels are produced only when the sample records which constraints
        were actually enforced; otherwise y is left empty.
        """
        if len(self.known_cids) == 0:
            return {}, {}, {}
        assert instance is not None
        x: Dict[ConstraintCategory, List[List[float]]] = {}
        y: Dict[ConstraintCategory, List[List[bool]]] = {}
        cids: Dict[ConstraintCategory, List[ConstraintName]] = {}
        # Constraint names are stored as fixed-width byte strings.
        known_cids = np.array(self.known_cids, dtype="S")
        enforced_cids = None
        enforced_cids_np = sample.get_array(self.attr)
        if enforced_cids_np is not None:
            enforced_cids = list(enforced_cids_np)
        # Get user-provided constraint features
        # NOTE(review): relies on a private FeaturesExtractor method — verify
        # this stays in sync with the extractor's interface.
        (
            constr_features,
            constr_categories,
            constr_lazy,
        ) = FeaturesExtractor._extract_user_features_constrs(instance, known_cids)
        # Augment with instance features
        instance_features = sample.get_array("static_instance_features")
        assert instance_features is not None
        # Prepend the (shared) instance features to every constraint row.
        constr_features = np.hstack(
            [
                instance_features.reshape(1, -1).repeat(len(known_cids), axis=0),
                constr_features,
            ]
        )
        categories = np.unique(constr_categories)
        for c in categories:
            x[c] = constr_features[constr_categories == c].tolist()
            cids[c] = known_cids[constr_categories == c].tolist()
            if enforced_cids is not None:
                # Two-column one-hot label: [not enforced, enforced].
                tmp = np.isin(cids[c], enforced_cids).reshape(-1, 1)
                y[c] = np.hstack([~tmp, tmp]).tolist()  # type: ignore
        return x, y, cids

    @overrides
    def sample_xy(
        self,
        instance: Optional[Instance],
        sample: Sample,
    ) -> Tuple[Dict, Dict]:
        """Returns (x, y) for the sample, discarding the constraint names."""
        x, y, _ = self.sample_xy_with_cids(instance, sample)
        return x, y

    @overrides
    def pre_fit(self, pre: List[Any]) -> None:
        """Collects the union of all constraint names seen in training data."""
        assert pre is not None
        known_cids: Set = set()
        for cids in pre:
            known_cids |= set(list(cids))
        self.known_cids.clear()
        # Sorted for determinism across runs.
        self.known_cids.extend(sorted(known_cids))

    def sample_predict(
        self,
        instance: Instance,
        sample: Sample,
    ) -> List[ConstraintName]:
        """
        Returns the names of the constraints predicted to be necessary for
        this sample, according to the fitted classifiers and thresholds.
        """
        pred: List[ConstraintName] = []
        if len(self.known_cids) == 0:
            logger.info("Classifiers not fitted. Skipping.")
            return pred
        x, _, cids = self.sample_xy_with_cids(instance, sample)
        for category in x.keys():
            assert category in self.classifiers
            assert category in self.thresholds
            clf = self.classifiers[category]
            thr = self.thresholds[category]
            nx = np.array(x[category])
            proba = clf.predict_proba(nx)
            t = thr.predict(nx)
            # Column 1 holds P(enforced); t[1] is its trust threshold.
            for i in range(proba.shape[0]):
                if proba[i][1] > t[1]:
                    pred += [cids[category][i]]
        return pred

    @overrides
    def pre_sample_xy(self, instance: Instance, sample: Sample) -> Any:
        """Returns the enforced-constraint names recorded in the sample."""
        return sample.get_array(self.attr)

    @overrides
    def fit_xy(
        self,
        x: Dict[ConstraintCategory, np.ndarray],
        y: Dict[ConstraintCategory, np.ndarray],
    ) -> None:
        """Clones and fits one classifier/threshold pair per category."""
        for category in x.keys():
            self.classifiers[category] = self.classifier_prototype.clone()
            self.thresholds[category] = self.threshold_prototype.clone()
            npx = np.array(x[category])
            npy = np.array(y[category])
            self.classifiers[category].fit(npx, npy)
            # Thresholds calibrate themselves on the freshly fitted classifier.
            self.thresholds[category].fit(self.classifiers[category], npx, npy)

    @overrides
    def sample_evaluate(
        self,
        instance: Instance,
        sample: Sample,
    ) -> Dict[str, float]:
        """Compares predicted vs actually-enforced constraints (tp/tn/fp/fn)."""
        actual = sample.get_array(self.attr)
        assert actual is not None
        pred = set(self.sample_predict(instance, sample))
        tp, tn, fp, fn = 0, 0, 0, 0
        for cid in self.known_cids:
            if cid in pred:
                if cid in actual:
                    tp += 1
                else:
                    fp += 1
            else:
                if cid in actual:
                    fn += 1
                else:
                    tn += 1
        return classifier_evaluation_dict(tp=tp, tn=tn, fp=fp, fn=fn)

View File

@@ -0,0 +1,145 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
import pdb
from typing import Dict, List, TYPE_CHECKING, Tuple, Any, Optional, Set
import numpy as np
from overrides import overrides
from miplearn.classifiers import Classifier
from miplearn.classifiers.counting import CountingClassifier
from miplearn.classifiers.threshold import MinProbabilityThreshold, Threshold
from miplearn.components.component import Component
from miplearn.components.dynamic_common import DynamicConstraintsComponent
from miplearn.features.sample import Sample
from miplearn.instance.base import Instance
from miplearn.types import LearningSolveStats, ConstraintName, ConstraintCategory
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from miplearn.solvers.learning import LearningSolver
class DynamicLazyConstraintsComponent(Component):
    """
    A component that predicts which lazy constraints to enforce.

    All machine-learning work (feature extraction, fitting, prediction and
    evaluation) is delegated to an internal DynamicConstraintsComponent; this
    class only wires the predictions into the solution process.
    """

    def __init__(
        self,
        classifier: Classifier = CountingClassifier(),
        threshold: Threshold = MinProbabilityThreshold([0, 0.05]),
    ):
        dynamic = DynamicConstraintsComponent(
            classifier=classifier,
            threshold=threshold,
            attr="mip_constr_lazy_enforced",
        )
        self.dynamic: DynamicConstraintsComponent = dynamic
        # Expose the inner component's state under the usual attribute names.
        self.classifiers = dynamic.classifiers
        self.thresholds = dynamic.thresholds
        self.known_cids = dynamic.known_cids
        # Constraints enforced so far during the current solve.
        self.lazy_enforced: Set[ConstraintName] = set()

    @staticmethod
    def enforce(
        cids: List[ConstraintName],
        instance: Instance,
        model: Any,
        solver: "LearningSolver",
    ) -> None:
        """Asks the instance to add each constraint in `cids` to the model."""
        assert solver.internal_solver is not None
        for constraint_name in cids:
            instance.enforce_lazy_constraint(
                solver.internal_solver, model, constraint_name
            )

    @overrides
    def before_solve_mip(
        self,
        solver: "LearningSolver",
        instance: Instance,
        model: Any,
        stats: LearningSolveStats,
        sample: Sample,
    ) -> None:
        """Predicts likely-violated lazy constraints and enforces them upfront."""
        self.lazy_enforced.clear()
        logger.info("Predicting violated (dynamic) lazy constraints...")
        predicted = self.dynamic.sample_predict(instance, sample)
        logger.info("Enforcing %d lazy constraints..." % len(predicted))
        self.enforce(predicted, instance, model, solver)

    @overrides
    def after_solve_mip(
        self,
        solver: "LearningSolver",
        instance: Instance,
        model: Any,
        stats: LearningSolveStats,
        sample: Sample,
    ) -> None:
        """Records which lazy constraints were enforced, for later training."""
        enforced = np.array(list(self.lazy_enforced), dtype="S")
        sample.put_array("mip_constr_lazy_enforced", enforced)

    @overrides
    def iteration_cb(
        self,
        solver: "LearningSolver",
        instance: Instance,
        model: Any,
    ) -> bool:
        """Separates new violations; returns True if the MIP must be re-solved."""
        assert solver.internal_solver is not None
        logger.debug("Finding violated lazy constraints...")
        violated = instance.find_violated_lazy_constraints(
            solver.internal_solver, model
        )
        if len(violated) == 0:
            logger.debug("No violations found")
            return False
        self.lazy_enforced |= set(violated)
        logger.debug(" %d violations found" % len(violated))
        self.enforce(violated, instance, model, solver)
        return True

    # Delegate ML methods to self.dynamic
    # -------------------------------------------------------------------
    @overrides
    def sample_xy(
        self,
        instance: Optional[Instance],
        sample: Sample,
    ) -> Tuple[Dict, Dict]:
        return self.dynamic.sample_xy(instance, sample)

    @overrides
    def pre_fit(self, pre: List[Any]) -> None:
        self.dynamic.pre_fit(pre)

    def sample_predict(
        self,
        instance: Instance,
        sample: Sample,
    ) -> List[ConstraintName]:
        return self.dynamic.sample_predict(instance, sample)

    @overrides
    def pre_sample_xy(self, instance: Instance, sample: Sample) -> Any:
        return self.dynamic.pre_sample_xy(instance, sample)

    @overrides
    def fit_xy(
        self,
        x: Dict[ConstraintCategory, np.ndarray],
        y: Dict[ConstraintCategory, np.ndarray],
    ) -> None:
        self.dynamic.fit_xy(x, y)

    @overrides
    def sample_evaluate(
        self,
        instance: Instance,
        sample: Sample,
    ) -> Dict[ConstraintCategory, Dict[str, float]]:
        return self.dynamic.sample_evaluate(instance, sample)

View File

@@ -0,0 +1,137 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from typing import Any, TYPE_CHECKING, Set, Tuple, Dict, List, Optional
import numpy as np
from overrides import overrides
from miplearn.classifiers import Classifier
from miplearn.classifiers.counting import CountingClassifier
from miplearn.classifiers.threshold import Threshold, MinProbabilityThreshold
from miplearn.components.component import Component
from miplearn.components.dynamic_common import DynamicConstraintsComponent
from miplearn.features.sample import Sample
from miplearn.instance.base import Instance
from miplearn.types import LearningSolveStats, ConstraintName, ConstraintCategory
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from miplearn.solvers.learning import LearningSolver
class UserCutsComponent(Component):
    """
    A component that predicts which user cuts to enforce.

    ML work is delegated to an internal DynamicConstraintsComponent; this class
    wires predictions into the solve process and separates additional cuts from
    inside the solver's user-cut callback.
    """

    def __init__(
        self,
        classifier: Classifier = CountingClassifier(),
        threshold: Threshold = MinProbabilityThreshold([0.50, 0.50]),
    ) -> None:
        self.dynamic = DynamicConstraintsComponent(
            classifier=classifier,
            threshold=threshold,
            attr="mip_user_cuts_enforced",
        )
        # Cuts enforced during the current solve (ahead-of-time + callback).
        self.enforced: Set[ConstraintName] = set()
        self.n_added_in_callback = 0

    @overrides
    def before_solve_mip(
        self,
        solver: "LearningSolver",
        instance: "Instance",
        model: Any,
        stats: LearningSolveStats,
        sample: Sample,
    ) -> None:
        """Predicts likely-violated user cuts and enforces them ahead of time."""
        assert solver.internal_solver is not None
        self.enforced.clear()
        self.n_added_in_callback = 0
        logger.info("Predicting violated user cuts...")
        cids = self.dynamic.sample_predict(instance, sample)
        logger.info("Enforcing %d user cuts ahead-of-time..." % len(cids))
        for cid in cids:
            instance.enforce_user_cut(solver.internal_solver, model, cid)
        stats["UserCuts: Added ahead-of-time"] = len(cids)

    @overrides
    def user_cut_cb(
        self,
        solver: "LearningSolver",
        instance: "Instance",
        model: Any,
    ) -> None:
        """Called by the solver; separates and adds newly-violated user cuts."""
        assert solver.internal_solver is not None
        logger.debug("Finding violated user cuts...")
        cids = instance.find_violated_user_cuts(model)
        logger.debug(f"Found {len(cids)} violated user cuts")
        logger.debug("Building violated user cuts...")
        n_added = 0
        for cid in cids:
            if cid in self.enforced:
                # Already added earlier in this solve; skip duplicates.
                continue
            assert isinstance(cid, ConstraintName)
            instance.enforce_user_cut(solver.internal_solver, model, cid)
            self.enforced.add(cid)
            self.n_added_in_callback += 1
            n_added += 1
        if n_added > 0:
            # Bug fix: previously logged len(cids), which also counted cuts
            # that were skipped above because they had already been enforced.
            logger.debug(f"Added {n_added} violated user cuts")

    @overrides
    def after_solve_mip(
        self,
        solver: "LearningSolver",
        instance: "Instance",
        model: Any,
        stats: LearningSolveStats,
        sample: Sample,
    ) -> None:
        """Records which user cuts were enforced, for later training."""
        sample.put_array(
            "mip_user_cuts_enforced",
            np.array(list(self.enforced), dtype="S"),
        )
        stats["UserCuts: Added in callback"] = self.n_added_in_callback
        if self.n_added_in_callback > 0:
            logger.info(f"{self.n_added_in_callback} user cuts added in callback")

    # Delegate ML methods to self.dynamic
    # -------------------------------------------------------------------
    @overrides
    def sample_xy(
        self,
        instance: "Instance",
        sample: Sample,
    ) -> Tuple[Dict, Dict]:
        return self.dynamic.sample_xy(instance, sample)

    @overrides
    def pre_fit(self, pre: List[Any]) -> None:
        self.dynamic.pre_fit(pre)

    def sample_predict(
        self,
        instance: "Instance",
        sample: Sample,
    ) -> List[ConstraintName]:
        return self.dynamic.sample_predict(instance, sample)

    @overrides
    def pre_sample_xy(self, instance: Instance, sample: Sample) -> Any:
        return self.dynamic.pre_sample_xy(instance, sample)

    @overrides
    def fit_xy(
        self,
        x: Dict[ConstraintCategory, np.ndarray],
        y: Dict[ConstraintCategory, np.ndarray],
    ) -> None:
        self.dynamic.fit_xy(x, y)

    @overrides
    def sample_evaluate(
        self,
        instance: "Instance",
        sample: Sample,
    ) -> Dict[ConstraintCategory, Dict[str, float]]:
        return self.dynamic.sample_evaluate(instance, sample)

View File

@@ -1,108 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import sys
from copy import deepcopy
from miplearn.classifiers.counting import CountingClassifier
from miplearn.components import classifier_evaluation_dict
from .component import Component
from ..extractors import *
logger = logging.getLogger(__name__)
class DynamicLazyConstraintsComponent(Component):
    """
    A component that predicts which lazy constraints to enforce.
    """

    def __init__(self,
                 classifier=CountingClassifier(),
                 threshold=0.05):
        # `classifier` is a prototype; it is deep-copied per violation in `fit`.
        self.violations = set()
        self.count = {}
        self.n_samples = 0
        self.threshold = threshold  # min predicted probability to enforce
        self.classifier_prototype = classifier
        self.classifiers = {}  # one classifier per known violation

    def before_solve(self, solver, instance, model):
        """Predicts and enforces likely-violated lazy constraints upfront."""
        instance.found_violated_lazy_constraints = []
        logger.info("Predicting violated lazy constraints...")
        violations = self.predict(instance)
        logger.info("Enforcing %d lazy constraints..." % len(violations))
        for v in violations:
            cut = instance.build_lazy_constraint(model, v)
            solver.internal_solver.add_constraint(cut)

    def after_iteration(self, solver, instance, model):
        """Separates new violations; returns True if the MIP must be re-solved."""
        logger.debug("Finding violated (dynamic) lazy constraints...")
        violations = instance.find_violated_lazy_constraints(model)
        if len(violations) == 0:
            return False
        instance.found_violated_lazy_constraints += violations
        logger.debug(" %d violations found" % len(violations))
        for v in violations:
            cut = instance.build_lazy_constraint(model, v)
            solver.internal_solver.add_constraint(cut)
        return True

    def after_solve(self, solver, instance, model, results):
        # Nothing to record after the solve.
        pass

    def fit(self, training_instances):
        """Trains one classifier per distinct violation seen in training data."""
        logger.debug("Fitting...")
        features = InstanceFeaturesExtractor().extract(training_instances)
        self.classifiers = {}
        violation_to_instance_idx = {}
        for (idx, instance) in enumerate(training_instances):
            # Robustness fix: skip instances that were never solved and
            # therefore carry no violation data (previously raised
            # AttributeError; now consistent with UserCutsComponent.fit).
            if not hasattr(instance, "found_violated_lazy_constraints"):
                continue
            for v in instance.found_violated_lazy_constraints:
                # Lists are unhashable; normalize to tuples so they can be
                # used as dictionary keys.
                if isinstance(v, list):
                    v = tuple(v)
                if v not in self.classifiers:
                    self.classifiers[v] = deepcopy(self.classifier_prototype)
                    violation_to_instance_idx[v] = []
                violation_to_instance_idx[v] += [idx]
        for (v, classifier) in tqdm(self.classifiers.items(),
                                    desc="Fit (lazy)",
                                    disable=not sys.stdout.isatty(),
                                    ):
            logger.debug("Training: %s" % (str(v)))
            # Binary label: 1.0 on instances where this violation occurred.
            label = np.zeros(len(training_instances))
            label[violation_to_instance_idx[v]] = 1.0
            classifier.fit(features, label)

    def predict(self, instance):
        """Returns the violations predicted to occur for `instance`."""
        violations = []
        features = InstanceFeaturesExtractor().extract([instance])
        for (v, classifier) in self.classifiers.items():
            proba = classifier.predict_proba(features)
            if proba[0][1] > self.threshold:
                violations += [v]
        return violations

    def evaluate(self, instances):
        """Computes per-instance tp/tn/fp/fn metrics against recorded violations."""
        results = {}
        all_violations = set()
        for instance in instances:
            all_violations |= set(instance.found_violated_lazy_constraints)
        for idx in tqdm(range(len(instances)),
                        desc="Evaluate (lazy)",
                        disable=not sys.stdout.isatty(),
                        ):
            instance = instances[idx]
            condition_positive = set(instance.found_violated_lazy_constraints)
            condition_negative = all_violations - condition_positive
            pred_positive = set(self.predict(instance)) & all_violations
            pred_negative = all_violations - pred_positive
            tp = len(pred_positive & condition_positive)
            tn = len(pred_negative & condition_negative)
            fp = len(pred_positive & condition_negative)
            fn = len(pred_negative & condition_positive)
            results[idx] = classifier_evaluation_dict(tp, tn, fp, fn)
        return results

View File

@@ -1,179 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import sys
from copy import deepcopy
from miplearn.classifiers.counting import CountingClassifier
from .component import Component
from ..extractors import *
logger = logging.getLogger(__name__)
class LazyConstraint:
    """Pairs a constraint identifier with its solver-specific constraint object."""

    def __init__(self, cid, obj):
        self.cid, self.obj = cid, obj
class StaticLazyConstraintsComponent(Component):
def __init__(self,
             classifier=CountingClassifier(),
             threshold=0.05,
             use_two_phase_gap=True,
             large_gap=1e-2,
             violation_tolerance=-0.5,
             ):
    # NOTE(review): `classifier` default is instantiated once at import time
    # and shared; `fit` deep-copies it per category, so this is safe.
    self.threshold = threshold  # min predicted probability to enforce upfront
    self.classifier_prototype = classifier
    self.classifiers = {}  # one classifier per constraint category
    self.pool = []  # extracted-but-not-yet-enforced lazy constraints
    self.original_gap = None  # gap tolerance to restore after the loose phase
    self.large_gap = large_gap
    self.is_gap_large = False
    self.use_two_phase_gap = use_two_phase_gap
    self.violation_tolerance = violation_tolerance  # slack below which a pooled constraint counts as violated
def before_solve(self, solver, instance, model):
    """Optionally loosens the MIP gap, then extracts and predicts lazy constraints."""
    self.pool = []
    # Two-phase strategy applies only when lazy callbacks are unavailable:
    # solve loosely first, restore the tight gap later in after_iteration.
    if not solver.use_lazy_cb and self.use_two_phase_gap:
        logger.info("Increasing gap tolerance to %f", self.large_gap)
        self.original_gap = solver.gap_tolerance
        self.is_gap_large = True
        solver.internal_solver.set_gap_tolerance(self.large_gap)
    instance.found_violated_lazy_constraints = []
    if instance.has_static_lazy_constraints():
        self._extract_and_predict_static(solver, instance)
def after_solve(self, solver, instance, model, results):
    # Nothing to record after the solve.
    pass
def after_iteration(self, solver, instance, model):
    """Returns True if the MIP should be re-solved (violations added or gap restored)."""
    if solver.use_lazy_cb:
        # Violations are handled inside the lazy callback instead.
        return False
    else:
        should_repeat = self._check_and_add(instance, solver)
        if should_repeat:
            return True
        else:
            if self.is_gap_large:
                # No more violations: restore the tight gap and solve once more.
                logger.info("Restoring gap tolerance to %f", self.original_gap)
                solver.internal_solver.set_gap_tolerance(self.original_gap)
                self.is_gap_large = False
                return True
            else:
                return False
def on_lazy_callback(self, solver, instance, model):
    # Inside the solver callback, check the pool and add any violated constraints.
    self._check_and_add(instance, solver)
def _check_and_add(self, instance, solver):
    """Moves violated constraints from the pool into the model; True if any were added."""
    logger.debug("Finding violated lazy constraints...")
    constraints_to_add = []
    for c in self.pool:
        if not solver.internal_solver.is_constraint_satisfied(c.obj,
                                                              tol=self.violation_tolerance):
            constraints_to_add.append(c)
    for c in constraints_to_add:
        self.pool.remove(c)
        solver.internal_solver.add_constraint(c.obj)
        instance.found_violated_lazy_constraints += [c.cid]
    if len(constraints_to_add) > 0:
        logger.info("%8d lazy constraints added %8d in the pool" % (len(constraints_to_add), len(self.pool)))
        return True
    else:
        return False
def fit(self, training_instances):
training_instances = [t
for t in training_instances
if hasattr(t, "found_violated_lazy_constraints")]
logger.debug("Extracting x and y...")
x = self.x(training_instances)
y = self.y(training_instances)
logger.debug("Fitting...")
for category in tqdm(x.keys(),
desc="Fit (lazy)",
disable=not sys.stdout.isatty()):
if category not in self.classifiers:
self.classifiers[category] = deepcopy(self.classifier_prototype)
self.classifiers[category].fit(x[category], y[category])
def predict(self, instance):
pass
def evaluate(self, instances):
pass
def _extract_and_predict_static(self, solver, instance):
x = {}
constraints = {}
logger.info("Extracting lazy constraints...")
for cid in solver.internal_solver.get_constraint_ids():
if instance.is_constraint_lazy(cid):
category = instance.get_constraint_category(cid)
if category not in x:
x[category] = []
constraints[category] = []
x[category] += [instance.get_constraint_features(cid)]
c = LazyConstraint(cid=cid,
obj=solver.internal_solver.extract_constraint(cid))
constraints[category] += [c]
self.pool.append(c)
logger.info("%8d lazy constraints extracted" % len(self.pool))
logger.info("Predicting required lazy constraints...")
n_added = 0
for (category, x_values) in x.items():
if category not in self.classifiers:
continue
if isinstance(x_values[0], np.ndarray):
x[category] = np.array(x_values)
proba = self.classifiers[category].predict_proba(x[category])
for i in range(len(proba)):
if proba[i][1] > self.threshold:
n_added += 1
c = constraints[category][i]
self.pool.remove(c)
solver.internal_solver.add_constraint(c.obj)
instance.found_violated_lazy_constraints += [c.cid]
logger.info("%8d lazy constraints added %8d in the pool" % (n_added, len(self.pool)))
def _collect_constraints(self, train_instances):
constraints = {}
for instance in train_instances:
for cid in instance.found_violated_lazy_constraints:
category = instance.get_constraint_category(cid)
if category not in constraints:
constraints[category] = set()
constraints[category].add(cid)
for (category, cids) in constraints.items():
constraints[category] = sorted(list(cids))
return constraints
def x(self, train_instances):
result = {}
constraints = self._collect_constraints(train_instances)
for (category, cids) in constraints.items():
result[category] = []
for instance in train_instances:
for cid in cids:
result[category].append(instance.get_constraint_features(cid))
return result
def y(self, train_instances):
result = {}
constraints = self._collect_constraints(train_instances)
for (category, cids) in constraints.items():
result[category] = []
for instance in train_instances:
for cid in cids:
if cid in instance.found_violated_lazy_constraints:
result[category].append([0, 1])
else:
result[category].append([1, 0])
return result

View File

@@ -1,13 +1,24 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from sklearn.metrics import mean_squared_error, explained_variance_score, max_error, mean_absolute_error, r2_score
from .. import Component, InstanceFeaturesExtractor, ObjectiveValueExtractor
from sklearn.linear_model import LinearRegression
from copy import deepcopy
import numpy as np
import logging
from typing import List, Dict, Any, TYPE_CHECKING, Tuple, Optional, cast
import numpy as np
from overrides import overrides
from sklearn.linear_model import LinearRegression
from miplearn.classifiers import Regressor
from miplearn.classifiers.sklearn import ScikitLearnRegressor
from miplearn.components.component import Component
from miplearn.features.sample import Sample
from miplearn.instance.base import Instance
from miplearn.types import LearningSolveStats
if TYPE_CHECKING:
from miplearn.solvers.learning import LearningSolver
logger = logging.getLogger(__name__)
@@ -15,71 +26,101 @@ class ObjectiveValueComponent(Component):
"""
A Component which predicts the optimal objective value of the problem.
"""
def __init__(self,
regressor=LinearRegression()):
self.ub_regressor = None
self.lb_regressor = None
def __init__(
self,
regressor: Regressor = ScikitLearnRegressor(LinearRegression()),
) -> None:
assert isinstance(regressor, Regressor)
self.regressors: Dict[str, Regressor] = {}
self.regressor_prototype = regressor
def before_solve(self, solver, instance, model):
if self.ub_regressor is not None:
@overrides
def before_solve_mip(
self,
solver: "LearningSolver",
instance: Instance,
model: Any,
stats: LearningSolveStats,
sample: Sample,
) -> None:
logger.info("Predicting optimal value...")
lb, ub = self.predict([instance])[0]
instance.predicted_ub = ub
instance.predicted_lb = lb
logger.info("Predicted values: lb=%.2f, ub=%.2f" % (lb, ub))
pred = self.sample_predict(sample)
for (c, v) in pred.items():
logger.info(f"Predicted {c.lower()}: %.6e" % v)
stats[f"Objective: Predicted {c.lower()}"] = v # type: ignore
def after_solve(self, solver, instance, model, results):
if self.ub_regressor is not None:
results["Predicted UB"] = instance.predicted_ub
results["Predicted LB"] = instance.predicted_lb
@overrides
def fit_xy(
self,
x: Dict[str, np.ndarray],
y: Dict[str, np.ndarray],
) -> None:
for c in ["Upper bound", "Lower bound"]:
if c in y:
self.regressors[c] = self.regressor_prototype.clone()
self.regressors[c].fit(x[c], y[c])
def sample_predict(self, sample: Sample) -> Dict[str, float]:
pred: Dict[str, float] = {}
x, _ = self.sample_xy(None, sample)
for c in ["Upper bound", "Lower bound"]:
if c in self.regressors is not None:
pred[c] = self.regressors[c].predict(np.array(x[c]))[0, 0]
else:
results["Predicted UB"] = None
results["Predicted LB"] = None
logger.info(f"{c} regressor not fitted. Skipping.")
return pred
def fit(self, training_instances):
logger.debug("Extracting features...")
features = InstanceFeaturesExtractor().extract(training_instances)
ub = ObjectiveValueExtractor(kind="upper bound").extract(training_instances)
lb = ObjectiveValueExtractor(kind="lower bound").extract(training_instances)
assert ub.shape == (len(training_instances), 1)
assert lb.shape == (len(training_instances), 1)
self.ub_regressor = deepcopy(self.regressor_prototype)
self.lb_regressor = deepcopy(self.regressor_prototype)
logger.debug("Fitting ub_regressor...")
self.ub_regressor.fit(features, ub.ravel())
logger.debug("Fitting ub_regressor...")
self.lb_regressor.fit(features, lb.ravel())
@overrides
def sample_xy(
self,
_: Optional[Instance],
sample: Sample,
) -> Tuple[Dict[str, List[List[float]]], Dict[str, List[List[float]]]]:
lp_instance_features_np = sample.get_array("lp_instance_features")
if lp_instance_features_np is None:
lp_instance_features_np = sample.get_array("static_instance_features")
assert lp_instance_features_np is not None
lp_instance_features = cast(List[float], lp_instance_features_np.tolist())
def predict(self, instances):
features = InstanceFeaturesExtractor().extract(instances)
lb = self.lb_regressor.predict(features)
ub = self.ub_regressor.predict(features)
assert lb.shape == (len(instances),)
assert ub.shape == (len(instances),)
return np.array([lb, ub]).T
def evaluate(self, instances):
y_pred = self.predict(instances)
y_true = np.array([[inst.lower_bound, inst.upper_bound] for inst in instances])
y_true_lb, y_true_ub = y_true[:, 0], y_true[:, 1]
y_pred_lb, y_pred_ub = y_pred[:, 1], y_pred[:, 1]
ev = {
"Lower bound": {
"Mean squared error": mean_squared_error(y_true_lb, y_pred_lb),
"Explained variance": explained_variance_score(y_true_lb, y_pred_lb),
"Max error": max_error(y_true_lb, y_pred_lb),
"Mean absolute error": mean_absolute_error(y_true_lb, y_pred_lb),
"R2": r2_score(y_true_lb, y_pred_lb),
"Median absolute error": mean_absolute_error(y_true_lb, y_pred_lb),
},
"Upper bound": {
"Mean squared error": mean_squared_error(y_true_ub, y_pred_ub),
"Explained variance": explained_variance_score(y_true_ub, y_pred_ub),
"Max error": max_error(y_true_ub, y_pred_ub),
"Mean absolute error": mean_absolute_error(y_true_ub, y_pred_ub),
"R2": r2_score(y_true_ub, y_pred_ub),
"Median absolute error": mean_absolute_error(y_true_ub, y_pred_ub),
},
# Features
x: Dict[str, List[List[float]]] = {
"Upper bound": [lp_instance_features],
"Lower bound": [lp_instance_features],
}
return ev
# Labels
y: Dict[str, List[List[float]]] = {}
mip_lower_bound = sample.get_scalar("mip_lower_bound")
mip_upper_bound = sample.get_scalar("mip_upper_bound")
if mip_lower_bound is not None:
y["Lower bound"] = [[mip_lower_bound]]
if mip_upper_bound is not None:
y["Upper bound"] = [[mip_upper_bound]]
return x, y
@overrides
def sample_evaluate(
self,
instance: Instance,
sample: Sample,
) -> Dict[str, Dict[str, float]]:
def compare(y_pred: float, y_actual: float) -> Dict[str, float]:
err = np.round(abs(y_pred - y_actual), 8)
return {
"Actual value": y_actual,
"Predicted value": y_pred,
"Absolute error": err,
"Relative error": err / y_actual,
}
result: Dict[str, Dict[str, float]] = {}
pred = self.sample_predict(sample)
actual_ub = sample.get_scalar("mip_upper_bound")
actual_lb = sample.get_scalar("mip_lower_bound")
if actual_ub is not None:
result["Upper bound"] = compare(pred["Upper bound"], actual_ub)
if actual_lb is not None:
result["Lower bound"] = compare(pred["Lower bound"], actual_lb)
return result

View File

@@ -1,150 +1,242 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from copy import deepcopy
import sys
import logging
from typing import Dict, List, Any, TYPE_CHECKING, Tuple, Optional
from .component import Component
from ..classifiers.adaptive import AdaptiveClassifier
from ..classifiers.threshold import MinPrecisionThreshold, DynamicThreshold
from ..components import classifier_evaluation_dict
from ..extractors import *
import numpy as np
from overrides import overrides
from miplearn.classifiers import Classifier
from miplearn.classifiers.adaptive import AdaptiveClassifier
from miplearn.classifiers.threshold import MinPrecisionThreshold, Threshold
from miplearn.components import classifier_evaluation_dict
from miplearn.components.component import Component
from miplearn.features.sample import Sample
from miplearn.instance.base import Instance
from miplearn.types import (
LearningSolveStats,
Category,
Solution,
)
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from miplearn.solvers.learning import LearningSolver
class PrimalSolutionComponent(Component):
"""
A component that predicts primal solutions.
A component that predicts the optimal primal values for the binary decision
variables.
In exact mode, predicted primal solutions are provided to the solver as MIP
starts. In heuristic mode, this component fixes the decision variables to their
predicted values.
"""
def __init__(self,
classifier=AdaptiveClassifier(),
mode="exact",
threshold=MinPrecisionThreshold(0.98)):
def __init__(
self,
classifier: Classifier = AdaptiveClassifier(),
mode: str = "exact",
threshold: Threshold = MinPrecisionThreshold([0.98, 0.98]),
) -> None:
assert isinstance(classifier, Classifier)
assert isinstance(threshold, Threshold)
assert mode in ["exact", "heuristic"]
self.mode = mode
self.classifiers = {}
self.thresholds = {}
self.classifiers: Dict[Category, Classifier] = {}
self.thresholds: Dict[Category, Threshold] = {}
self.threshold_prototype = threshold
self.classifier_prototype = classifier
def before_solve(self, solver, instance, model):
@overrides
def before_solve_mip(
self,
solver: "LearningSolver",
instance: Instance,
model: Any,
stats: LearningSolveStats,
sample: Sample,
) -> None:
logger.info("Predicting primal solution...")
solution = self.predict(instance)
# Do nothing if models are not trained
if len(self.classifiers) == 0:
logger.info("Classifiers not fitted. Skipping.")
return
# Predict solution and provide it to the solver
solution = self.sample_predict(sample)
assert solver.internal_solver is not None
if self.mode == "heuristic":
solver.internal_solver.fix(solution)
else:
solver.internal_solver.set_warm_start(solution)
def after_solve(self, solver, instance, model, results):
pass
# Update statistics
stats["Primal: Free"] = 0
stats["Primal: Zero"] = 0
stats["Primal: One"] = 0
for (var_name, value) in solution.items():
if value is None:
stats["Primal: Free"] += 1
else:
if value < 0.5:
stats["Primal: Zero"] += 1
else:
stats["Primal: One"] += 1
logger.info(
f"Predicted: free: {stats['Primal: Free']}, "
f"zero: {stats['Primal: Zero']}, "
f"one: {stats['Primal: One']}"
)
def x(self, training_instances):
return VariableFeaturesExtractor().extract(training_instances)
def sample_predict(self, sample: Sample) -> Solution:
var_names = sample.get_array("static_var_names")
var_categories = sample.get_array("static_var_categories")
assert var_names is not None
assert var_categories is not None
def y(self, training_instances):
return SolutionExtractor().extract(training_instances)
# Compute y_pred
x, _ = self.sample_xy(None, sample)
y_pred = {}
for category in x.keys():
assert category in self.classifiers, (
f"Classifier for category {category} has not been trained. "
f"Please call component.fit before component.predict."
)
xc = np.array(x[category])
proba = self.classifiers[category].predict_proba(xc)
thr = self.thresholds[category].predict(xc)
y_pred[category] = np.vstack(
[
proba[:, 0] >= thr[0],
proba[:, 1] >= thr[1],
]
).T
def fit(self, training_instances, n_jobs=1):
logger.debug("Extracting features...")
features = VariableFeaturesExtractor().extract(training_instances)
solutions = SolutionExtractor().extract(training_instances)
for category in tqdm(features.keys(),
desc="Fit (primal)",
disable=not sys.stdout.isatty(),
):
x_train = features[category]
for label in [0, 1]:
y_train = solutions[category][:, label].astype(int)
# If all samples are either positive or negative, make constant predictions
y_avg = np.average(y_train)
if y_avg < 0.001 or y_avg >= 0.999:
self.classifiers[category, label] = round(y_avg)
self.thresholds[category, label] = 0.50
# Convert y_pred into solution
solution: Solution = {v: None for v in var_names}
category_offset: Dict[Category, int] = {cat: 0 for cat in x.keys()}
for (i, var_name) in enumerate(var_names):
category = var_categories[i]
if category not in category_offset:
continue
offset = category_offset[category]
category_offset[category] += 1
if y_pred[category][offset, 0]:
solution[var_name] = 0.0
if y_pred[category][offset, 1]:
solution[var_name] = 1.0
# Create a copy of classifier prototype and train it
if isinstance(self.classifier_prototype, list):
clf = deepcopy(self.classifier_prototype[label])
else:
clf = deepcopy(self.classifier_prototype)
clf.fit(x_train, y_train)
# Find threshold (dynamic or static)
if isinstance(self.threshold_prototype, DynamicThreshold):
self.thresholds[category, label] = self.threshold_prototype.find(clf, x_train, y_train)
else:
self.thresholds[category, label] = deepcopy(self.threshold_prototype)
self.classifiers[category, label] = clf
def predict(self, instance):
solution = {}
x_test = VariableFeaturesExtractor().extract([instance])
var_split = Extractor.split_variables(instance)
for category in var_split.keys():
n = len(var_split[category])
for (i, (var, index)) in enumerate(var_split[category]):
if var not in solution.keys():
solution[var] = {}
solution[var][index] = None
for label in [0, 1]:
if (category, label) not in self.classifiers.keys():
continue
clf = self.classifiers[category, label]
if isinstance(clf, float) or isinstance(clf, int):
ws = np.array([[1 - clf, clf] for _ in range(n)])
else:
ws = clf.predict_proba(x_test[category])
assert ws.shape == (n, 2), "ws.shape should be (%d, 2) not %s" % (n, ws.shape)
for (i, (var, index)) in enumerate(var_split[category]):
if ws[i, 1] >= self.thresholds[category, label]:
solution[var][index] = label
return solution
def evaluate(self, instances):
ev = {"Fix zero": {},
"Fix one": {}}
for instance_idx in tqdm(range(len(instances)),
desc="Evaluate (primal)",
disable=not sys.stdout.isatty(),
):
instance = instances[instance_idx]
solution_actual = instance.solution
solution_pred = self.predict(instance)
@overrides
def sample_xy(
self,
_: Optional[Instance],
sample: Sample,
) -> Tuple[Dict[Category, List[List[float]]], Dict[Category, List[List[float]]]]:
x: Dict = {}
y: Dict = {}
instance_features = sample.get_array("static_instance_features")
mip_var_values = sample.get_array("mip_var_values")
var_features = sample.get_array("lp_var_features")
var_names = sample.get_array("static_var_names")
var_types = sample.get_array("static_var_types")
var_categories = sample.get_array("static_var_categories")
if var_features is None:
var_features = sample.get_array("static_var_features")
assert instance_features is not None
assert var_features is not None
assert var_names is not None
assert var_types is not None
assert var_categories is not None
for (i, var_name) in enumerate(var_names):
# Skip non-binary variables
if var_types[i] != b"B":
continue
# Initialize categories
category = var_categories[i]
if len(category) == 0:
continue
if category not in x.keys():
x[category] = []
y[category] = []
# Features
features = list(instance_features)
features.extend(var_features[i])
x[category].append(features)
# Labels
if mip_var_values is not None:
opt_value = mip_var_values[i]
assert opt_value is not None
y[category].append([opt_value < 0.5, opt_value >= 0.5])
return x, y
@overrides
def sample_evaluate(
self,
_: Optional[Instance],
sample: Sample,
) -> Dict[str, Dict[str, float]]:
mip_var_values = sample.get_array("mip_var_values")
var_names = sample.get_array("static_var_names")
assert mip_var_values is not None
assert var_names is not None
solution_actual = {
var_name: mip_var_values[i] for (i, var_name) in enumerate(var_names)
}
solution_pred = self.sample_predict(sample)
vars_all, vars_one, vars_zero = set(), set(), set()
pred_one_positive, pred_zero_positive = set(), set()
for (varname, var_dict) in solution_actual.items():
if varname not in solution_pred.keys():
continue
for (idx, value) in var_dict.items():
vars_all.add((varname, idx))
if value > 0.5:
vars_one.add((varname, idx))
for (var_name, value_actual) in solution_actual.items():
vars_all.add(var_name)
if value_actual > 0.5:
vars_one.add(var_name)
else:
vars_zero.add((varname, idx))
if solution_pred[varname][idx] is not None:
if solution_pred[varname][idx] > 0.5:
pred_one_positive.add((varname, idx))
vars_zero.add(var_name)
value_pred = solution_pred[var_name]
if value_pred is not None:
if value_pred > 0.5:
pred_one_positive.add(var_name)
else:
pred_zero_positive.add((varname, idx))
pred_zero_positive.add(var_name)
pred_one_negative = vars_all - pred_one_positive
pred_zero_negative = vars_all - pred_zero_positive
return {
"0": classifier_evaluation_dict(
tp=len(pred_zero_positive & vars_zero),
tn=len(pred_zero_negative & vars_one),
fp=len(pred_zero_positive & vars_one),
fn=len(pred_zero_negative & vars_zero),
),
"1": classifier_evaluation_dict(
tp=len(pred_one_positive & vars_one),
tn=len(pred_one_negative & vars_zero),
fp=len(pred_one_positive & vars_zero),
fn=len(pred_one_negative & vars_one),
),
}
tp_zero = len(pred_zero_positive & vars_zero)
fp_zero = len(pred_zero_positive & vars_one)
tn_zero = len(pred_zero_negative & vars_one)
fn_zero = len(pred_zero_negative & vars_zero)
tp_one = len(pred_one_positive & vars_one)
fp_one = len(pred_one_positive & vars_zero)
tn_one = len(pred_one_negative & vars_zero)
fn_one = len(pred_one_negative & vars_one)
ev["Fix zero"][instance_idx] = classifier_evaluation_dict(tp_zero, tn_zero, fp_zero, fn_zero)
ev["Fix one"][instance_idx] = classifier_evaluation_dict(tp_one, tn_one, fp_one, fn_one)
return ev
@overrides
def fit_xy(
self,
x: Dict[Category, np.ndarray],
y: Dict[Category, np.ndarray],
) -> None:
for category in x.keys():
clf = self.classifier_prototype.clone()
thr = self.threshold_prototype.clone()
clf.fit(x[category], y[category])
thr.fit(clf, x[category], y[category])
self.classifiers[category] = clf
self.thresholds[category] = thr

View File

@@ -1,151 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
import sys
from copy import deepcopy
import numpy as np
from miplearn.components import classifier_evaluation_dict
from tqdm import tqdm
from miplearn import Component
from miplearn.classifiers.counting import CountingClassifier
logger = logging.getLogger(__name__)
class RelaxationComponent(Component):
    """
    A Component which builds a relaxation of the problem by dropping constraints.
    Currently, this component drops all integrality constraints, as well as
    all inequality constraints which are not likely binding in the LP relaxation.
    In a future version of MIPLearn, this component may decide to keep some
    integrality constraints if it determines that they have small impact on
    running time, but large impact on dual bound.
    """

    def __init__(self,
                 classifier=CountingClassifier(),
                 threshold=0.95,
                 slack_tolerance=1e-5,
                 ):
        # One trained classifier per constraint category.
        self.classifiers = {}
        # Prototype classifier; a deepcopy is trained per category.
        self.classifier_prototype = classifier
        # Minimum predicted probability of redundancy required to drop a
        # constraint (comparison is >=).
        self.threshold = threshold
        # Constraints whose slack exceeds this value are labeled redundant
        # during training.
        self.slack_tolerance = slack_tolerance

    def before_solve(self, solver, instance, _):
        """Relax integrality, then remove LP constraints predicted redundant."""
        logger.info("Relaxing integrality...")
        solver.internal_solver.relax()
        logger.info("Predicting redundant LP constraints...")
        cids = solver.internal_solver.get_constraint_ids()
        x, constraints = self.x([instance],
                                constraint_ids=cids,
                                return_constraints=True)
        y = self.predict(x)
        n_removed = 0
        for category in y.keys():
            for i in range(len(y[category])):
                if y[category][i][0] == 1:
                    cid = constraints[category][i]
                    # NOTE(review): extract_constraint appears to detach the
                    # constraint from the model — confirm against the solver API.
                    solver.internal_solver.extract_constraint(cid)
                    n_removed += 1
        logger.info("Removed %d predicted redundant LP constraints" % n_removed)

    def after_solve(self, solver, instance, model, results):
        # Record constraint slacks so this instance can later be used for
        # training (see fit).
        instance.slacks = solver.internal_solver.get_constraint_slacks()

    def fit(self, training_instances):
        """Train one classifier per constraint category.

        Only instances carrying a `slacks` attribute (i.e. solved before)
        are used.
        """
        training_instances = [instance
                              for instance in training_instances
                              if hasattr(instance, "slacks")]
        logger.debug("Extracting x and y...")
        x = self.x(training_instances)
        y = self.y(training_instances)
        logger.debug("Fitting...")
        for category in tqdm(x.keys(),
                             desc="Fit (relaxation)",
                             disable=not sys.stdout.isatty()):
            if category not in self.classifiers:
                self.classifiers[category] = deepcopy(self.classifier_prototype)
            self.classifiers[category].fit(x[category], y[category])

    def x(self,
          instances,
          constraint_ids=None,
          return_constraints=False):
        """Build features per category; optionally also return the cids.

        If `constraint_ids` is None, the cids recorded in each instance's
        `slacks` are used.  Constraints whose category is None are skipped.
        """
        x = {}
        constraints = {}
        for instance in instances:
            if constraint_ids is not None:
                cids = constraint_ids
            else:
                cids = instance.slacks.keys()
            for cid in cids:
                category = instance.get_constraint_category(cid)
                if category is None:
                    continue
                if category not in x:
                    x[category] = []
                    constraints[category] = []
                x[category] += [instance.get_constraint_features(cid)]
                constraints[category] += [cid]
        if return_constraints:
            return x, constraints
        else:
            return x

    def y(self, instances):
        """Build labels per category: [1] if slack exceeds the tolerance
        (constraint considered redundant), [0] otherwise."""
        y = {}
        for instance in instances:
            for (cid, slack) in instance.slacks.items():
                category = instance.get_constraint_category(cid)
                if category is None:
                    continue
                if category not in y:
                    y[category] = []
                if slack > self.slack_tolerance:
                    y[category] += [[1]]
                else:
                    y[category] += [[0]]
        return y

    def predict(self, x):
        """Predict [1] (redundant) or [0] per constraint, per category.

        Categories without a trained classifier are omitted from the result.
        """
        y = {}
        for (category, x_cat) in x.items():
            if category not in self.classifiers:
                continue
            y[category] = []
            #x_cat = np.array(x_cat)
            proba = self.classifiers[category].predict_proba(x_cat)
            for i in range(len(proba)):
                if proba[i][1] >= self.threshold:
                    y[category] += [[1]]
                else:
                    y[category] += [[0]]
        return y

    def evaluate(self, instance):
        """Compare predictions against slack-derived labels for one instance;
        return a classifier evaluation dict built from tp/tn/fp/fn counts."""
        x = self.x([instance])
        y_true = self.y([instance])
        y_pred = self.predict(x)
        tp, tn, fp, fn = 0, 0, 0, 0
        for category in y_true.keys():
            for i in range(len(y_true[category])):
                if y_pred[category][i][0] == 1:
                    if y_true[category][i][0] == 1:
                        tp += 1
                    else:
                        fp += 1
                else:
                    if y_true[category][i][0] == 1:
                        fn += 1
                    else:
                        tn += 1
        return classifier_evaluation_dict(tp, tn, fp, fn)

View File

@@ -0,0 +1,252 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from typing import Dict, Tuple, List, Any, TYPE_CHECKING, Set, Optional
import numpy as np
from overrides import overrides
from miplearn.classifiers import Classifier
from miplearn.classifiers.counting import CountingClassifier
from miplearn.classifiers.threshold import MinProbabilityThreshold, Threshold
from miplearn.components.component import Component
from miplearn.features.sample import Sample
from miplearn.solvers.internal import Constraints
from miplearn.instance.base import Instance
from miplearn.types import LearningSolveStats, ConstraintName, ConstraintCategory
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from miplearn.solvers.learning import LearningSolver
class LazyConstraint:
    """Pairs a constraint name with its solver-specific constraint object."""

    def __init__(self, cid: ConstraintName, obj: Any) -> None:
        self.cid = cid
        self.obj = obj
class StaticLazyConstraintsComponent(Component):
    """
    Component that decides which of the constraints tagged as lazy should
    be kept in the formulation, and which should be removed.

    Removed constraints are held in a pool; between solver iterations (or
    inside the lazy callback) the pool is checked and violated constraints
    are restored to the formulation.
    """

    def __init__(
        self,
        classifier: Classifier = CountingClassifier(),
        threshold: Threshold = MinProbabilityThreshold([0.50, 0.50]),
        violation_tolerance: float = -0.5,
    ) -> None:
        assert isinstance(classifier, Classifier)
        # Prototypes; a clone of each is trained per constraint category.
        self.classifier_prototype: Classifier = classifier
        self.threshold_prototype: Threshold = threshold
        self.classifiers: Dict[ConstraintCategory, Classifier] = {}
        self.thresholds: Dict[ConstraintCategory, Threshold] = {}
        # Lazy constraints currently removed from the formulation.
        self.pool: Constraints = Constraints()
        # Tolerance used when checking pooled constraints for violation
        # (negative values make the check more permissive).
        self.violation_tolerance: float = violation_tolerance
        # Names of lazy constraints enforced during the current solve.
        self.enforced_cids: Set[ConstraintName] = set()
        self.n_restored: int = 0
        self.n_iterations: int = 0

    @overrides
    def after_solve_mip(
        self,
        solver: "LearningSolver",
        instance: "Instance",
        model: Any,
        stats: LearningSolveStats,
        sample: Sample,
    ) -> None:
        """Record which lazy constraints were enforced, plus statistics."""
        sample.put_array(
            "mip_constr_lazy_enforced",
            np.array(list(self.enforced_cids), dtype="S"),
        )
        stats["LazyStatic: Restored"] = self.n_restored
        stats["LazyStatic: Iterations"] = self.n_iterations

    @overrides
    def before_solve_mip(
        self,
        solver: "LearningSolver",
        instance: "Instance",
        model: Any,
        stats: LearningSolveStats,
        sample: Sample,
    ) -> None:
        """Predict which lazy constraints to keep; move the rest to the pool."""
        assert solver.internal_solver is not None
        static_lazy_count = sample.get_scalar("static_constr_lazy_count")
        assert static_lazy_count is not None
        logger.info("Predicting violated (static) lazy constraints...")
        if static_lazy_count == 0:
            logger.info("Instance does not have static lazy constraints. Skipping.")
            # Bug fix: previously this fell through and processed the
            # instance anyway, despite logging "Skipping".
            return
        self.enforced_cids = set(self.sample_predict(sample))
        logger.info("Moving lazy constraints to the pool...")
        constraints = Constraints.from_sample(sample)
        assert constraints.lazy is not None
        assert constraints.names is not None
        # Select lazy constraints NOT predicted as necessary; these are
        # removed from the model and kept in the pool.
        selected = [
            (constraints.lazy[i] and constraints.names[i] not in self.enforced_cids)
            for i in range(len(constraints.lazy))
        ]
        n_removed = sum(selected)
        n_kept = sum(constraints.lazy) - n_removed
        self.pool = constraints[selected]
        assert self.pool.names is not None
        solver.internal_solver.remove_constraints(self.pool.names)
        logger.info(f"{n_kept} lazy constraints kept; {n_removed} moved to the pool")
        stats["LazyStatic: Removed"] = n_removed
        stats["LazyStatic: Kept"] = n_kept
        stats["LazyStatic: Restored"] = 0
        self.n_restored = 0
        self.n_iterations = 0

    @overrides
    def fit_xy(
        self,
        x: Dict[ConstraintCategory, np.ndarray],
        y: Dict[ConstraintCategory, np.ndarray],
    ) -> None:
        """Train one classifier and one threshold per constraint category."""
        for c in y.keys():
            assert c in x
            self.classifiers[c] = self.classifier_prototype.clone()
            self.thresholds[c] = self.threshold_prototype.clone()
            self.classifiers[c].fit(x[c], y[c])
            self.thresholds[c].fit(self.classifiers[c], x[c], y[c])

    @overrides
    def iteration_cb(
        self,
        solver: "LearningSolver",
        instance: "Instance",
        model: Any,
    ) -> bool:
        """Between iterations, restore violated pooled constraints.

        Returns True to request another solve. When a lazy callback is in
        use, violations are handled by lazy_cb instead.
        """
        if solver.use_lazy_cb:
            return False
        return self._check_and_add(solver)

    @overrides
    def lazy_cb(
        self,
        solver: "LearningSolver",
        instance: "Instance",
        model: Any,
    ) -> None:
        """Restore violated pooled constraints from inside the solver callback."""
        self._check_and_add(solver)

    def sample_predict(self, sample: Sample) -> List[ConstraintName]:
        """Return names of lazy constraints predicted as necessary."""
        x, y, cids = self._sample_xy_with_cids(sample)
        enforced_cids: List[ConstraintName] = []
        for category in x.keys():
            if category not in self.classifiers:
                # No trained model for this category; leave in the pool.
                continue
            npx = np.array(x[category])
            proba = self.classifiers[category].predict_proba(npx)
            thr = self.thresholds[category].predict(npx)
            # A constraint is selected when its "necessary" probability
            # exceeds the per-category threshold.
            pred = list(proba[:, 1] > thr[1])
            for (i, is_selected) in enumerate(pred):
                if is_selected:
                    enforced_cids += [cids[category][i]]
        return enforced_cids

    @overrides
    def sample_xy(
        self,
        _: Optional[Instance],
        sample: Sample,
    ) -> Tuple[
        Dict[ConstraintCategory, List[List[float]]],
        Dict[ConstraintCategory, List[List[float]]],
    ]:
        """Return (features, labels) per category; see _sample_xy_with_cids."""
        x, y, __ = self._sample_xy_with_cids(sample)
        return x, y

    def _check_and_add(self, solver: "LearningSolver") -> bool:
        """Restore violated pooled constraints; return True if any was restored."""
        assert solver.internal_solver is not None
        assert self.pool.names is not None
        if len(self.pool.names) == 0:
            logger.info("Lazy constraint pool is empty. Skipping violation check.")
            return False
        self.n_iterations += 1
        logger.info("Finding violated lazy constraints...")
        is_satisfied = solver.internal_solver.are_constraints_satisfied(
            self.pool,
            tol=self.violation_tolerance,
        )
        is_violated = [not i for i in is_satisfied]
        violated_constraints = self.pool[is_violated]
        satisfied_constraints = self.pool[is_satisfied]
        # Only satisfied constraints remain in the pool.
        self.pool = satisfied_constraints
        assert violated_constraints.names is not None
        assert satisfied_constraints.names is not None
        n_violated = len(violated_constraints.names)
        n_satisfied = len(satisfied_constraints.names)
        # Bug fix: message previously read "Found ... constraints found".
        logger.info(f"Found {n_violated} violated lazy constraints")
        if n_violated == 0:
            return False
        logger.info(
            f"Enforcing {n_violated} lazy constraints; "
            f"{n_satisfied} left in the pool..."
        )
        solver.internal_solver.add_constraints(violated_constraints)
        for name in violated_constraints.names:
            self.enforced_cids.add(name)
        self.n_restored += n_violated
        return True

    def _sample_xy_with_cids(
        self, sample: Sample
    ) -> Tuple[
        Dict[ConstraintCategory, List[List[float]]],
        Dict[ConstraintCategory, List[List[float]]],
        Dict[ConstraintCategory, List[ConstraintName]],
    ]:
        """Build (features, labels, constraint names) per category.

        Features concatenate instance-level features with per-constraint
        features (LP-based when available, static otherwise).  Labels are
        only produced when the sample records which lazy constraints were
        enforced in a previous solve.
        """
        x: Dict[ConstraintCategory, List[List[float]]] = {}
        y: Dict[ConstraintCategory, List[List[float]]] = {}
        cids: Dict[ConstraintCategory, List[ConstraintName]] = {}
        instance_features = sample.get_array("static_instance_features")
        constr_features = sample.get_array("lp_constr_features")
        constr_names = sample.get_array("static_constr_names")
        constr_categories = sample.get_array("static_constr_categories")
        constr_lazy = sample.get_array("static_constr_lazy")
        lazy_enforced = sample.get_array("mip_constr_lazy_enforced")
        if constr_features is None:
            # Fall back to static features when no LP features are stored.
            constr_features = sample.get_array("static_constr_features")
        assert instance_features is not None
        assert constr_features is not None
        assert constr_names is not None
        assert constr_categories is not None
        assert constr_lazy is not None
        for (cidx, cname) in enumerate(constr_names):
            # Only lazy constraints with a non-empty category participate.
            if not constr_lazy[cidx]:
                continue
            category = constr_categories[cidx]
            if len(category) == 0:
                continue
            if category not in x:
                x[category] = []
                y[category] = []
                cids[category] = []
            # Features
            features = list(instance_features)
            features.extend(constr_features[cidx])
            x[category].append(features)
            cids[category].append(cname)
            # Labels: [False, True] if the constraint was enforced.
            if lazy_enforced is not None:
                if cname in lazy_enforced:
                    y[category] += [[False, True]]
                else:
                    y[category] += [[True, False]]
        return x, y, cids

View File

@@ -1,140 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from unittest.mock import Mock
import numpy as np
from miplearn import DynamicLazyConstraintsComponent, LearningSolver, InternalSolver
from miplearn.classifiers import Classifier
from miplearn.tests import get_test_pyomo_instances
from numpy.linalg import norm
E = 0.1
def test_lazy_fit():
    """fit should create one classifier per observed violation and train
    each of them with the instance-feature matrix and per-violation labels."""
    instances, models = get_test_pyomo_instances()
    instances[0].found_violated_lazy_constraints = ["a", "b"]
    instances[1].found_violated_lazy_constraints = ["b", "c"]
    classifier = Mock(spec=Classifier)
    component = DynamicLazyConstraintsComponent(classifier=classifier)
    component.fit(instances)
    # Every classifier receives the same x matrix (one row per instance);
    # labels encode which instances violated that particular constraint.
    expected_x = np.array([[67.0, 21.75, 1287.92], [70.0, 23.75, 1199.83]])
    expected_y = {
        "a": np.array([1.0, 0.0]),
        "b": np.array([1.0, 1.0]),
        "c": np.array([0.0, 1.0]),
    }
    for cid in ["a", "b", "c"]:
        assert cid in component.classifiers
        fit_args = component.classifiers[cid].fit.call_args[0]
        assert norm(expected_x - fit_args[0]) < E
        assert norm(expected_y[cid] - fit_args[1]) < E
def test_lazy_before():
    """before_solve should query each classifier and add cuts only for the
    constraints whose predicted violation probability exceeds the threshold."""
    instances, models = get_test_pyomo_instances()
    instances[0].build_lazy_constraint = Mock(return_value="c1")
    solver = LearningSolver()
    solver.internal_solver = Mock(spec=InternalSolver)
    component = DynamicLazyConstraintsComponent(threshold=0.10)
    # Table-driven classifier setup: "a" predicts 5% violation probability
    # (below threshold), "b" predicts 80% (above threshold).
    probabilities = {"a": [[0.95, 0.05]], "b": [[0.02, 0.80]]}
    component.classifiers = {}
    for cid, proba in probabilities.items():
        clf = Mock(spec=Classifier)
        clf.predict_proba = Mock(return_value=proba)
        component.classifiers[cid] = clf
    component.before_solve(solver, instances[0], models[0])
    # Each classifier should have been queried with the instance features
    expected_x = np.array([[67.0, 21.75, 1287.92]])
    for cid in ("a", "b"):
        actual_x = component.classifiers[cid].predict_proba.call_args[0][0]
        assert norm(expected_x - actual_x) < E
    # Only "b" exceeds the threshold, so only its cut is built and added
    instances[0].build_lazy_constraint.assert_called_once_with(models[0], "b")
    solver.internal_solver.add_constraint.assert_called_once_with("c1")
def test_lazy_evaluate():
    """evaluate should return one dict of classification metrics per instance.

    Classifier "a" predicts not-violated; "b" and "c" predict violated.
    The metrics below follow from comparing these fixed predictions against
    each instance's actual found_violated_lazy_constraints list.
    """
    instances, models = get_test_pyomo_instances()
    component = DynamicLazyConstraintsComponent()
    component.classifiers = {"a": Mock(spec=Classifier),
                             "b": Mock(spec=Classifier),
                             "c": Mock(spec=Classifier)}
    component.classifiers["a"].predict_proba = Mock(return_value=[[1.0, 0.0]])
    component.classifiers["b"].predict_proba = Mock(return_value=[[0.0, 1.0]])
    component.classifiers["c"].predict_proba = Mock(return_value=[[0.0, 1.0]])
    instances[0].found_violated_lazy_constraints = ["a", "b", "c"]
    # NOTE: "d" has no classifier, so for instance 1 it can never be predicted
    instances[1].found_violated_lazy_constraints = ["b", "d"]
    assert component.evaluate(instances) == {
        0: {
            "Accuracy": 0.75,
            "F1 score": 0.8,
            "Precision": 1.0,
            "Recall": 2/3.,
            "Predicted positive": 2,
            "Predicted negative": 2,
            "Condition positive": 3,
            "Condition negative": 1,
            "False negative": 1,
            "False positive": 0,
            "True negative": 1,
            "True positive": 2,
            "Predicted positive (%)": 50.0,
            "Predicted negative (%)": 50.0,
            "Condition positive (%)": 75.0,
            "Condition negative (%)": 25.0,
            "False negative (%)": 25.0,
            "False positive (%)": 0,
            "True negative (%)": 25.0,
            "True positive (%)": 50.0,
        },
        1: {
            "Accuracy": 0.5,
            "F1 score": 0.5,
            "Precision": 0.5,
            "Recall": 0.5,
            "Predicted positive": 2,
            "Predicted negative": 2,
            "Condition positive": 2,
            "Condition negative": 2,
            "False negative": 1,
            "False positive": 1,
            "True negative": 1,
            "True positive": 1,
            "Predicted positive (%)": 50.0,
            "Predicted negative (%)": 50.0,
            "Condition positive (%)": 50.0,
            "Condition negative (%)": 50.0,
            "False negative (%)": 25.0,
            "False positive (%)": 25.0,
            "True negative (%)": 25.0,
            "True positive (%)": 25.0,
        }
    }

View File

@@ -1,188 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from unittest.mock import Mock, call
from miplearn import (StaticLazyConstraintsComponent,
LearningSolver,
Instance,
InternalSolver)
from miplearn.classifiers import Classifier
def test_usage_with_solver():
    """End-to-end interaction test of StaticLazyConstraintsComponent.

    Against fully mocked solver/instance objects, verifies that
    before_solve extracts the lazy constraints from the model, queries the
    classifiers, re-adds the constraints predicted as necessary, and that
    after_iteration keeps restoring violated constraints from the pool
    until the pool is empty.
    """
    solver = Mock(spec=LearningSolver)
    solver.use_lazy_cb = False
    solver.gap_tolerance = 1e-4
    internal = solver.internal_solver = Mock(spec=InternalSolver)
    internal.get_constraint_ids = Mock(return_value=["c1", "c2", "c3", "c4"])
    internal.extract_constraint = Mock(side_effect=lambda cid: "<%s>" % cid)
    internal.is_constraint_satisfied = Mock(return_value=False)
    instance = Mock(spec=Instance)
    instance.has_static_lazy_constraints = Mock(return_value=True)
    # c1 is the only non-lazy constraint; it must stay in the formulation
    instance.is_constraint_lazy = Mock(side_effect=lambda cid: {
        "c1": False,
        "c2": True,
        "c3": True,
        "c4": True,
    }[cid])
    instance.get_constraint_features = Mock(side_effect=lambda cid: {
        "c2": [1.0, 0.0],
        "c3": [0.5, 0.5],
        "c4": [1.0],
    }[cid])
    instance.get_constraint_category = Mock(side_effect=lambda cid: {
        "c2": "type-a",
        "c3": "type-a",
        "c4": "type-b",
    }[cid])
    component = StaticLazyConstraintsComponent(threshold=0.90,
                                               use_two_phase_gap=False,
                                               violation_tolerance=1.0)
    component.classifiers = {
        "type-a": Mock(spec=Classifier),
        "type-b": Mock(spec=Classifier),
    }
    # With threshold=0.90: c2 (0.80) stays in the pool; c3 (0.95) and
    # c4 (0.98) are predicted necessary and re-added.
    component.classifiers["type-a"].predict_proba = \
        Mock(return_value=[
            [0.20, 0.80],
            [0.05, 0.95],
        ])
    component.classifiers["type-b"].predict_proba = \
        Mock(return_value=[
            [0.02, 0.98],
        ])
    # LearningSolver calls before_solve
    component.before_solve(solver, instance, None)
    # Should ask if instance has static lazy constraints
    instance.has_static_lazy_constraints.assert_called_once()
    # Should ask internal solver for a list of constraints in the model
    internal.get_constraint_ids.assert_called_once()
    # Should ask if each constraint in the model is lazy
    instance.is_constraint_lazy.assert_has_calls([
        call("c1"), call("c2"), call("c3"), call("c4"),
    ])
    # For the lazy ones, should ask for features
    instance.get_constraint_features.assert_has_calls([
        call("c2"), call("c3"), call("c4"),
    ])
    # Should also ask for categories
    assert instance.get_constraint_category.call_count == 3
    instance.get_constraint_category.assert_has_calls([
        call("c2"), call("c3"), call("c4"),
    ])
    # Should ask internal solver to remove constraints identified as lazy
    assert internal.extract_constraint.call_count == 3
    internal.extract_constraint.assert_has_calls([
        call("c2"), call("c3"), call("c4"),
    ])
    # Should ask ML to predict whether each lazy constraint should be enforced
    component.classifiers["type-a"].predict_proba.assert_called_once_with([[1.0, 0.0], [0.5, 0.5]])
    component.classifiers["type-b"].predict_proba.assert_called_once_with([[1.0]])
    # For the ones that should be enforced, should ask solver to re-add them
    # to the formulation. The remaining ones should remain in the pool.
    assert internal.add_constraint.call_count == 2
    internal.add_constraint.assert_has_calls([
        call("<c3>"), call("<c4>"),
    ])
    internal.add_constraint.reset_mock()
    # LearningSolver calls after_iteration (first time)
    should_repeat = component.after_iteration(solver, instance, None)
    assert should_repeat
    # Should ask internal solver to verify if constraints in the pool are
    # satisfied and add the ones that are not
    internal.is_constraint_satisfied.assert_called_once_with("<c2>", tol=1.0)
    internal.is_constraint_satisfied.reset_mock()
    internal.add_constraint.assert_called_once_with("<c2>")
    internal.add_constraint.reset_mock()
    # LearningSolver calls after_iteration (second time)
    should_repeat = component.after_iteration(solver, instance, None)
    assert not should_repeat
    # The lazy constraint pool should be empty by now, so no calls should be made
    internal.is_constraint_satisfied.assert_not_called()
    internal.add_constraint.assert_not_called()
    # Should update instance object
    assert instance.found_violated_lazy_constraints == ["c3", "c4", "c2"]
def test_fit():
    """fit should group constraint features and one-hot labels by category
    and pass each group to the matching classifier."""
    instance_1 = Mock(spec=Instance)
    instance_1.found_violated_lazy_constraints = ["c1", "c2", "c4", "c5"]
    instance_1.get_constraint_category = Mock(side_effect=lambda cid: {
        "c1": "type-a",
        "c2": "type-a",
        "c3": "type-a",
        "c4": "type-b",
        "c5": "type-b",
    }[cid])
    instance_1.get_constraint_features = Mock(side_effect=lambda cid: {
        "c1": [1, 1],
        "c2": [1, 2],
        "c3": [1, 3],
        "c4": [1, 4, 0],
        "c5": [1, 5, 0],
    }[cid])
    instance_2 = Mock(spec=Instance)
    instance_2.found_violated_lazy_constraints = ["c2", "c3", "c4"]
    instance_2.get_constraint_category = Mock(side_effect=lambda cid: {
        "c1": "type-a",
        "c2": "type-a",
        "c3": "type-a",
        "c4": "type-b",
        "c5": "type-b",
    }[cid])
    instance_2.get_constraint_features = Mock(side_effect=lambda cid: {
        "c1": [2, 1],
        "c2": [2, 2],
        "c3": [2, 3],
        "c4": [2, 4, 0],
        "c5": [2, 5, 0],
    }[cid])
    instances = [instance_1, instance_2]
    component = StaticLazyConstraintsComponent()
    component.classifiers = {
        "type-a": Mock(spec=Classifier),
        "type-b": Mock(spec=Classifier),
    }
    expected_constraints = {
        "type-a": ["c1", "c2", "c3"],
        "type-b": ["c4", "c5"],
    }
    # x rows are ordered: instance 1's constraints first, then instance 2's
    expected_x = {
        "type-a": [[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]],
        "type-b": [[1, 4, 0], [1, 5, 0], [2, 4, 0], [2, 5, 0]]
    }
    # Labels are one-hot: [0, 1] if the constraint was violated for that
    # instance, [1, 0] otherwise.
    expected_y = {
        "type-a": [[0, 1], [0, 1], [1, 0], [1, 0], [0, 1], [0, 1]],
        "type-b": [[0, 1], [0, 1], [0, 1], [1, 0]]
    }
    assert component._collect_constraints(instances) == expected_constraints
    assert component.x(instances) == expected_x
    assert component.y(instances) == expected_y
    component.fit(instances)
    component.classifiers["type-a"].fit.assert_called_once_with(expected_x["type-a"],
                                                                expected_y["type-a"])
    component.classifiers["type-b"].fit.assert_called_once_with(expected_x["type-b"],
                                                                expected_y["type-b"])

View File

@@ -1,47 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from unittest.mock import Mock
import numpy as np
from miplearn import ObjectiveValueComponent
from miplearn.classifiers import Regressor
from miplearn.tests import get_test_pyomo_instances
def test_usage():
    """After fitting, the component should expose the instance bounds and
    predict them back for the training instances."""
    instances, models = get_test_pyomo_instances()
    component = ObjectiveValueComponent()
    component.fit(instances)
    assert instances[0].lower_bound == 1183.0
    assert instances[0].upper_bound == 1183.0
    predictions = np.round(component.predict(instances), 2).tolist()
    assert predictions == [[1183.0, 1183.0], [1070.0, 1070.0]]
def test_obj_evaluate():
    """evaluate should report regression metrics for both bounds.

    The mocked regressor always predicts 1000.0 for both instances; the
    metric values below are the resulting errors against the instances'
    actual bounds.
    """
    instances, models = get_test_pyomo_instances()
    reg = Mock(spec=Regressor)
    reg.predict = Mock(return_value=np.array([1000.0, 1000.0]))
    comp = ObjectiveValueComponent(regressor=reg)
    comp.fit(instances)
    ev = comp.evaluate(instances)
    assert ev == {
        'Lower bound': {
            'Explained variance': 0.0,
            'Max error': 183.0,
            'Mean absolute error': 126.5,
            'Mean squared error': 19194.5,
            'Median absolute error': 126.5,
            'R2': -5.012843605607331,
        },
        'Upper bound': {
            'Explained variance': 0.0,
            'Max error': 183.0,
            'Mean absolute error': 126.5,
            'Mean squared error': 19194.5,
            'Median absolute error': 126.5,
            'R2': -5.012843605607331,
        }
    }

View File

@@ -1,99 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from unittest.mock import Mock
import numpy as np
from miplearn import PrimalSolutionComponent
from miplearn.classifiers import Classifier
from miplearn.tests import get_test_pyomo_instances
def test_predict():
    """predict should return a warm-start entry for every index of "x"."""
    instances, models = get_test_pyomo_instances()
    component = PrimalSolutionComponent()
    component.fit(instances)
    solution = component.predict(instances[0])
    assert "x" in solution
    for index in range(4):
        assert index in solution["x"]
def test_evaluate():
    """evaluate should report fix-zero/fix-one classification metrics.

    Two mocked classifiers produce fixed predictions; the expected metric
    dicts below compare them against instances[0].solution.
    """
    instances, models = get_test_pyomo_instances()
    clf_zero = Mock(spec=Classifier)
    clf_zero.predict_proba = Mock(return_value=np.array([
        [0., 1.],  # x[0]
        [0., 1.],  # x[1]
        [1., 0.],  # x[2]
        [1., 0.],  # x[3]
    ]))
    clf_one = Mock(spec=Classifier)
    clf_one.predict_proba = Mock(return_value=np.array([
        [1., 0.],  # x[0] instances[0]
        [1., 0.],  # x[1] instances[0]
        [0., 1.],  # x[2] instances[0]
        [1., 0.],  # x[3] instances[0]
    ]))
    comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one],
                                   threshold=0.50)
    comp.fit(instances[:1])
    # x[3] is left as None: neither classifier is confident enough to fix it
    assert comp.predict(instances[0]) == {"x": {0: 0,
                                                1: 0,
                                                2: 1,
                                                3: None}}
    assert instances[0].solution == {"x": {0: 1,
                                           1: 0,
                                           2: 1,
                                           3: 1}}
    ev = comp.evaluate(instances[:1])
    assert ev == {'Fix one': {0: {'Accuracy': 0.5,
                                  'Condition negative': 1,
                                  'Condition negative (%)': 25.0,
                                  'Condition positive': 3,
                                  'Condition positive (%)': 75.0,
                                  'F1 score': 0.5,
                                  'False negative': 2,
                                  'False negative (%)': 50.0,
                                  'False positive': 0,
                                  'False positive (%)': 0.0,
                                  'Precision': 1.0,
                                  'Predicted negative': 3,
                                  'Predicted negative (%)': 75.0,
                                  'Predicted positive': 1,
                                  'Predicted positive (%)': 25.0,
                                  'Recall': 0.3333333333333333,
                                  'True negative': 1,
                                  'True negative (%)': 25.0,
                                  'True positive': 1,
                                  'True positive (%)': 25.0}},
                  'Fix zero': {0: {'Accuracy': 0.75,
                                   'Condition negative': 3,
                                   'Condition negative (%)': 75.0,
                                   'Condition positive': 1,
                                   'Condition positive (%)': 25.0,
                                   'F1 score': 0.6666666666666666,
                                   'False negative': 0,
                                   'False negative (%)': 0.0,
                                   'False positive': 1,
                                   'False positive (%)': 25.0,
                                   'Precision': 0.5,
                                   'Predicted negative': 2,
                                   'Predicted negative (%)': 50.0,
                                   'Predicted positive': 2,
                                   'Predicted positive (%)': 50.0,
                                   'Recall': 1.0,
                                   'True negative': 2,
                                   'True negative (%)': 50.0,
                                   'True positive': 1,
                                   'True positive (%)': 25.0}}}
def test_primal_parallel_fit():
    """Fitting with two parallel jobs should still yield both classifiers."""
    instances, models = get_test_pyomo_instances()
    component = PrimalSolutionComponent()
    component.fit(instances, n_jobs=2)
    assert len(component.classifiers) == 2

View File

@@ -1,188 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from unittest.mock import Mock, call
from miplearn import (RelaxationComponent,
LearningSolver,
Instance,
InternalSolver)
from miplearn.classifiers import Classifier
def test_usage_with_solver():
    """Interaction test of RelaxationComponent with mocked solver/instance.

    Verifies that before_solve relaxes the problem, queries categories and
    features for every constraint, removes the constraints predicted as
    redundant, and that after_solve stores the constraint slacks on the
    instance.
    """
    solver = Mock(spec=LearningSolver)
    internal = solver.internal_solver = Mock(spec=InternalSolver)
    internal.get_constraint_ids = Mock(return_value=["c1", "c2", "c3", "c4"])
    internal.get_constraint_slacks = Mock(side_effect=lambda: {
        "c1": 0.5,
        "c2": 0.0,
        "c3": 0.0,
        "c4": 1.4,
    })
    instance = Mock(spec=Instance)
    instance.get_constraint_features = Mock(side_effect=lambda cid: {
        "c2": [1.0, 0.0],
        "c3": [0.5, 0.5],
        "c4": [1.0],
    }[cid])
    # c1 has a null category and is therefore never considered by the ML
    instance.get_constraint_category = Mock(side_effect=lambda cid: {
        "c1": None,
        "c2": "type-a",
        "c3": "type-a",
        "c4": "type-b",
    }[cid])
    component = RelaxationComponent()
    component.classifiers = {
        "type-a": Mock(spec=Classifier),
        "type-b": Mock(spec=Classifier),
    }
    component.classifiers["type-a"].predict_proba = \
        Mock(return_value=[
            [0.20, 0.80],
            [0.05, 0.95],
        ])
    component.classifiers["type-b"].predict_proba = \
        Mock(return_value=[
            [0.02, 0.98],
        ])
    # LearningSolver calls before_solve
    component.before_solve(solver, instance, None)
    # Should relax integrality of the problem
    internal.relax.assert_called_once()
    # Should query list of constraints
    internal.get_constraint_ids.assert_called_once()
    # Should query category and features for each constraint in the model
    assert instance.get_constraint_category.call_count == 4
    instance.get_constraint_category.assert_has_calls([
        call("c1"), call("c2"), call("c3"), call("c4"),
    ])
    # For constraint with non-null categories, should ask for features
    assert instance.get_constraint_features.call_count == 3
    instance.get_constraint_features.assert_has_calls([
        call("c2"), call("c3"), call("c4"),
    ])
    # Should ask ML to predict whether constraint should be removed
    component.classifiers["type-a"].predict_proba.assert_called_once_with([[1.0, 0.0], [0.5, 0.5]])
    component.classifiers["type-b"].predict_proba.assert_called_once_with([[1.0]])
    # Should ask internal solver to remove constraints predicted as redundant
    assert internal.extract_constraint.call_count == 2
    internal.extract_constraint.assert_has_calls([
        call("c3"), call("c4"),
    ])
    # LearningSolver calls after_solve
    component.after_solve(solver, instance, None, None)
    # Should query slack for all constraints
    internal.get_constraint_slacks.assert_called_once()
    # Should store constraint slacks in instance object
    assert hasattr(instance, "slacks")
    assert instance.slacks == {
        "c1": 0.5,
        "c2": 0.0,
        "c3": 0.0,
        "c4": 1.4,
    }
def test_x_y_fit_predict_evaluate():
    """Checks x/y matrix construction, fitting, prediction and evaluation
    of RelaxationComponent over two mocked instances.

    Labels are 1 (redundant) when the stored slack exceeds
    slack_tolerance=0.05; predictions require probability above
    threshold=0.80.
    """
    instances = [Mock(spec=Instance), Mock(spec=Instance)]
    component = RelaxationComponent(slack_tolerance=0.05,
                                    threshold=0.80)
    component.classifiers = {
        "type-a": Mock(spec=Classifier),
        "type-b": Mock(spec=Classifier),
    }
    component.classifiers["type-a"].predict_proba = \
        Mock(return_value=[
            [0.20, 0.80],
        ])
    component.classifiers["type-b"].predict_proba = \
        Mock(return_value=[
            [0.50, 0.50],
            [0.05, 0.95],
        ])
    # First mock instance
    instances[0].slacks = {
        "c1": 0.00,
        "c2": 0.05,
        "c3": 0.00,
        "c4": 30.0,
    }
    instances[0].get_constraint_category = Mock(side_effect=lambda cid: {
        "c1": None,
        "c2": "type-a",
        "c3": "type-a",
        "c4": "type-b",
    }[cid])
    instances[0].get_constraint_features = Mock(side_effect=lambda cid: {
        "c2": [1.0, 0.0],
        "c3": [0.5, 0.5],
        "c4": [1.0],
    }[cid])
    # Second mock instance
    instances[1].slacks = {
        "c1": 0.00,
        "c3": 0.30,
        "c4": 0.00,
        "c5": 0.00,
    }
    instances[1].get_constraint_category = Mock(side_effect=lambda cid: {
        "c1": None,
        "c3": "type-a",
        "c4": "type-b",
        "c5": "type-b",
    }[cid])
    instances[1].get_constraint_features = Mock(side_effect=lambda cid: {
        "c3": [0.3, 0.4],
        "c4": [0.7],
        "c5": [0.8],
    }[cid])
    expected_x = {
        "type-a": [[1.0, 0.0], [0.5, 0.5], [0.3, 0.4]],
        "type-b": [[1.0], [0.7], [0.8]],
    }
    expected_y = {
        "type-a": [[0], [0], [1]],
        "type-b": [[1], [0], [0]]
    }
    # Should build X and Y matrices correctly
    assert component.x(instances) == expected_x
    assert component.y(instances) == expected_y
    # Should pass along X and Y matrices to classifiers
    component.fit(instances)
    component.classifiers["type-a"].fit.assert_called_with(expected_x["type-a"], expected_y["type-a"])
    component.classifiers["type-b"].fit.assert_called_with(expected_x["type-b"], expected_y["type-b"])
    assert component.predict(expected_x) == {
        "type-a": [[1]],
        "type-b": [[0], [1]]
    }
    ev = component.evaluate(instances[1])
    assert ev["True positive"] == 1
    assert ev["True negative"] == 1
    assert ev["False positive"] == 1
    assert ev["False negative"] == 0

View File

@@ -1,105 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from abc import ABC, abstractmethod
import numpy as np
from tqdm import tqdm
logger = logging.getLogger(__name__)
class Extractor(ABC):
    """Base class for objects that turn solved instances into training data."""

    @abstractmethod
    def extract(self, instances):
        pass

    @staticmethod
    def split_variables(instance):
        """Group the instance's (var_name, index) pairs by variable category.

        Variables whose category is None are omitted. Requires the instance
        to carry an lp_solution attribute.
        """
        assert hasattr(instance, "lp_solution")
        groups = {}
        for var_name, indices in instance.lp_solution.items():
            for index in indices:
                category = instance.get_variable_category(var_name, index)
                if category is None:
                    continue
                groups.setdefault(category, []).append((var_name, index))
        return groups
class VariableFeaturesExtractor(Extractor):
    """Extracts one feature row per variable, grouped by variable category.

    Each row is: instance features + per-variable features + the variable's
    LP solution value.
    """

    def extract(self, instances):
        collected = {}
        progress = tqdm(
            instances,
            desc="Extract (vars)",
            disable=len(instances) < 5,
        )
        for instance in progress:
            instance_features = instance.get_instance_features()
            for category, pairs in self.split_variables(instance).items():
                rows = collected.setdefault(category, [])
                for var_name, index in pairs:
                    rows.append(
                        instance_features.tolist()
                        + instance.get_variable_features(var_name, index).tolist()
                        + [instance.lp_solution[var_name][index]]
                    )
        return {category: np.array(rows) for category, rows in collected.items()}
class SolutionExtractor(Extractor):
    """Extracts one-hot solution labels per variable, grouped by category.

    With relaxation=True the LP solution is used instead of the MIP
    solution. A None value (variable left free) maps to [0, 0]; a value v
    maps to [1 - v, v].
    """

    def __init__(self, relaxation=False):
        self.relaxation = relaxation

    def extract(self, instances):
        collected = {}
        progress = tqdm(
            instances,
            desc="Extract (solution)",
            disable=len(instances) < 5,
        )
        for instance in progress:
            solution_source = (
                instance.lp_solution if self.relaxation else instance.solution
            )
            for category, pairs in self.split_variables(instance).items():
                rows = collected.setdefault(category, [])
                for var_name, index in pairs:
                    v = solution_source[var_name][index]
                    rows.append([0, 0] if v is None else [1 - v, v])
        return {category: np.array(rows) for category, rows in collected.items()}
class InstanceFeaturesExtractor(Extractor):
    """Stacks each instance's features plus its LP value into one matrix row."""

    def extract(self, instances):
        rows = [
            np.hstack([instance.get_instance_features(), instance.lp_value])
            for instance in instances
        ]
        return np.vstack(rows)
class ObjectiveValueExtractor(Extractor):
    """Extracts a column vector of objective values from instances.

    Parameters
    ----------
    kind: str
        Which value to extract: "lower bound", "upper bound" or "lp".
    """

    def __init__(self, kind="lp"):
        assert kind in ["lower bound", "upper bound", "lp"]
        self.kind = kind

    def extract(self, instances):
        # Map each kind to the instance attribute holding its value. Using a
        # dict lookup (instead of an if-chain with an implicit fall-through)
        # makes an unexpected kind fail loudly with KeyError rather than
        # silently returning None -- the __init__ assert is stripped when
        # Python runs with -O.
        attr = {
            "lower bound": "lower_bound",
            "upper bound": "upper_bound",
            "lp": "lp_value",
        }[self.kind]
        return np.array([[getattr(instance, attr)] for instance in instances])

View File

@@ -1,3 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

View File

@@ -0,0 +1,406 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from math import log, isfinite
from typing import TYPE_CHECKING, List, Tuple, Optional
import numpy as np
from miplearn.features.sample import Sample
from miplearn.solvers.internal import LPSolveStats
if TYPE_CHECKING:
from miplearn.solvers.internal import InternalSolver
from miplearn.instance.base import Instance
class FeaturesExtractor:
def __init__(
    self,
    with_sa: bool = True,
    with_lhs: bool = True,
) -> None:
    """Initialize the feature extractor.

    Parameters
    ----------
    with_sa: bool
        If True, request sensitivity-analysis information from the solver
        when extracting LP features.
    with_lhs: bool
        If True, request the constraints' left-hand sides when extracting
        static features.
    """
    self.with_sa = with_sa
    self.with_lhs = with_lhs
def extract_after_load_features(
    self,
    instance: "Instance",
    solver: "InternalSolver",
    sample: Sample,
) -> None:
    """Extract static (pre-solve) features and store them in the sample.

    Queries the solver for variables and constraints (including static
    attributes), stores them under "static_*" keys, then appends
    user-provided instance, constraint and variable features.
    """
    variables = solver.get_variables(with_static=True)
    constraints = solver.get_constraints(with_static=True, with_lhs=self.with_lhs)
    assert constraints.names is not None
    sample.put_array("static_var_lower_bounds", variables.lower_bounds)
    sample.put_array("static_var_names", variables.names)
    sample.put_array("static_var_obj_coeffs", variables.obj_coeffs)
    sample.put_array("static_var_types", variables.types)
    sample.put_array("static_var_upper_bounds", variables.upper_bounds)
    sample.put_array("static_constr_names", constraints.names)
    # sample.put("static_constr_lhs", constraints.lhs)
    sample.put_array("static_constr_rhs", constraints.rhs)
    sample.put_array("static_constr_senses", constraints.senses)
    # Instance features
    self._extract_user_features_instance(instance, sample)
    # Constraint features
    (
        constr_features,
        constr_categories,
        constr_lazy,
    ) = FeaturesExtractor._extract_user_features_constrs(
        instance,
        constraints.names,
    )
    sample.put_array("static_constr_features", constr_features)
    sample.put_array("static_constr_categories", constr_categories)
    sample.put_array("static_constr_lazy", constr_lazy)
    sample.put_scalar("static_constr_lazy_count", int(constr_lazy.sum()))
    # Variable features: user features, AlvLouWeh2017 features (objective
    # coefficients only at this stage), then raw bounds/coefficients as
    # extra columns.
    (
        vars_features_user,
        var_categories,
    ) = self._extract_user_features_vars(instance, sample)
    sample.put_array("static_var_categories", var_categories)
    assert variables.lower_bounds is not None
    assert variables.obj_coeffs is not None
    assert variables.upper_bounds is not None
    sample.put_array(
        "static_var_features",
        np.hstack(
            [
                vars_features_user,
                self._extract_var_features_AlvLouWeh2017(
                    obj_coeffs=variables.obj_coeffs,
                ),
                variables.lower_bounds.reshape(-1, 1),
                variables.obj_coeffs.reshape(-1, 1),
                variables.upper_bounds.reshape(-1, 1),
            ]
        ),
    )
def extract_after_lp_features(
    self,
    solver: "InternalSolver",
    sample: Sample,
    lp_stats: LPSolveStats,
) -> None:
    """Extract features available after solving the LP relaxation.

    Stores LP statistics, raw variable/constraint LP attributes, and the
    combined "lp_var_features", "lp_constr_features" and
    "lp_instance_features" matrices in the sample.
    """
    # Store every LP statistic (lp_value, lp_wallclock_time, ...) as a scalar
    for (k, v) in lp_stats.__dict__.items():
        sample.put_scalar(k, v)
    variables = solver.get_variables(with_static=False, with_sa=self.with_sa)
    constraints = solver.get_constraints(with_static=False, with_sa=self.with_sa)
    sample.put_array("lp_var_basis_status", variables.basis_status)
    sample.put_array("lp_var_reduced_costs", variables.reduced_costs)
    sample.put_array("lp_var_sa_lb_down", variables.sa_lb_down)
    sample.put_array("lp_var_sa_lb_up", variables.sa_lb_up)
    sample.put_array("lp_var_sa_obj_down", variables.sa_obj_down)
    sample.put_array("lp_var_sa_obj_up", variables.sa_obj_up)
    sample.put_array("lp_var_sa_ub_down", variables.sa_ub_down)
    sample.put_array("lp_var_sa_ub_up", variables.sa_ub_up)
    sample.put_array("lp_var_values", variables.values)
    sample.put_array("lp_constr_basis_status", constraints.basis_status)
    sample.put_array("lp_constr_dual_values", constraints.dual_values)
    sample.put_array("lp_constr_sa_rhs_down", constraints.sa_rhs_down)
    sample.put_array("lp_constr_sa_rhs_up", constraints.sa_rhs_up)
    sample.put_array("lp_constr_slacks", constraints.slacks)
    # Variable features: static features + AlvLouWeh2017 features, then one
    # column per available sensitivity/solution vector. None entries (data
    # the solver did not provide) are skipped.
    lp_var_features_list = []
    for f in [
        sample.get_array("static_var_features"),
        self._extract_var_features_AlvLouWeh2017(
            obj_coeffs=sample.get_array("static_var_obj_coeffs"),
            obj_sa_up=variables.sa_obj_up,
            obj_sa_down=variables.sa_obj_down,
            values=variables.values,
        ),
    ]:
        if f is not None:
            lp_var_features_list.append(f)
    for f in [
        variables.reduced_costs,
        variables.sa_lb_down,
        variables.sa_lb_up,
        variables.sa_obj_down,
        variables.sa_obj_up,
        variables.sa_ub_down,
        variables.sa_ub_up,
        variables.values,
    ]:
        if f is not None:
            lp_var_features_list.append(f.reshape(-1, 1))
    lp_var_features = np.hstack(lp_var_features_list)
    # _fix_infinity sanitizes non-finite entries in-place (helper defined
    # elsewhere in this file)
    _fix_infinity(lp_var_features)
    sample.put_array("lp_var_features", lp_var_features)
    # Constraint features
    lp_constr_features_list = []
    for f in [sample.get_array("static_constr_features")]:
        if f is not None:
            lp_constr_features_list.append(f)
    for f in [
        sample.get_array("lp_constr_dual_values"),
        sample.get_array("lp_constr_sa_rhs_down"),
        sample.get_array("lp_constr_sa_rhs_up"),
        sample.get_array("lp_constr_slacks"),
    ]:
        if f is not None:
            lp_constr_features_list.append(f.reshape(-1, 1))
    lp_constr_features = np.hstack(lp_constr_features_list)
    _fix_infinity(lp_constr_features)
    sample.put_array("lp_constr_features", lp_constr_features)
    # Build lp_instance_features
    static_instance_features = sample.get_array("static_instance_features")
    assert static_instance_features is not None
    assert lp_stats.lp_value is not None
    assert lp_stats.lp_wallclock_time is not None
    sample.put_array(
        "lp_instance_features",
        np.hstack(
            [
                static_instance_features,
                lp_stats.lp_value,
                lp_stats.lp_wallclock_time,
            ]
        ),
    )
def extract_after_mip_features(
    self,
    solver: "InternalSolver",
    sample: Sample,
) -> None:
    """Store MIP solution values and constraint slacks in the sample."""
    mip_vars = solver.get_variables(with_static=False, with_sa=False)
    mip_constrs = solver.get_constraints(with_static=False, with_sa=False)
    sample.put_array("mip_var_values", mip_vars.values)
    sample.put_array("mip_constr_slacks", mip_constrs.slacks)
# noinspection DuplicatedCode
def _extract_user_features_vars(
    self,
    instance: "Instance",
    sample: Sample,
) -> Tuple[np.ndarray, np.ndarray]:
    """Query and validate user-provided variable features and categories.

    Returns
    -------
    Tuple[np.ndarray, np.ndarray]
        (var_features, var_categories): a 2-D float matrix with one row per
        variable, and a 1-D bytes ('S' dtype) vector of categories.
    """
    # Query variable names
    var_names = sample.get_array("static_var_names")
    assert var_names is not None
    # Query variable features
    var_features = instance.get_variable_features(var_names)
    assert isinstance(var_features, np.ndarray), (
        f"Variable features must be a numpy array. "
        f"Found {var_features.__class__} instead."
    )
    assert len(var_features.shape) == 2, (
        f"Variable features must be 2-dimensional array. "
        f"Found array with shape {var_features.shape} instead."
    )
    assert var_features.shape[0] == len(var_names), (
        f"Variable features must have exactly {len(var_names)} rows. "
        f"Found {var_features.shape[0]} rows instead."
    )
    assert var_features.dtype.kind in ["f"], (
        f"Variable features must be floating point numbers. "
        f"Found {var_features.dtype} instead."
    )
    # Query variable categories
    var_categories = instance.get_variable_categories(var_names)
    assert isinstance(var_categories, np.ndarray), (
        f"Variable categories must be a numpy array. "
        f"Found {var_categories.__class__} instead."
    )
    assert len(var_categories.shape) == 1, (
        f"Variable categories must be a vector. "
        f"Found array with shape {var_categories.shape} instead."
    )
    assert len(var_categories) == len(var_names), (
        f"Variable categories must have exactly {len(var_names)} elements. "
        f"Found {var_categories.shape[0]} elements instead."
    )
    assert var_categories.dtype.kind == "S", (
        f"Variable categories must be a numpy array with dtype='S'. "
        f"Found {var_categories.dtype} instead."
    )
    return var_features, var_categories
# noinspection DuplicatedCode
@classmethod
def _extract_user_features_constrs(
    cls,
    instance: "Instance",
    constr_names: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Query and validate user-provided constraint data.

    Returns
    -------
    Tuple[np.ndarray, np.ndarray, np.ndarray]
        (constr_features, constr_categories, constr_lazy): a 2-D float
        matrix with one row per constraint, a 1-D bytes ('S' dtype) vector
        of categories, and a 1-D boolean laziness vector.
    """
    # Query constraint features
    constr_features = instance.get_constraint_features(constr_names)
    assert isinstance(constr_features, np.ndarray), (
        f"get_constraint_features must return a numpy array. "
        f"Found {constr_features.__class__} instead."
    )
    assert len(constr_features.shape) == 2, (
        f"get_constraint_features must return a 2-dimensional array. "
        f"Found array with shape {constr_features.shape} instead."
    )
    assert constr_features.shape[0] == len(constr_names), (
        f"get_constraint_features must return an array with {len(constr_names)} "
        f"rows. Found {constr_features.shape[0]} rows instead."
    )
    assert constr_features.dtype.kind in ["f"], (
        f"get_constraint_features must return floating point numbers. "
        f"Found {constr_features.dtype} instead."
    )
    # Query constraint categories
    constr_categories = instance.get_constraint_categories(constr_names)
    assert isinstance(constr_categories, np.ndarray), (
        f"get_constraint_categories must return a numpy array. "
        f"Found {constr_categories.__class__} instead."
    )
    assert len(constr_categories.shape) == 1, (
        f"get_constraint_categories must return a vector. "
        f"Found array with shape {constr_categories.shape} instead."
    )
    assert len(constr_categories) == len(constr_names), (
        f"get_constraint_categories must return a vector with {len(constr_names)} "
        f"elements. Found {constr_categories.shape[0]} elements instead."
    )
    assert constr_categories.dtype.kind == "S", (
        f"get_constraint_categories must return a numpy array with dtype='S'. "
        f"Found {constr_categories.dtype} instead."
    )
    # Query constraint lazy attribute
    constr_lazy = instance.are_constraints_lazy(constr_names)
    assert isinstance(constr_lazy, np.ndarray), (
        f"are_constraints_lazy must return a numpy array. "
        f"Found {constr_lazy.__class__} instead."
    )
    assert len(constr_lazy.shape) == 1, (
        f"are_constraints_lazy must return a vector. "
        f"Found array with shape {constr_lazy.shape} instead."
    )
    assert constr_lazy.shape[0] == len(constr_names), (
        f"are_constraints_lazy must return a vector with {len(constr_names)} "
        f"elements. Found {constr_lazy.shape[0]} elements instead."
    )
    assert constr_lazy.dtype.kind == "b", (
        f"are_constraints_lazy must return a boolean array. "
        f"Found {constr_lazy.dtype} instead."
    )
    return constr_features, constr_categories, constr_lazy
def _extract_user_features_instance(
self,
instance: "Instance",
sample: Sample,
) -> None:
features = instance.get_instance_features()
assert isinstance(features, np.ndarray), (
f"Instance features must be a numpy array. "
f"Found {features.__class__} instead."
)
assert len(features.shape) == 1, (
f"Instance features must be a vector. "
f"Found array with shape {features.shape} instead."
)
assert features.dtype.kind in [
"f"
], f"Instance features have unsupported {features.dtype}"
sample.put_array("static_instance_features", features)
    # Alvarez, A. M., Louveaux, Q., & Wehenkel, L. (2017). A machine learning-based
    # approximation of strong branching. INFORMS Journal on Computing, 29(1), 185-195.
    # noinspection PyPep8Naming
    def _extract_var_features_AlvLouWeh2017(
        self,
        obj_coeffs: Optional[np.ndarray] = None,
        obj_sa_down: Optional[np.ndarray] = None,
        obj_sa_up: Optional[np.ndarray] = None,
        values: Optional[np.ndarray] = None,
    ) -> np.ndarray:
        """Build an (nvars, k) matrix of per-variable branching features.

        Parameters
        ----------
        obj_coeffs
            Objective coefficients of the variables (required).
        obj_sa_down, obj_sa_up
            Down/up sensitivity values for the objective coefficients.
        values
            Current (possibly fractional) values of the variables.

        Only the feature columns whose inputs were provided are produced, so
        k is between 1 and 8. "Feature N" comments below presumably refer to
        the feature numbering of the AlvLouWeh2017 paper — TODO confirm.
        Non-finite entries in the result are repaired by _fix_infinity.
        """
        assert obj_coeffs is not None
        # astype(float) copies, so the in-place _fix_infinity calls below do
        # not mutate the caller's arrays.
        obj_coeffs = obj_coeffs.astype(float)
        _fix_infinity(obj_coeffs)
        nvars = len(obj_coeffs)
        if obj_sa_down is not None:
            obj_sa_down = obj_sa_down.astype(float)
            _fix_infinity(obj_sa_down)
        if obj_sa_up is not None:
            obj_sa_up = obj_sa_up.astype(float)
            _fix_infinity(obj_sa_up)
        if values is not None:
            values = values.astype(float)
            _fix_infinity(values)
        # Sums of positive and (negated) negative objective coefficients,
        # used to normalize features 2 and 3.
        pos_obj_coeffs_sum = obj_coeffs[obj_coeffs > 0].sum()
        neg_obj_coeffs_sum = -obj_coeffs[obj_coeffs < 0].sum()
        curr = 0  # index of the next feature column to fill
        max_n_features = 8
        features = np.zeros((nvars, max_n_features))
        # Divisions by zero / invalid ops below yield inf/nan, which are
        # cleaned up by _fix_infinity before returning.
        with np.errstate(divide="ignore", invalid="ignore"):
            # Feature 1
            features[:, curr] = np.sign(obj_coeffs)
            curr += 1
            # Feature 2
            if abs(pos_obj_coeffs_sum) > 0:
                features[:, curr] = np.abs(obj_coeffs) / pos_obj_coeffs_sum
                curr += 1
            # Feature 3
            if abs(neg_obj_coeffs_sum) > 0:
                features[:, curr] = np.abs(obj_coeffs) / neg_obj_coeffs_sum
                curr += 1
            # Feature 37: distance from each value to the nearest integer.
            if values is not None:
                features[:, curr] = np.minimum(
                    values - np.floor(values),
                    np.ceil(values) - values,
                )
                curr += 1
            # Feature 44
            if obj_sa_up is not None:
                features[:, curr] = np.sign(obj_sa_up)
                curr += 1
            # Feature 46
            if obj_sa_down is not None:
                features[:, curr] = np.sign(obj_sa_down)
                curr += 1
            # Feature 47
            # NOTE(review): "/" binds tighter than "-", so this computes
            # obj_coeffs - (obj_sa_down / sign(obj_coeffs)) — confirm this
            # matches the formula in the paper.
            if obj_sa_down is not None:
                features[:, curr] = np.log(
                    obj_coeffs - obj_sa_down / np.sign(obj_coeffs)
                )
                curr += 1
            # Feature 48 (same precedence caveat as Feature 47)
            if obj_sa_up is not None:
                features[:, curr] = np.log(obj_coeffs - obj_sa_up / np.sign(obj_coeffs))
                curr += 1
        # Keep only the columns that were actually filled, then clamp any
        # inf/nan produced by the divisions and logs above.
        features = features[:, 0:curr]
        _fix_infinity(features)
        return features
def _fix_infinity(m: np.ndarray) -> None:
masked = np.ma.masked_invalid(m)
max_values = np.max(masked, axis=0)
min_values = np.min(masked, axis=0)
m[:] = np.maximum(np.minimum(m, max_values), min_values)
m[np.isnan(m)] = 0.0

224
miplearn/features/sample.py Normal file
View File

@@ -0,0 +1,224 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import warnings
from abc import ABC, abstractmethod
from copy import deepcopy
from typing import Dict, Optional, Any, Union, List, Tuple, cast, Set
from scipy.sparse import coo_matrix
import h5py
import numpy as np
from h5py import Dataset
from overrides import overrides
Bytes = Union[bytes, bytearray]
Scalar = Union[None, bool, str, int, float]
Vector = Union[
None,
List[bool],
List[str],
List[int],
List[float],
List[Optional[str]],
np.ndarray,
]
VectorList = Union[
List[List[bool]],
List[List[str]],
List[List[int]],
List[List[float]],
List[Optional[List[bool]]],
List[Optional[List[str]]],
List[Optional[List[int]]],
List[Optional[List[float]]],
]
class Sample(ABC):
"""Abstract dictionary-like class that stores training data."""
@abstractmethod
def get_scalar(self, key: str) -> Optional[Any]:
pass
@abstractmethod
def put_scalar(self, key: str, value: Scalar) -> None:
pass
@abstractmethod
def put_array(self, key: str, value: Optional[np.ndarray]) -> None:
pass
@abstractmethod
def get_array(self, key: str) -> Optional[np.ndarray]:
pass
@abstractmethod
def put_sparse(self, key: str, value: coo_matrix) -> None:
pass
@abstractmethod
def get_sparse(self, key: str) -> Optional[coo_matrix]:
pass
def _assert_is_scalar(self, value: Any) -> None:
if value is None:
return
if isinstance(value, (str, bool, int, float, bytes, np.bytes_)):
return
assert False, f"scalar expected; found instead: {value} ({value.__class__})"
def _assert_is_array(self, value: np.ndarray) -> None:
assert isinstance(
value, np.ndarray
), f"np.ndarray expected; found instead: {value.__class__}"
assert value.dtype.kind in "biufS", f"Unsupported dtype: {value.dtype}"
def _assert_is_sparse(self, value: Any) -> None:
assert isinstance(value, coo_matrix)
self._assert_is_array(value.data)
class MemorySample(Sample):
    """Dictionary-like class that stores training data in-memory.

    All values live in a single dict; missing keys read as None, and putters
    silently ignore None values.
    """

    def __init__(
        self,
        data: Optional[Dict[str, Any]] = None,
    ) -> None:
        self._data: Dict[str, Any] = {} if data is None else data

    @overrides
    def get_scalar(self, key: str) -> Optional[Any]:
        return self._get(key)

    @overrides
    def put_scalar(self, key: str, value: Scalar) -> None:
        if value is None:
            return
        self._assert_is_scalar(value)
        self._put(key, value)

    @overrides
    def put_array(self, key: str, value: Optional[np.ndarray]) -> None:
        if value is None:
            return
        self._assert_is_array(value)
        self._put(key, value)

    @overrides
    def get_array(self, key: str) -> Optional[np.ndarray]:
        return cast(Optional[np.ndarray], self._get(key))

    @overrides
    def put_sparse(self, key: str, value: coo_matrix) -> None:
        if value is None:
            return
        self._assert_is_sparse(value)
        self._put(key, value)

    @overrides
    def get_sparse(self, key: str) -> Optional[coo_matrix]:
        return cast(Optional[coo_matrix], self._get(key))

    def _get(self, key: str) -> Optional[Any]:
        # Missing keys read as None rather than raising KeyError.
        return self._data.get(key)

    def _put(self, key: str, value: Any) -> None:
        self._data[key] = value
class Hdf5Sample(Sample):
    """
    Dictionary-like class that stores training data in an HDF5 file.

    Unlike MemorySample, this class only loads to memory the parts of the
    data set that are actually accessed, and therefore it is more scalable.
    """

    def __init__(
        self,
        filename: str,
        mode: str = "r+",
    ) -> None:
        # libver="latest" selects the newest HDF5 file format.
        self.file = h5py.File(filename, mode, libver="latest")

    @overrides
    def get_scalar(self, key: str) -> Optional[Any]:
        """Return the scalar stored under `key`, or None if absent.

        Strings are decoded back to `str`; other values are converted to the
        closest native Python type via `tolist()`.
        """
        if key not in self.file:
            return None
        ds = self.file[key]
        assert (
            len(ds.shape) == 0
        ), f"0-dimensional array expected; found shape {ds.shape}"
        if h5py.check_string_dtype(ds.dtype):
            return ds.asstr()[()]
        else:
            return ds[()].tolist()

    @overrides
    def put_scalar(self, key: str, value: Any) -> None:
        """Store a scalar under `key`, replacing any existing dataset."""
        if value is None:
            return
        self._assert_is_scalar(value)
        if key in self.file:
            del self.file[key]
        self.file.create_dataset(key, data=value)

    @overrides
    def put_array(self, key: str, value: Optional[np.ndarray]) -> None:
        """Store an array under `key`, replacing any existing dataset.

        Multi-dimensional floating point arrays are down-converted to half
        precision (float16) to save disk space; 1-D arrays keep their dtype.
        """
        if value is None:
            return
        self._assert_is_array(value)
        if len(value.shape) > 1 and value.dtype.kind == "f":
            value = value.astype("float16")
        if key in self.file:
            del self.file[key]
        # Fix: previously this returned the h5py.Dataset, contradicting the
        # declared `-> None` return type.
        self.file.create_dataset(key, data=value, compression="gzip")

    @overrides
    def get_array(self, key: str) -> Optional[np.ndarray]:
        """Return the full array stored under `key`, or None if absent."""
        if key not in self.file:
            return None
        return self.file[key][:]

    @overrides
    def put_sparse(self, key: str, value: coo_matrix) -> None:
        """Store a COO matrix as three datasets: row, col and data."""
        if value is None:
            return
        self._assert_is_sparse(value)
        self.put_array(f"{key}_row", value.row)
        self.put_array(f"{key}_col", value.col)
        self.put_array(f"{key}_data", value.data)

    @overrides
    def get_sparse(self, key: str) -> Optional[coo_matrix]:
        """Rebuild the COO matrix written by `put_sparse`, or None if absent."""
        row = self.get_array(f"{key}_row")
        if row is None:
            return None
        col = self.get_array(f"{key}_col")
        data = self.get_array(f"{key}_data")
        assert col is not None
        assert data is not None
        return coo_matrix((data, (row, col)))

    def get_bytes(self, key: str) -> Optional[Bytes]:
        """Return the raw bytes written by `put_bytes`, or None if absent."""
        if key not in self.file:
            return None
        ds = self.file[key]
        assert (
            len(ds.shape) == 1
        ), f"1-dimensional array expected; found shape {ds.shape}"
        return ds[()].tobytes()

    def put_bytes(self, key: str, value: Bytes) -> None:
        """Store raw bytes under `key` as a uint8 array."""
        assert isinstance(
            value, (bytes, bytearray)
        ), f"bytes expected; found: {value.__class__}"  # type: ignore
        self.put_array(key, np.frombuffer(value, dtype="uint8"))

View File

@@ -1,151 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import gzip
import json
from abc import ABC, abstractmethod
import numpy as np
class Instance(ABC):
    """
    Abstract class holding all the data necessary to generate a concrete model of the problem.

    In the knapsack problem, for example, this class could hold the number of items, their weights
    and costs, as well as the size of the knapsack. Objects implementing this class are able to
    convert themselves into a concrete optimization model, which can be optimized by a solver, or
    into arrays of features, which can be provided as inputs to machine learning models.
    """

    @abstractmethod
    def to_model(self):
        """
        Returns a concrete Pyomo model corresponding to this instance.
        """
        pass

    def get_instance_features(self):
        """
        Returns a 1-dimensional Numpy array of (numerical) features describing the entire instance.

        The array is used by LearningSolver to determine how similar two instances are. It may also
        be used to predict, in combination with variable-specific features, the values of binary
        decision variables in the problem.

        There is not necessarily a one-to-one correspondence between models and instance features:
        the features may encode only part of the data necessary to generate the complete model.
        Features may also be statistics computed from the original data. For example, in the
        knapsack problem, an implementation may decide to provide as instance features only
        the average weights, average prices, number of items and the size of the knapsack.

        The returned array MUST have the same length for all relevant instances of the problem. If
        two instances map into arrays of different lengths, they cannot be solved by the same
        LearningSolver object.

        By default, returns [0].
        """
        return np.zeros(1)

    def get_variable_features(self, var, index):
        """
        Returns a 1-dimensional array of (numerical) features describing a particular decision
        variable.

        The argument `var` is a pyomo.core.Var object, which represents a collection of decision
        variables. The argument `index` specifies which variable in the collection is the relevant
        one.

        In combination with instance features, variable features are used by LearningSolver to
        predict, among other things, the optimal value of each decision variable before the
        optimization takes place. In the knapsack problem, for example, an implementation could
        provide as variable features the weight and the price of a specific item.

        Like instance features, the arrays returned by this method MUST have the same length for
        all variables within the same category, for all relevant instances of the problem.

        By default, returns [0].
        """
        return np.zeros(1)

    def get_variable_category(self, var, index):
        """
        Returns the category (a string, an integer or any hashable type) for each decision
        variable.

        If two variables have the same category, LearningSolver will use the same internal ML
        model to predict the values of both variables. If the returned category is None, ML
        models will ignore the variable.

        By default, returns "default".
        """
        return "default"

    def get_constraint_features(self, cid):
        # Default: a single constant feature for every constraint.
        return np.zeros(1)

    def get_constraint_category(self, cid):
        # Default: each constraint is its own category.
        return cid

    def has_static_lazy_constraints(self):
        # Whether some constraints already in the model should be treated as
        # lazy (see is_constraint_lazy).
        return False

    def has_dynamic_lazy_constraints(self):
        # Whether find_violated_lazy_constraints may generate new constraints.
        return False

    def is_constraint_lazy(self, cid):
        # Whether the constraint identified by `cid` should be treated as lazy.
        return False

    def find_violated_lazy_constraints(self, model):
        """
        Returns lazy constraint violations found for the current solution.

        After solving a model, LearningSolver will ask the instance to identify which lazy
        constraints are violated by the current solution. For each identified violation,
        LearningSolver will then call the build_lazy_constraint, add the generated Pyomo
        constraint to the model, then resolve the problem. The process repeats until no further
        lazy constraint violations are found.

        Each "violation" is simply a string, a tuple or any other hashable type which allows the
        instance to identify unambiguously which lazy constraint should be generated. In the
        Traveling Salesman Problem, for example, a subtour violation could be a frozen set
        containing the cities in the subtour.

        For a concrete example, see TravelingSalesmanInstance.
        """
        return []

    def build_lazy_constraint(self, model, violation):
        """
        Returns a Pyomo constraint which fixes a given violation.

        This method is typically called immediately after find_violated_lazy_constraints. The
        violation object provided to this method is exactly the same object returned earlier by
        find_violated_lazy_constraints.

        After some training, LearningSolver may decide to proactively build some lazy constraints
        at the beginning of the optimization process, before a solution is even available. In this
        case, build_lazy_constraints will be called without a corresponding call to
        find_violated_lazy_constraints.

        The implementation should not directly add the constraint to the model. The constraint
        will be added by LearningSolver after the method returns.

        For a concrete example, see TravelingSalesmanInstance.
        """
        pass

    def find_violated_user_cuts(self, model):
        # Same protocol as find_violated_lazy_constraints, but for user cuts.
        return []

    def build_user_cut(self, model, violation):
        # Same protocol as build_lazy_constraint, but for user cuts.
        pass

    def load(self, filename):
        # Restore all instance attributes from a gzipped JSON file
        # (the counterpart of dump).
        with gzip.GzipFile(filename, 'r') as f:
            data = json.loads(f.read().decode('utf-8'))
        self.__dict__ = data

    def dump(self, filename):
        # Serialize all instance attributes to a gzipped JSON file. Only
        # JSON-serializable attributes are supported.
        data = json.dumps(self.__dict__, indent=2).encode('utf-8')
        with gzip.GzipFile(filename, 'w') as f:
            f.write(data)

View File

@@ -1,3 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

198
miplearn/instance/base.py Normal file
View File

@@ -0,0 +1,198 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from abc import ABC, abstractmethod
from typing import Any, List, TYPE_CHECKING, Dict
import numpy as np
from miplearn.features.sample import Sample, MemorySample
from miplearn.types import ConstraintName, ConstraintCategory
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from miplearn.solvers.learning import InternalSolver
# noinspection PyMethodMayBeStatic
class Instance(ABC):
    """
    Abstract class holding all the data necessary to generate a concrete model of the
    problem.

    In the knapsack problem, for example, this class could hold the number of items,
    their weights and costs, as well as the size of the knapsack. Objects
    implementing this class are able to convert themselves into a concrete
    optimization model, which can be optimized by a solver, or into arrays of
    features, which can be provided as inputs to machine learning models.
    """

    def __init__(self) -> None:
        # Training samples collected from previous solver runs.
        self._samples: List[Sample] = []

    @abstractmethod
    def to_model(self) -> Any:
        """
        Returns the optimization model corresponding to this instance.
        """
        pass

    def get_instance_features(self) -> np.ndarray:
        """
        Returns a 1-dimensional array of (numerical) features describing the
        entire instance.

        The array is used by LearningSolver to determine how similar two instances
        are. It may also be used to predict, in combination with variable-specific
        features, the values of binary decision variables in the problem.

        There is not necessarily a one-to-one correspondence between models and
        instance features: the features may encode only part of the data necessary to
        generate the complete model. Features may also be statistics computed from
        the original data. For example, in the knapsack problem, an implementation
        may decide to provide as instance features only the average weights, average
        prices, number of items and the size of the knapsack.

        The returned array MUST have the same length for all relevant instances of
        the problem. If two instances map into arrays of different lengths,
        they cannot be solved by the same LearningSolver object.

        By default, returns [0.0].
        """
        return np.zeros(1)

    def get_variable_features(self, names: np.ndarray) -> np.ndarray:
        """
        Returns a 2-dimensional array where row i is a (1-dimensional) list of
        numerical features describing the decision variable names[i].

        In combination with instance features, variable features are used by
        LearningSolver to predict, among other things, the optimal value of each
        decision variable before the optimization takes place. In the knapsack
        problem, for example, an implementation could provide as variable features
        the weight and the price of a specific item.

        Like instance features, the rows returned by this method MUST have the same
        length for all variables within the same category, for all relevant instances
        of the problem.

        If features are not provided for a given variable, MIPLearn will use a
        default set of features.

        By default, returns [[0.0], ..., [0.0]].
        """
        return np.zeros((len(names), 1))

    def get_variable_categories(self, names: np.ndarray) -> np.ndarray:
        """
        Returns a 1-dimensional array assigning a category to each variable
        in `names`.

        If two variables have the same category, LearningSolver will use the same
        internal ML model to predict the values of both variables. If a variable is
        not assigned a category, ML models will ignore the variable.

        By default, returns `names`.
        """
        return names

    def get_constraint_features(self, names: np.ndarray) -> np.ndarray:
        # Constraint counterpart of get_variable_features. By default, one
        # constant feature per constraint.
        return np.zeros((len(names), 1))

    def get_constraint_categories(self, names: np.ndarray) -> np.ndarray:
        # Constraint counterpart of get_variable_categories.
        return names

    def has_dynamic_lazy_constraints(self) -> bool:
        # Whether find_violated_lazy_constraints may generate new constraints.
        return False

    def are_constraints_lazy(self, names: np.ndarray) -> np.ndarray:
        # Returns a boolean vector indicating, for each constraint in `names`,
        # whether it should be treated as lazy. Default: none are lazy.
        return np.zeros(len(names), dtype=bool)

    def find_violated_lazy_constraints(
        self,
        solver: "InternalSolver",
        model: Any,
    ) -> List[ConstraintName]:
        """
        Returns lazy constraint violations found for the current solution.

        After solving a model, LearningSolver will ask the instance to identify which
        lazy constraints are violated by the current solution. For each identified
        violation, LearningSolver will then call the enforce_lazy_constraint and
        resolve the problem. The process repeats until no further lazy constraint
        violations are found.

        Each "violation" is simply a string which allows the instance to identify
        unambiguously which lazy constraint should be generated. In the Traveling
        Salesman Problem, for example, a subtour violation could be a string
        containing the cities in the subtour.

        The current solution can be queried with `solver.get_solution()`. If the solver
        is configured to use lazy callbacks, this solution may be non-integer.

        For a concrete example, see TravelingSalesmanInstance.
        """
        return []

    def enforce_lazy_constraint(
        self,
        solver: "InternalSolver",
        model: Any,
        violation: ConstraintName,
    ) -> None:
        """
        Adds constraints to the model to ensure that the given violation is fixed.

        This method is typically called immediately after
        find_violated_lazy_constraints. The violation object provided to this method
        is exactly the same object returned earlier by
        find_violated_lazy_constraints. After some training, LearningSolver may
        decide to proactively build some lazy constraints at the beginning of the
        optimization process, before a solution is even available. In this case,
        enforce_lazy_constraints will be called without a corresponding call to
        find_violated_lazy_constraints.

        Note that this method can be called either before the optimization starts or
        from within a callback. To ensure that constraints are added correctly in
        either case, it is recommended to use `solver.add_constraint`, instead of
        modifying the `model` object directly.

        For a concrete example, see TravelingSalesmanInstance.
        """
        pass

    def has_user_cuts(self) -> bool:
        # Whether find_violated_user_cuts may generate cuts for this instance.
        return False

    def find_violated_user_cuts(self, model: Any) -> List[ConstraintName]:
        # Same protocol as find_violated_lazy_constraints, but for user cuts.
        return []

    def enforce_user_cut(
        self,
        solver: "InternalSolver",
        model: Any,
        violation: ConstraintName,
    ) -> Any:
        # Same protocol as enforce_lazy_constraint, but for user cuts.
        return None

    def load(self) -> None:
        # Load the instance data into memory. No-op by default; overridden by
        # file-backed subclasses.
        pass

    def free(self) -> None:
        # Release the in-memory instance data. No-op by default.
        pass

    def flush(self) -> None:
        """
        Save any pending changes made to the instance to the underlying data store.
        """
        pass

    def get_samples(self) -> List[Sample]:
        # Returns all training samples collected so far.
        return self._samples

    def create_sample(self) -> Sample:
        # Creates, registers and returns a new in-memory training sample.
        sample = MemorySample()
        self._samples.append(sample)
        return sample

131
miplearn/instance/file.py Normal file
View File

@@ -0,0 +1,131 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import gc
import os
from typing import Any, Optional, List, Dict, TYPE_CHECKING
import pickle
import numpy as np
from overrides import overrides
from miplearn.features.sample import Hdf5Sample, Sample
from miplearn.instance.base import Instance
from miplearn.types import ConstraintName, ConstraintCategory
if TYPE_CHECKING:
from miplearn.solvers.learning import InternalSolver
class FileInstance(Instance):
    """Instance backed by an HDF5 file on disk.

    The wrapped instance is stored in pickled form inside the HDF5 file and is
    only brought into memory when `load` is called. Training data is written
    directly to the same file (see `create_sample`), rather than kept in
    memory.
    """

    def __init__(self, filename: str) -> None:
        super().__init__()
        # Fix: the error message previously omitted the offending filename.
        assert os.path.exists(filename), f"File not found: {filename}"
        self.h5 = Hdf5Sample(filename)
        # In-memory copy of the wrapped instance; populated by `load`.
        self.instance: Optional[Instance] = None

    # Delegation
    # -------------------------------------------------------------------------
    # Each method below simply forwards to the loaded instance; `load` must be
    # called first.

    @overrides
    def to_model(self) -> Any:
        assert self.instance is not None
        return self.instance.to_model()

    @overrides
    def get_instance_features(self) -> np.ndarray:
        assert self.instance is not None
        return self.instance.get_instance_features()

    @overrides
    def get_variable_features(self, names: np.ndarray) -> np.ndarray:
        assert self.instance is not None
        return self.instance.get_variable_features(names)

    @overrides
    def get_variable_categories(self, names: np.ndarray) -> np.ndarray:
        assert self.instance is not None
        return self.instance.get_variable_categories(names)

    @overrides
    def get_constraint_features(self, names: np.ndarray) -> np.ndarray:
        assert self.instance is not None
        return self.instance.get_constraint_features(names)

    @overrides
    def get_constraint_categories(self, names: np.ndarray) -> np.ndarray:
        assert self.instance is not None
        return self.instance.get_constraint_categories(names)

    @overrides
    def has_dynamic_lazy_constraints(self) -> bool:
        assert self.instance is not None
        return self.instance.has_dynamic_lazy_constraints()

    @overrides
    def are_constraints_lazy(self, names: np.ndarray) -> np.ndarray:
        assert self.instance is not None
        return self.instance.are_constraints_lazy(names)

    @overrides
    def find_violated_lazy_constraints(
        self,
        solver: "InternalSolver",
        model: Any,
    ) -> List[ConstraintName]:
        assert self.instance is not None
        return self.instance.find_violated_lazy_constraints(solver, model)

    @overrides
    def enforce_lazy_constraint(
        self,
        solver: "InternalSolver",
        model: Any,
        violation: ConstraintName,
    ) -> None:
        assert self.instance is not None
        self.instance.enforce_lazy_constraint(solver, model, violation)

    @overrides
    def find_violated_user_cuts(self, model: Any) -> List[ConstraintName]:
        assert self.instance is not None
        return self.instance.find_violated_user_cuts(model)

    @overrides
    def enforce_user_cut(
        self,
        solver: "InternalSolver",
        model: Any,
        violation: ConstraintName,
    ) -> None:
        assert self.instance is not None
        self.instance.enforce_user_cut(solver, model, violation)

    # Input & Output
    # -------------------------------------------------------------------------

    @overrides
    def free(self) -> None:
        # Drop the in-memory copy; the data remains available on disk.
        self.instance = None
        gc.collect()

    @overrides
    def load(self) -> None:
        if self.instance is not None:
            return
        pkl = self.h5.get_bytes("pickled")
        assert pkl is not None
        # NOTE: pickle.loads is only safe on trusted files; do not point
        # FileInstance at data from untrusted sources.
        self.instance = pickle.loads(pkl)
        assert isinstance(self.instance, Instance)

    @classmethod
    def save(cls, instance: Instance, filename: str) -> None:
        """Pickle `instance` and store it in a new HDF5 file at `filename`."""
        h5 = Hdf5Sample(filename, mode="w")
        instance_pkl = pickle.dumps(instance)
        h5.put_bytes("pickled", instance_pkl)

    @overrides
    def create_sample(self) -> Sample:
        # All samples share the single backing HDF5 file.
        return self.h5

    @overrides
    def get_samples(self) -> List[Sample]:
        return [self.h5]

View File

@@ -0,0 +1,155 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import gc
import gzip
import os
import pickle
from typing import Optional, Any, List, cast, IO, TYPE_CHECKING, Dict
import numpy as np
from overrides import overrides
from miplearn.features.sample import Sample
from miplearn.instance.base import Instance
from miplearn.types import ConstraintName, ConstraintCategory
if TYPE_CHECKING:
from miplearn.solvers.learning import InternalSolver
class PickleGzInstance(Instance):
    """
    An instance backed by a gzipped pickle file.

    The instance is only loaded to memory after an operation is called (for example,
    `to_model`).

    Parameters
    ----------
    filename: str
        Path of the gzipped pickle file that should be loaded.
    """

    # noinspection PyMissingConstructor
    def __init__(self, filename: str) -> None:
        # Fix: the error message previously omitted the offending filename.
        assert os.path.exists(filename), f"File not found: {filename}"
        # In-memory copy of the wrapped instance; populated by `load`.
        self.instance: Optional[Instance] = None
        self.filename: str = filename

    # Each method below forwards to the loaded instance; `load` must be
    # called first.

    @overrides
    def to_model(self) -> Any:
        assert self.instance is not None
        return self.instance.to_model()

    @overrides
    def get_instance_features(self) -> np.ndarray:
        assert self.instance is not None
        return self.instance.get_instance_features()

    @overrides
    def get_variable_features(self, names: np.ndarray) -> np.ndarray:
        assert self.instance is not None
        return self.instance.get_variable_features(names)

    @overrides
    def get_variable_categories(self, names: np.ndarray) -> np.ndarray:
        assert self.instance is not None
        return self.instance.get_variable_categories(names)

    @overrides
    def get_constraint_features(self, names: np.ndarray) -> np.ndarray:
        assert self.instance is not None
        return self.instance.get_constraint_features(names)

    @overrides
    def get_constraint_categories(self, names: np.ndarray) -> np.ndarray:
        assert self.instance is not None
        return self.instance.get_constraint_categories(names)

    @overrides
    def has_dynamic_lazy_constraints(self) -> bool:
        assert self.instance is not None
        return self.instance.has_dynamic_lazy_constraints()

    @overrides
    def are_constraints_lazy(self, names: np.ndarray) -> np.ndarray:
        assert self.instance is not None
        return self.instance.are_constraints_lazy(names)

    @overrides
    def find_violated_lazy_constraints(
        self,
        solver: "InternalSolver",
        model: Any,
    ) -> List[ConstraintName]:
        assert self.instance is not None
        return self.instance.find_violated_lazy_constraints(solver, model)

    @overrides
    def enforce_lazy_constraint(
        self,
        solver: "InternalSolver",
        model: Any,
        violation: ConstraintName,
    ) -> None:
        assert self.instance is not None
        self.instance.enforce_lazy_constraint(solver, model, violation)

    @overrides
    def find_violated_user_cuts(self, model: Any) -> List[ConstraintName]:
        assert self.instance is not None
        return self.instance.find_violated_user_cuts(model)

    @overrides
    def enforce_user_cut(
        self,
        solver: "InternalSolver",
        model: Any,
        violation: ConstraintName,
    ) -> None:
        assert self.instance is not None
        self.instance.enforce_user_cut(solver, model, violation)

    @overrides
    def load(self) -> None:
        # NOTE: unpickling is only safe for trusted files.
        if self.instance is None:
            obj = read_pickle_gz(self.filename)
            assert isinstance(obj, Instance)
            self.instance = obj

    @overrides
    def free(self) -> None:
        # Drop the in-memory copy; it can be reloaded from disk later.
        self.instance = None  # type: ignore
        gc.collect()

    @overrides
    def flush(self) -> None:
        # Persist the (possibly modified) instance back to its pickle file.
        write_pickle_gz(self.instance, self.filename)

    @overrides
    def get_samples(self) -> List[Sample]:
        assert self.instance is not None
        return self.instance.get_samples()

    @overrides
    def create_sample(self) -> Sample:
        assert self.instance is not None
        return self.instance.create_sample()
def write_pickle_gz(obj: Any, filename: str) -> None:
    """Pickle `obj` and write it to a gzip-compressed file at `filename`.

    Missing parent directories are created automatically.

    Fix: `os.makedirs("")` raises FileNotFoundError, so this function
    previously crashed whenever `filename` had no directory component
    (e.g. "data.pkl.gz" in the current directory); directory creation is
    now skipped in that case.
    """
    dirname = os.path.dirname(filename)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with gzip.GzipFile(filename, "wb") as file:
        pickle.dump(obj, cast(IO[bytes], file))
def read_pickle_gz(filename: str) -> Any:
    """Load and return the object stored in the gzipped pickle `filename`."""
    with gzip.GzipFile(filename, "rb") as stream:
        return pickle.load(cast(IO[bytes], stream))
def write_pickle_gz_multiple(objs: List[Any], dirname: str) -> None:
    """Write each object in `objs` to `dirname` as a zero-padded .pkl.gz file."""
    for index, obj in enumerate(objs):
        write_pickle_gz(obj, f"{dirname}/{index:05d}.pkl.gz")

View File

@@ -1,39 +1,61 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from datetime import timedelta
import logging
import time
import sys
import time
import traceback
import warnings
from typing import Dict, Any, Optional
class TimeFormatter():
def __init__(self, start_time, log_colors):
_formatwarning = warnings.formatwarning
class TimeFormatter(logging.Formatter):
def __init__(
self,
start_time: float,
log_colors: Dict[str, str],
) -> None:
super().__init__()
self.start_time = start_time
self.log_colors = log_colors
def format(self, record):
def format(self, record: logging.LogRecord) -> str:
if record.levelno >= logging.ERROR:
color = self.log_colors["red"]
elif record.levelno >= logging.WARNING:
color = self.log_colors["yellow"]
else:
color = self.log_colors["green"]
return "%s[%12.3f]%s %s" % (color,
return "%s[%12.3f]%s %s" % (
color,
record.created - self.start_time,
self.log_colors["reset"],
record.getMessage())
record.getMessage(),
)
def setup_logger(start_time=None,
force_color=False):
def formatwarning_tb(*args: Any, **kwargs: Any) -> str:
s = _formatwarning(*args, **kwargs)
tb = traceback.format_stack()
s += "".join(tb[:-1])
return s
def setup_logger(
start_time: Optional[float] = None,
force_color: bool = False,
) -> None:
if start_time is None:
start_time = time.time()
if sys.stdout.isatty() or force_color:
log_colors = {
"green": '\033[92m',
"yellow": '\033[93m',
"red": '\033[91m',
"reset": '\033[0m',
"green": "\033[92m",
"yellow": "\033[93m",
"red": "\033[91m",
"reset": "\033[0m",
}
else:
log_colors = {
@@ -46,4 +68,5 @@ def setup_logger(start_time=None,
handler.setFormatter(TimeFormatter(start_time, log_colors))
logging.getLogger().addHandler(handler)
logging.getLogger("miplearn").setLevel(logging.INFO)
lg = logging.getLogger("miplearn")
warnings.formatwarning = formatwarning_tb
logging.captureWarnings(True)

View File

@@ -1,3 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

View File

@@ -1,14 +1,17 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import miplearn
from miplearn import Instance
from typing import List, Dict, Optional
import numpy as np
import pyomo.environ as pe
from scipy.stats import uniform, randint, bernoulli
from overrides import overrides
from scipy.stats import uniform, randint, rv_discrete
from scipy.stats.distributions import rv_frozen
from miplearn.instance.base import Instance
class ChallengeA:
"""
@@ -17,13 +20,16 @@ class ChallengeA:
- K = 500, u ~ U(0., 1.)
- alpha = 0.25
"""
def __init__(self,
seed=42,
n_training_instances=500,
n_test_instances=50):
def __init__(
self,
seed: int = 42,
n_training_instances: int = 500,
n_test_instances: int = 50,
) -> None:
np.random.seed(seed)
self.gen = MultiKnapsackGenerator(n=randint(low=250, high=251),
self.gen = MultiKnapsackGenerator(
n=randint(low=250, high=251),
m=randint(low=10, high=11),
w=uniform(loc=0.0, scale=1000.0),
K=uniform(loc=500.0, scale=0.0),
@@ -42,19 +48,23 @@ class ChallengeA:
class MultiKnapsackInstance(Instance):
"""Representation of the Multidimensional 0-1 Knapsack Problem.
Given a set of n items and m knapsacks, the problem is to find a subset of items S maximizing
sum(prices[i] for i in S). If selected, each item i occupies weights[i,j] units of space in
each knapsack j. Furthermore, each knapsack j has limited storage space, given by capacities[j].
Given a set of n items and m knapsacks, the problem is to find a subset of items
S maximizing sum(prices[i] for i in S). If selected, each item i occupies
weights[i,j] units of space in each knapsack j. Furthermore, each knapsack j has
limited storage space, given by capacities[j].
This implementation assigns a different category for each decision variable, and therefore
trains one ML model per variable. It is only suitable when training and test instances have
same size and items don't shuffle around.
This implementation assigns a different category for each decision variable,
and therefore trains one ML model per variable. It is only suitable when training
and test instances have same size and items don't shuffle around.
"""
def __init__(self,
prices,
capacities,
weights):
def __init__(
self,
prices: np.ndarray,
capacities: np.ndarray,
weights: np.ndarray,
) -> None:
super().__init__()
assert isinstance(prices, np.ndarray)
assert isinstance(capacities, np.ndarray)
assert isinstance(weights, np.ndarray)
@@ -66,81 +76,88 @@ class MultiKnapsackInstance(Instance):
self.capacities = capacities
self.weights = weights
def to_model(self):
@overrides
def to_model(self) -> pe.ConcreteModel:
model = pe.ConcreteModel()
model.x = pe.Var(range(self.n), domain=pe.Binary)
model.OBJ = pe.Objective(rule=lambda model: sum(model.x[j] * self.prices[j]
for j in range(self.n)),
sense=pe.maximize)
model.OBJ = pe.Objective(
expr=sum(model.x[j] * self.prices[j] for j in range(self.n)),
sense=pe.maximize,
)
model.eq_capacity = pe.ConstraintList()
for i in range(self.m):
model.eq_capacity.add(sum(model.x[j] * self.weights[i,j]
for j in range(self.n)) <= self.capacities[i])
model.eq_capacity.add(
sum(model.x[j] * self.weights[i, j] for j in range(self.n))
<= self.capacities[i]
)
return model
def get_instance_features(self):
return np.hstack([
np.mean(self.prices),
self.capacities,
])
@overrides
def get_instance_features(self) -> np.ndarray:
return np.array([float(np.mean(self.prices))] + list(self.capacities))
def get_variable_features(self, var, index):
return np.hstack([
self.prices[index],
self.weights[:, index],
])
# def get_variable_category(self, var, index):
# return index
@overrides
def get_variable_features(self, names: np.ndarray) -> np.ndarray:
features = []
for i in range(len(self.weights)):
f = [self.prices[i]]
f.extend(self.weights[:, i])
features.append(f)
return np.array(features)
# noinspection PyPep8Naming
class MultiKnapsackGenerator:
def __init__(self,
n=randint(low=100, high=101),
m=randint(low=30, high=31),
w=randint(low=0, high=1000),
K=randint(low=500, high=500),
u=uniform(loc=0.0, scale=1.0),
alpha=uniform(loc=0.25, scale=0.0),
fix_w=False,
w_jitter=uniform(loc=1.0, scale=0.0),
round=True,
def __init__(
self,
n: rv_frozen = randint(low=100, high=101),
m: rv_frozen = randint(low=30, high=31),
w: rv_frozen = randint(low=0, high=1000),
K: rv_frozen = randint(low=500, high=501),
u: rv_frozen = uniform(loc=0.0, scale=1.0),
alpha: rv_frozen = uniform(loc=0.25, scale=0.0),
fix_w: bool = False,
w_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
round: bool = True,
):
"""Initialize the problem generator.
Instances have a random number of items (or variables) and a random number of knapsacks
(or constraints), as specified by the provided probability distributions `n` and `m`,
respectively. The weight of each item `i` on knapsack `j` is sampled independently from
the provided distribution `w`. The capacity of knapsack `j` is set to:
Instances have a random number of items (or variables) and a random number of
knapsacks (or constraints), as specified by the provided probability
distributions `n` and `m`, respectively. The weight of each item `i` on
knapsack `j` is sampled independently from the provided distribution `w`. The
capacity of knapsack `j` is set to:
alpha_j * sum(w[i,j] for i in range(n)),
where `alpha_j`, the tightness ratio, is sampled from the provided probability
distribution `alpha`. To make the instances more challenging, the costs of the items
are linearly correlated to their average weights. More specifically, the weight of each
item `i` is set to:
where `alpha_j`, the tightness ratio, is sampled from the provided
probability distribution `alpha`. To make the instances more challenging,
the costs of the items are linearly correlated to their average weights. More
specifically, the weight of each item `i` is set to:
sum(w[i,j]/m for j in range(m)) + K * u_i,
where `K`, the correlation coefficient, and `u_i`, the correlation multiplier, are sampled
from the provided probability distributions. Note that `K` is only sample once for the
entire instance.
where `K`, the correlation coefficient, and `u_i`, the correlation
multiplier, are sampled from the provided probability distributions. Note
that `K` is only sample once for the entire instance.
If fix_w=True is provided, then w[i,j] are kept the same in all generated instances. This
also implies that n and m are kept fixed. Although the prices and capacities are derived
from w[i,j], as long as u and K are not constants, the generated instances will still not
be completely identical.
If fix_w=True is provided, then w[i,j] are kept the same in all generated
instances. This also implies that n and m are kept fixed. Although the prices
and capacities are derived from w[i,j], as long as u and K are not constants,
the generated instances will still not be completely identical.
If a probability distribution w_jitter is provided, then item weights will be set to
w[i,j] * gamma[i,j] where gamma[i,j] is sampled from w_jitter. When combined with
fix_w=True, this argument may be used to generate instances where the weight of each item
is roughly the same, but not exactly identical, across all instances. The prices of the
items and the capacities of the knapsacks will be calculated as above, but using these
perturbed weights instead.
If a probability distribution w_jitter is provided, then item weights will be
set to w[i,j] * gamma[i,j] where gamma[i,j] is sampled from w_jitter. When
combined with fix_w=True, this argument may be used to generate instances
where the weight of each item is roughly the same, but not exactly identical,
across all instances. The prices of the items and the capacities of the
knapsacks will be calculated as above, but using these perturbed weights
instead.
By default, all generated prices, weights and capacities are rounded to the nearest integer
number. If `round=False` is provided, this rounding will be disabled.
By default, all generated prices, weights and capacities are rounded to the
nearest integer number. If `round=False` is provided, this rounding will be
disabled.
Parameters
----------
@@ -157,30 +174,40 @@ class MultiKnapsackGenerator:
alpha: rv_continuous
Probability distribution for the tightness ratio
fix_w: boolean
If true, weights are kept the same (minus the noise from w_jitter) in all instances
If true, weights are kept the same (minus the noise from w_jitter) in all
instances
w_jitter: rv_continuous
Probability distribution for random noise added to the weights
round: boolean
If true, all prices, weights and capacities are rounded to the nearest integer
If true, all prices, weights and capacities are rounded to the nearest
integer
"""
assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
assert isinstance(m, rv_frozen), "m should be a SciPy probability distribution"
assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
assert isinstance(K, rv_frozen), "K should be a SciPy probability distribution"
assert isinstance(u, rv_frozen), "u should be a SciPy probability distribution"
assert isinstance(alpha, rv_frozen), "alpha should be a SciPy probability distribution"
assert isinstance(
alpha, rv_frozen
), "alpha should be a SciPy probability distribution"
assert isinstance(fix_w, bool), "fix_w should be boolean"
assert isinstance(w_jitter, rv_frozen), \
"w_jitter should be a SciPy probability distribution"
assert isinstance(
w_jitter, rv_frozen
), "w_jitter should be a SciPy probability distribution"
self.n = n
self.m = m
self.w = w
self.K = K
self.u = u
self.K = K
self.alpha = alpha
self.w_jitter = w_jitter
self.round = round
self.fix_n: Optional[int] = None
self.fix_m: Optional[int] = None
self.fix_w: Optional[np.ndarray] = None
self.fix_u: Optional[np.ndarray] = None
self.fix_K: Optional[float] = None
if fix_w:
self.fix_n = self.n.rvs()
@@ -188,16 +215,14 @@ class MultiKnapsackGenerator:
self.fix_w = np.array([self.w.rvs(self.fix_n) for _ in range(self.fix_m)])
self.fix_u = self.u.rvs(self.fix_n)
self.fix_K = self.K.rvs()
else:
self.fix_n = None
self.fix_m = None
self.fix_w = None
self.fix_u = None
self.fix_K = None
def generate(self, n_samples):
def _sample():
def generate(self, n_samples: int) -> List[MultiKnapsackInstance]:
def _sample() -> MultiKnapsackInstance:
if self.fix_w is not None:
assert self.fix_m is not None
assert self.fix_n is not None
assert self.fix_u is not None
assert self.fix_K is not None
n = self.fix_n
m = self.fix_m
w = self.fix_w
@@ -211,66 +236,12 @@ class MultiKnapsackGenerator:
K = self.K.rvs()
w = w * np.array([self.w_jitter.rvs(n) for _ in range(m)])
alpha = self.alpha.rvs(m)
p = np.array([w[:,j].sum() / m + K * u[j] for j in range(n)])
b = np.array([w[i,:].sum() * alpha[i] for i in range(m)])
p = np.array([w[:, j].sum() / m + K * u[j] for j in range(n)])
b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])
if self.round:
p = p.round()
b = b.round()
w = w.round()
return MultiKnapsackInstance(p, b, w)
return [_sample() for _ in range(n_samples)]
class KnapsackInstance(Instance):
"""
Simpler (one-dimensional) Knapsack Problem, used for testing.
"""
def __init__(self, weights, prices, capacity):
self.weights = weights
self.prices = prices
self.capacity = capacity
def to_model(self):
model = pe.ConcreteModel()
items = range(len(self.weights))
model.x = pe.Var(items, domain=pe.Binary)
model.OBJ = pe.Objective(expr=sum(model.x[v] * self.prices[v] for v in items),
sense=pe.maximize)
model.eq_capacity = pe.Constraint(expr=sum(model.x[v] * self.weights[v]
for v in items) <= self.capacity)
return model
def get_instance_features(self):
return np.array([
self.capacity,
np.average(self.weights),
])
def get_variable_features(self, var, index):
return np.array([
self.weights[index],
self.prices[index],
])
class GurobiKnapsackInstance(KnapsackInstance):
"""
Simpler (one-dimensional) knapsack instance, implemented directly in Gurobi
instead of Pyomo, used for testing.
"""
def __init__(self, weights, prices, capacity):
super().__init__(weights, prices, capacity)
def to_model(self):
import gurobipy as gp
from gurobipy import GRB
model = gp.Model("Knapsack")
n = len(self.weights)
x = model.addVars(n, vtype=GRB.BINARY, name="x")
model.addConstr(gp.quicksum(x[i] * self.weights[i]
for i in range(n)) <= self.capacity,
"eq_capacity")
model.setObjective(gp.quicksum(x[i] * self.prices[i]
for i in range(n)), GRB.MAXIMIZE)
return model

View File

@@ -1,28 +1,33 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from typing import List, Dict
import networkx as nx
import numpy as np
import pyomo.environ as pe
import networkx as nx
from miplearn import Instance
import random
from scipy.stats import uniform, randint, bernoulli
from networkx import Graph
from overrides import overrides
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen
from miplearn.instance.base import Instance
class ChallengeA:
def __init__(self,
seed=42,
n_training_instances=500,
n_test_instances=50,
):
def __init__(
self,
seed: int = 42,
n_training_instances: int = 500,
n_test_instances: int = 50,
) -> None:
np.random.seed(seed)
self.generator = MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.),
self.generator = MaxWeightStableSetGenerator(
w=uniform(loc=100.0, scale=50.0),
n=randint(low=200, high=201),
p=uniform(loc=0.05, scale=0.0),
fix_graph=True)
fix_graph=True,
)
np.random.seed(seed + 1)
self.training_instances = self.generator.generate(n_training_instances)
@@ -31,23 +36,80 @@ class ChallengeA:
self.test_instances = self.generator.generate(n_test_instances)
class MaxWeightStableSetInstance(Instance):
"""An instance of the Maximum-Weight Stable Set Problem.
Given a graph G=(V,E) and a weight w_v for each vertex v, the problem asks for a stable
set S of G maximizing sum(w_v for v in S). A stable set (also called independent set) is
a subset of vertices, no two of which are adjacent.
This is one of Karp's 21 NP-complete problems.
"""
def __init__(self, graph: Graph, weights: np.ndarray) -> None:
super().__init__()
self.graph = graph
self.weights = weights
self.nodes = list(self.graph.nodes)
@overrides
def to_model(self) -> pe.ConcreteModel:
model = pe.ConcreteModel()
model.x = pe.Var(self.nodes, domain=pe.Binary)
model.OBJ = pe.Objective(
expr=sum(model.x[v] * self.weights[v] for v in self.nodes),
sense=pe.maximize,
)
model.clique_eqs = pe.ConstraintList()
for clique in nx.find_cliques(self.graph):
model.clique_eqs.add(sum(model.x[v] for v in clique) <= 1)
return model
@overrides
def get_variable_features(self, names: np.ndarray) -> np.ndarray:
features = []
assert len(names) == len(self.nodes)
for i, v1 in enumerate(self.nodes):
assert names[i] == f"x[{v1}]".encode()
neighbor_weights = [0.0] * 15
neighbor_degrees = [100.0] * 15
for v2 in self.graph.neighbors(v1):
neighbor_weights += [self.weights[v2] / self.weights[v1]]
neighbor_degrees += [self.graph.degree(v2) / self.graph.degree(v1)]
neighbor_weights.sort(reverse=True)
neighbor_degrees.sort()
f = []
f += neighbor_weights[:5]
f += neighbor_degrees[:5]
f += [self.graph.degree(v1)]
features.append(f)
return np.array(features)
@overrides
def get_variable_categories(self, names: np.ndarray) -> np.ndarray:
return np.array(["default" for _ in names], dtype="S")
class MaxWeightStableSetGenerator:
"""Random instance generator for the Maximum-Weight Stable Set Problem.
The generator has two modes of operation. When `fix_graph=True` is provided, one random
Erdős-Rényi graph $G_{n,p}$ is generated in the constructor, where $n$ and $p$ are sampled
from user-provided probability distributions `n` and `p`. To generate each instance, the
generator independently samples each $w_v$ from the user-provided probability distribution `w`.
The generator has two modes of operation. When `fix_graph=True` is provided,
one random Erdős-Rényi graph $G_{n,p}$ is generated in the constructor, where $n$
and $p$ are sampled from user-provided probability distributions `n` and `p`. To
generate each instance, the generator independently samples each $w_v$ from the
user-provided probability distribution `w`.
When `fix_graph=False`, a new random graph is generated for each instance; the remaining
parameters are sampled in the same way.
When `fix_graph=False`, a new random graph is generated for each instance; the
remaining parameters are sampled in the same way.
"""
def __init__(self,
w=uniform(loc=10.0, scale=1.0),
n=randint(low=250, high=251),
p=uniform(loc=0.05, scale=0.0),
fix_graph=True):
def __init__(
self,
w: rv_frozen = uniform(loc=10.0, scale=1.0),
n: rv_frozen = randint(low=250, high=251),
p: rv_frozen = uniform(loc=0.05, scale=0.0),
fix_graph: bool = True,
):
"""Initialize the problem generator.
Parameters
@@ -70,61 +132,16 @@ class MaxWeightStableSetGenerator:
if fix_graph:
self.graph = self._generate_graph()
def generate(self, n_samples):
def _sample():
def generate(self, n_samples: int) -> List[MaxWeightStableSetInstance]:
def _sample() -> MaxWeightStableSetInstance:
if self.graph is not None:
graph = self.graph
else:
graph = self._generate_graph()
weights = self.w.rvs(graph.number_of_nodes())
return MaxWeightStableSetInstance(graph, weights)
return [_sample() for _ in range(n_samples)]
def _generate_graph(self):
def _generate_graph(self) -> Graph:
return nx.generators.random_graphs.binomial_graph(self.n.rvs(), self.p.rvs())
class MaxWeightStableSetInstance(Instance):
"""An instance of the Maximum-Weight Stable Set Problem.
Given a graph G=(V,E) and a weight w_v for each vertex v, the problem asks for a stable
set S of G maximizing sum(w_v for v in S). A stable set (also called independent set) is
a subset of vertices, no two of which are adjacent.
This is one of Karp's 21 NP-complete problems.
"""
def __init__(self, graph, weights):
self.graph = graph
self.weights = weights
def to_model(self):
nodes = list(self.graph.nodes)
model = pe.ConcreteModel()
model.x = pe.Var(nodes, domain=pe.Binary)
model.OBJ = pe.Objective(expr=sum(model.x[v] * self.weights[v] for v in nodes),
sense=pe.maximize)
model.clique_eqs = pe.ConstraintList()
for clique in nx.find_cliques(self.graph):
model.clique_eqs.add(sum(model.x[i] for i in clique) <= 1)
return model
def get_instance_features(self):
return np.ones(0)
def get_variable_features(self, var, index):
neighbor_weights = [0] * 15
neighbor_degrees = [100] * 15
for n in self.graph.neighbors(index):
neighbor_weights += [self.weights[n] / self.weights[index]]
neighbor_degrees += [self.graph.degree(n) / self.graph.degree(index)]
neighbor_weights.sort(reverse=True)
neighbor_degrees.sort()
features = []
features += neighbor_weights[:5]
features += neighbor_degrees[:5]
features += [self.graph.degree(index)]
return np.array(features)
def get_variable_category(self, var, index):
return "default"

View File

@@ -1,25 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn import LearningSolver
from miplearn.problems.knapsack import MultiKnapsackGenerator, MultiKnapsackInstance
from scipy.stats import uniform, randint
import numpy as np
def test_knapsack_generator():
gen = MultiKnapsackGenerator(n=randint(low=100, high=101),
m=randint(low=30, high=31),
w=randint(low=0, high=1000),
K=randint(low=500, high=501),
u=uniform(loc=1.0, scale=1.0),
alpha=uniform(loc=0.50, scale=0.0),
)
instances = gen.generate(100)
w_sum = sum(instance.weights for instance in instances) / len(instances)
p_sum = sum(instance.prices for instance in instances) / len(instances)
b_sum = sum(instance.capacities for instance in instances) / len(instances)
assert round(np.mean(w_sum), -1) == 500.
# assert round(np.mean(p_sum), -1) == 1200. # flaky
assert round(np.mean(b_sum), -3) == 25000.

View File

@@ -1,74 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn import LearningSolver
from miplearn.problems.tsp import TravelingSalesmanGenerator, TravelingSalesmanInstance
import numpy as np
from numpy.linalg import norm
from scipy.spatial.distance import pdist, squareform
from scipy.stats import uniform, randint
def test_generator():
instances = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
y=uniform(loc=0.0, scale=1000.0),
n=randint(low=100, high=101),
gamma=uniform(loc=0.95, scale=0.1),
fix_cities=True).generate(100)
assert len(instances) == 100
assert instances[0].n_cities == 100
assert norm(instances[0].distances - instances[0].distances.T) < 1e-6
d = [instance.distances[0, 1] for instance in instances]
assert np.std(d) > 0
def test_instance():
n_cities = 4
distances = np.array([
[0., 1., 2., 1.],
[1., 0., 1., 2.],
[2., 1., 0., 1.],
[1., 2., 1., 0.],
])
instance = TravelingSalesmanInstance(n_cities, distances)
for solver_name in ['gurobi', 'cplex']:
solver = LearningSolver(solver=solver_name)
solver.solve(instance)
x = instance.solution["x"]
assert x[0, 1] == 1.0
assert x[0, 2] == 0.0
assert x[0, 3] == 1.0
assert x[1, 2] == 1.0
assert x[1, 3] == 0.0
assert x[2, 3] == 1.0
assert instance.lower_bound == 4.0
assert instance.upper_bound == 4.0
def test_subtour():
n_cities = 6
cities = np.array([
[0., 0.],
[1., 0.],
[2., 0.],
[3., 0.],
[0., 1.],
[3., 1.],
])
distances = squareform(pdist(cities))
instance = TravelingSalesmanInstance(n_cities, distances)
for solver_name in ['gurobi', 'cplex']:
solver = LearningSolver(solver=solver_name)
solver.solve(instance)
assert hasattr(instance, "found_violated_lazy_constraints")
assert hasattr(instance, "found_violated_user_cuts")
x = instance.solution["x"]
assert x[0, 1] == 1.0
assert x[0, 4] == 1.0
assert x[1, 2] == 1.0
assert x[2, 3] == 1.0
assert x[3, 5] == 1.0
assert x[4, 5] == 1.0
solver.fit([instance])
solver.solve(instance)

View File

@@ -1,26 +1,32 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from typing import List, Tuple, FrozenSet, Any, Optional, Dict
import networkx as nx
import numpy as np
import pyomo.environ as pe
from miplearn import Instance
from scipy.stats import uniform, randint
from overrides import overrides
from scipy.spatial.distance import pdist, squareform
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen
import networkx as nx
import random
from miplearn.instance.base import Instance
from miplearn.solvers.learning import InternalSolver
from miplearn.solvers.pyomo.base import BasePyomoSolver
from miplearn.types import ConstraintName
class ChallengeA:
def __init__(self,
seed=42,
n_training_instances=500,
n_test_instances=50,
):
def __init__(
self,
seed: int = 42,
n_training_instances: int = 500,
n_test_instances: int = 50,
) -> None:
np.random.seed(seed)
self.generator = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
self.generator = TravelingSalesmanGenerator(
x=uniform(loc=0.0, scale=1000.0),
y=uniform(loc=0.0, scale=1000.0),
n=randint(low=350, high=351),
gamma=uniform(loc=0.95, scale=0.1),
@@ -35,32 +41,109 @@ class ChallengeA:
self.test_instances = self.generator.generate(n_test_instances)
class TravelingSalesmanInstance(Instance):
"""An instance ot the Traveling Salesman Problem.
Given a list of cities and the distance between each pair of cities, the problem
asks for the shortest route starting at the first city, visiting each other city
exactly once, then returning to the first city. This problem is a generalization
of the Hamiltonian path problem, one of Karp's 21 NP-complete problems.
"""
def __init__(self, n_cities: int, distances: np.ndarray) -> None:
super().__init__()
assert isinstance(distances, np.ndarray)
assert distances.shape == (n_cities, n_cities)
self.n_cities = n_cities
self.distances = distances
self.edges = [
(i, j) for i in range(self.n_cities) for j in range(i + 1, self.n_cities)
]
@overrides
def to_model(self) -> pe.ConcreteModel:
model = pe.ConcreteModel()
model.x = pe.Var(self.edges, domain=pe.Binary)
model.obj = pe.Objective(
expr=sum(model.x[i, j] * self.distances[i, j] for (i, j) in self.edges),
sense=pe.minimize,
)
model.eq_degree = pe.ConstraintList()
model.eq_subtour = pe.ConstraintList()
for i in range(self.n_cities):
model.eq_degree.add(
sum(
model.x[min(i, j), max(i, j)]
for j in range(self.n_cities)
if i != j
)
== 2
)
return model
@overrides
def find_violated_lazy_constraints(
self,
solver: InternalSolver,
model: Any,
) -> List[ConstraintName]:
selected_edges = [e for e in self.edges if model.x[e].value > 0.5]
graph = nx.Graph()
graph.add_edges_from(selected_edges)
violations = []
for c in list(nx.connected_components(graph)):
if len(c) < self.n_cities:
violations.append(",".join(map(str, c)).encode())
return violations
@overrides
def enforce_lazy_constraint(
self,
solver: InternalSolver,
model: Any,
violation: ConstraintName,
) -> None:
assert isinstance(solver, BasePyomoSolver)
component = [int(v) for v in violation.decode().split(",")]
cut_edges = [
e
for e in self.edges
if (e[0] in component and e[1] not in component)
or (e[0] not in component and e[1] in component)
]
constr = model.eq_subtour.add(expr=sum(model.x[e] for e in cut_edges) >= 2)
solver.add_constraint(constr)
class TravelingSalesmanGenerator:
"""Random generator for the Traveling Salesman Problem."""
def __init__(self,
x=uniform(loc=0.0, scale=1000.0),
y=uniform(loc=0.0, scale=1000.0),
n=randint(low=100, high=101),
gamma=uniform(loc=1.0, scale=0.0),
fix_cities=True,
round=True,
):
def __init__(
self,
x: rv_frozen = uniform(loc=0.0, scale=1000.0),
y: rv_frozen = uniform(loc=0.0, scale=1000.0),
n: rv_frozen = randint(low=100, high=101),
gamma: rv_frozen = uniform(loc=1.0, scale=0.0),
fix_cities: bool = True,
round: bool = True,
) -> None:
"""Initializes the problem generator.
Initially, the generator creates n cities (x_1,y_1),...,(x_n,y_n) where n, x_i and y_i are
sampled independently from the provided probability distributions `n`, `x` and `y`. For each
(unordered) pair of cities (i,j), the distance d[i,j] between them is set to:
Initially, the generator creates n cities (x_1,y_1),...,(x_n,y_n) where n,
x_i and y_i are sampled independently from the provided probability
distributions `n`, `x` and `y`. For each (unordered) pair of cities (i,j),
the distance d[i,j] between them is set to:
d[i,j] = gamma[i,j] \sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}
where gamma is sampled from the provided probability distribution `gamma`.
If fix_cities=True, the list of cities is kept the same for all generated instances. The
gamma values, and therefore also the distances, are still different.
If fix_cities=True, the list of cities is kept the same for all generated
instances. The gamma values, and therefore also the distances, are still
different.
By default, all distances d[i,j] are rounded to the nearest integer. If `round=False`
is provided, this rounding will be disabled.
By default, all distances d[i,j] are rounded to the nearest integer. If
`round=False` is provided, this rounding will be disabled.
Arguments
---------
@@ -71,15 +154,18 @@ class TravelingSalesmanGenerator:
n: rv_discrete
Probability distribution for the number of cities.
fix_cities: bool
If False, cities will be resampled for every generated instance. Otherwise, list of
cities will be computed once, during the constructor.
If False, cities will be resampled for every generated instance. Otherwise, list
of cities will be computed once, during the constructor.
round: bool
If True, distances are rounded to the nearest integer.
"""
assert isinstance(x, rv_frozen), "x should be a SciPy probability distribution"
assert isinstance(y, rv_frozen), "y should be a SciPy probability distribution"
assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
assert isinstance(gamma, rv_frozen), "gamma should be a SciPy probability distribution"
assert isinstance(
gamma,
rv_frozen,
), "gamma should be a SciPy probability distribution"
self.x = x
self.y = y
self.n = n
@@ -87,14 +173,17 @@ class TravelingSalesmanGenerator:
self.round = round
if fix_cities:
self.fixed_n: Optional[int]
self.fixed_cities: Optional[np.ndarray]
self.fixed_n, self.fixed_cities = self._generate_cities()
else:
self.fixed_n = None
self.fixed_cities = None
def generate(self, n_samples):
def _sample():
def generate(self, n_samples: int) -> List[TravelingSalesmanInstance]:
def _sample() -> TravelingSalesmanInstance:
if self.fixed_cities is not None:
assert self.fixed_n is not None
n, cities = self.fixed_n, self.fixed_cities
else:
n, cities = self._generate_cities()
@@ -103,73 +192,10 @@ class TravelingSalesmanGenerator:
if self.round:
distances = distances.round()
return TravelingSalesmanInstance(n, distances)
return [_sample() for _ in range(n_samples)]
def _generate_cities(self):
def _generate_cities(self) -> Tuple[int, np.ndarray]:
n = self.n.rvs()
cities = np.array([(self.x.rvs(), self.y.rvs()) for _ in range(n)])
return n, cities
class TravelingSalesmanInstance(Instance):
"""An instance ot the Traveling Salesman Problem.
Given a list of cities and the distance between each pair of cities, the problem asks for the
shortest route starting at the first city, visiting each other city exactly once, then
returning to the first city. This problem is a generalization of the Hamiltonian path problem,
one of Karp's 21 NP-complete problems.
"""
def __init__(self, n_cities, distances):
assert isinstance(distances, np.ndarray)
assert distances.shape == (n_cities, n_cities)
self.n_cities = n_cities
self.distances = distances
def to_model(self):
model = pe.ConcreteModel()
model.edges = edges = [(i,j)
for i in range(self.n_cities)
for j in range(i+1, self.n_cities)]
model.x = pe.Var(edges, domain=pe.Binary)
model.obj = pe.Objective(expr=sum(model.x[i,j] * self.distances[i,j]
for (i,j) in edges),
sense=pe.minimize)
model.eq_degree = pe.ConstraintList()
model.eq_subtour = pe.ConstraintList()
for i in range(self.n_cities):
model.eq_degree.add(sum(model.x[min(i,j), max(i,j)]
for j in range(self.n_cities) if i != j) == 2)
return model
def get_instance_features(self):
return np.array([1])
def get_variable_features(self, var_name, index):
return np.array([1])
def get_variable_category(self, var_name, index):
return index
def find_violated_lazy_constraints(self, model):
selected_edges = [e for e in model.edges if model.x[e].value > 0.5]
graph = nx.Graph()
graph.add_edges_from(selected_edges)
components = [frozenset(c) for c in list(nx.connected_components(graph))]
violations = []
for c in components:
if len(c) < self.n_cities:
violations += [c]
return violations
def build_lazy_constraint(self, model, component):
cut_edges = [e for e in model.edges
if (e[0] in component and e[1] not in component) or
(e[0] not in component and e[1] in component)]
return model.eq_subtour.add(sum(model.x[e] for e in cut_edges) >= 2)
def find_violated_user_cuts(self, model):
return self.find_violated_lazy_constraints(model)
def build_user_cut(self, model, violation):
return self.build_lazy_constraint(model, violation)

View File

@@ -1,32 +1,48 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
import sys
from typing import Any, List, TextIO, cast, TypeVar, Optional, Sized
logger = logging.getLogger(__name__)
class RedirectOutput:
def __init__(self, streams):
class _RedirectOutput:
def __init__(self, streams: List[Any]) -> None:
self.streams = streams
def write(self, data):
def write(self, data: Any) -> None:
for stream in self.streams:
stream.write(data)
def flush(self):
def flush(self) -> None:
for stream in self.streams:
stream.flush()
def __enter__(self):
def __enter__(self) -> Any:
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
sys.stdout = self
sys.stderr = self
sys.stdout = cast(TextIO, self)
sys.stderr = cast(TextIO, self)
return self
def __exit__(self, _type, _value, _traceback):
def __exit__(
self,
_type: Any,
_value: Any,
_traceback: Any,
) -> None:
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
T = TypeVar("T", bound=Sized)
def _none_if_empty(obj: T) -> Optional[T]:
if len(obj) == 0:
return None
else:
return obj

View File

@@ -1,313 +1,558 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
import re
import sys
import logging
from io import StringIO
from random import randint
from typing import List, Any, Dict, Optional, TYPE_CHECKING
from . import RedirectOutput
from .internal import InternalSolver
import numpy as np
from overrides import overrides
from miplearn.instance.base import Instance
from miplearn.solvers import _RedirectOutput
from miplearn.solvers.internal import (
InternalSolver,
LPSolveStats,
IterationCallback,
LazyCallback,
MIPSolveStats,
Variables,
Constraints,
)
from miplearn.solvers.pyomo.base import PyomoTestInstanceKnapsack
from miplearn.types import (
SolverParams,
UserCutCallback,
Solution,
)
if TYPE_CHECKING:
import gurobipy
logger = logging.getLogger(__name__)
class GurobiSolver(InternalSolver):
def __init__(self,
params=None,
lazy_cb_frequency=1,
):
"""
An InternalSolver backed by Gurobi's Python API (without Pyomo).
Parameters
----------
params
Parameters to pass to Gurobi. For example, params={"MIPGap": 1e-3}
params: Optional[SolverParams]
Parameters to pass to Gurobi. For example, `params={"MIPGap": 1e-3}`
sets the gap tolerance to 1e-3.
lazy_cb_frequency
lazy_cb_frequency: int
If 1, calls lazy constraint callbacks whenever an integer solution
is found. If 2, calls it also at every node, after solving the
LP relaxation of that node.
"""
def __init__(
self,
params: Optional[SolverParams] = None,
lazy_cb_frequency: int = 1,
) -> None:
import gurobipy
assert lazy_cb_frequency in [1, 2]
if params is None:
params = {}
from gurobipy import GRB
self.GRB = GRB
self.instance = None
self.model = None
self.params = params
self._all_vars = None
self._bin_vars = None
self.cb_where = None
assert lazy_cb_frequency in [1, 2]
if lazy_cb_frequency == 1:
self.lazy_cb_where = [self.GRB.Callback.MIPSOL]
else:
self.lazy_cb_where = [self.GRB.Callback.MIPSOL,
self.GRB.Callback.MIPNODE]
params["InfUnbdInfo"] = True
params["Seed"] = randint(0, 1_000_000)
def set_instance(self, instance, model=None):
self.gp = gurobipy
self.instance: Optional[Instance] = None
self.model: Optional["gurobipy.Model"] = None
self.params: SolverParams = params
self.cb_where: Optional[int] = None
self.lazy_cb_frequency = lazy_cb_frequency
self._dirty = True
self._has_lp_solution = False
self._has_mip_solution = False
self._varname_to_var: Dict[bytes, "gurobipy.Var"] = {}
self._cname_to_constr: Dict[str, "gurobipy.Constr"] = {}
self._gp_vars: List["gurobipy.Var"] = []
self._gp_constrs: List["gurobipy.Constr"] = []
self._var_names: np.ndarray = np.empty(0)
self._constr_names: List[str] = []
self._var_types: np.ndarray = np.empty(0)
self._var_lbs: np.ndarray = np.empty(0)
self._var_ubs: np.ndarray = np.empty(0)
self._var_obj_coeffs: np.ndarray = np.empty(0)
if self.lazy_cb_frequency == 1:
self.lazy_cb_where = [self.gp.GRB.Callback.MIPSOL]
else:
self.lazy_cb_where = [
self.gp.GRB.Callback.MIPSOL,
self.gp.GRB.Callback.MIPNODE,
]
@overrides
def add_constraints(self, cf: Constraints) -> None:
assert cf.names is not None
assert cf.senses is not None
assert cf.lhs is not None
assert cf.rhs is not None
assert self.model is not None
for i in range(len(cf.names)):
sense = cf.senses[i]
lhs = self.gp.quicksum(
self._varname_to_var[varname] * coeff for (varname, coeff) in cf.lhs[i]
)
if sense == b"=":
self.model.addConstr(lhs == cf.rhs[i], name=cf.names[i])
elif sense == b"<":
self.model.addConstr(lhs <= cf.rhs[i], name=cf.names[i])
elif sense == b">":
self.model.addConstr(lhs >= cf.rhs[i], name=cf.names[i])
else:
raise Exception(f"Unknown sense: {sense}")
self.model.update()
self._dirty = True
self._has_lp_solution = False
self._has_mip_solution = False
@overrides
def are_callbacks_supported(self) -> bool:
return True
@overrides
def are_constraints_satisfied(
self,
cf: Constraints,
tol: float = 1e-5,
) -> List[bool]:
assert cf.names is not None
assert cf.senses is not None
assert cf.lhs is not None
assert cf.rhs is not None
assert self.model is not None
result = []
for i in range(len(cf.names)):
sense = cf.senses[i]
lhs = sum(
self._varname_to_var[varname].x * coeff
for (varname, coeff) in cf.lhs[i]
)
if sense == "<":
result.append(lhs <= cf.rhs[i] + tol)
elif sense == ">":
result.append(lhs >= cf.rhs[i] - tol)
else:
result.append(abs(cf.rhs[i] - lhs) <= tol)
return result
@overrides
def build_test_instance_infeasible(self) -> Instance:
return GurobiTestInstanceInfeasible()
@overrides
def build_test_instance_knapsack(self) -> Instance:
return GurobiTestInstanceKnapsack(
weights=[23.0, 26.0, 20.0, 18.0],
prices=[505.0, 352.0, 458.0, 220.0],
capacity=67.0,
)
@overrides
def clone(self) -> "GurobiSolver":
return GurobiSolver(
params=self.params,
lazy_cb_frequency=self.lazy_cb_frequency,
)
@overrides
def fix(self, solution: Solution) -> None:
self._raise_if_callback()
for (varname, value) in solution.items():
if value is None:
continue
var = self._varname_to_var[varname]
var.vtype = self.gp.GRB.CONTINUOUS
var.lb = value
var.ub = value
@overrides
def get_constraint_attrs(self) -> List[str]:
return [
"basis_status",
"categories",
"dual_values",
"lazy",
"lhs",
"names",
"rhs",
"sa_rhs_down",
"sa_rhs_up",
"senses",
"slacks",
"user_features",
]
@overrides
def get_constraints(
self,
with_static: bool = True,
with_sa: bool = True,
with_lhs: bool = True,
) -> Constraints:
model = self.model
assert model is not None
assert model.numVars == len(self._gp_vars)
def _parse_gurobi_cbasis(v: int) -> str:
if v == 0:
return "B"
if v == -1:
return "N"
raise Exception(f"unknown cbasis: {v}")
gp_constrs = model.getConstrs()
constr_names = np.array(model.getAttr("constrName", gp_constrs), dtype="S")
lhs: Optional[List] = None
rhs, senses, slacks, basis_status = None, None, None, None
dual_value, basis_status, sa_rhs_up, sa_rhs_down = None, None, None, None
if with_static:
rhs = np.array(model.getAttr("rhs", gp_constrs), dtype=float)
senses = np.array(model.getAttr("sense", gp_constrs), dtype="S")
if with_lhs:
lhs = [None for _ in gp_constrs]
for (i, gp_constr) in enumerate(gp_constrs):
expr = model.getRow(gp_constr)
lhs[i] = [
(self._var_names[expr.getVar(j).index], expr.getCoeff(j))
for j in range(expr.size())
]
if self._has_lp_solution:
dual_value = np.array(model.getAttr("pi", gp_constrs), dtype=float)
basis_status = np.array(
[_parse_gurobi_cbasis(c) for c in model.getAttr("cbasis", gp_constrs)],
dtype="S",
)
if with_sa:
sa_rhs_up = np.array(model.getAttr("saRhsUp", gp_constrs), dtype=float)
sa_rhs_down = np.array(
model.getAttr("saRhsLow", gp_constrs), dtype=float
)
if self._has_lp_solution or self._has_mip_solution:
slacks = np.array(model.getAttr("slack", gp_constrs), dtype=float)
return Constraints(
basis_status=basis_status,
dual_values=dual_value,
lhs=lhs,
names=constr_names,
rhs=rhs,
sa_rhs_down=sa_rhs_down,
sa_rhs_up=sa_rhs_up,
senses=senses,
slacks=slacks,
)
@overrides
def get_solution(self) -> Optional[Solution]:
assert self.model is not None
if self.cb_where is not None:
if self.cb_where == self.gp.GRB.Callback.MIPNODE:
return {
v.varName.encode(): self.model.cbGetNodeRel(v)
for v in self.model.getVars()
}
elif self.cb_where == self.gp.GRB.Callback.MIPSOL:
return {
v.varName.encode(): self.model.cbGetSolution(v)
for v in self.model.getVars()
}
else:
raise Exception(
f"get_solution can only be called from a callback "
f"when cb_where is either MIPNODE or MIPSOL"
)
if self.model.solCount == 0:
return None
return {v.varName.encode(): v.x for v in self.model.getVars()}
@overrides
def get_variable_attrs(self) -> List[str]:
return [
"names",
"basis_status",
"categories",
"lower_bounds",
"obj_coeffs",
"reduced_costs",
"sa_lb_down",
"sa_lb_up",
"sa_obj_down",
"sa_obj_up",
"sa_ub_down",
"sa_ub_up",
"types",
"upper_bounds",
"user_features",
"values",
]
@overrides
def get_variables(
self,
with_static: bool = True,
with_sa: bool = True,
) -> Variables:
model = self.model
assert model is not None
def _parse_gurobi_vbasis(b: int) -> str:
if b == 0:
return "B"
elif b == -1:
return "L"
elif b == -2:
return "U"
elif b == -3:
return "S"
else:
raise Exception(f"unknown vbasis: {basis_status}")
basis_status: Optional[np.ndarray] = None
upper_bounds, lower_bounds, types, values = None, None, None, None
obj_coeffs, reduced_costs = None, None
sa_obj_up, sa_ub_up, sa_lb_up = None, None, None
sa_obj_down, sa_ub_down, sa_lb_down = None, None, None
if with_static:
upper_bounds = self._var_ubs
lower_bounds = self._var_lbs
types = self._var_types
obj_coeffs = self._var_obj_coeffs
if self._has_lp_solution:
reduced_costs = np.array(model.getAttr("rc", self._gp_vars), dtype=float)
basis_status = np.array(
[
_parse_gurobi_vbasis(b)
for b in model.getAttr("vbasis", self._gp_vars)
],
dtype="S",
)
if with_sa:
sa_obj_up = np.array(
model.getAttr("saobjUp", self._gp_vars),
dtype=float,
)
sa_obj_down = np.array(
model.getAttr("saobjLow", self._gp_vars),
dtype=float,
)
sa_ub_up = np.array(
model.getAttr("saubUp", self._gp_vars),
dtype=float,
)
sa_ub_down = np.array(
model.getAttr("saubLow", self._gp_vars),
dtype=float,
)
sa_lb_up = np.array(
model.getAttr("salbUp", self._gp_vars),
dtype=float,
)
sa_lb_down = np.array(
model.getAttr("salbLow", self._gp_vars),
dtype=float,
)
if model.solCount > 0:
values = np.array(model.getAttr("x", self._gp_vars), dtype=float)
return Variables(
names=self._var_names,
upper_bounds=upper_bounds,
lower_bounds=lower_bounds,
types=types,
obj_coeffs=obj_coeffs,
reduced_costs=reduced_costs,
basis_status=basis_status,
sa_obj_up=sa_obj_up,
sa_obj_down=sa_obj_down,
sa_ub_up=sa_ub_up,
sa_ub_down=sa_ub_down,
sa_lb_up=sa_lb_up,
sa_lb_down=sa_lb_down,
values=values,
)
@overrides
def is_infeasible(self) -> bool:
assert self.model is not None
return self.model.status in [self.gp.GRB.INFEASIBLE, self.gp.GRB.INF_OR_UNBD]
@overrides
def remove_constraints(self, names: List[str]) -> None:
assert self.model is not None
constrs = [self.model.getConstrByName(n) for n in names]
self.model.remove(constrs)
self.model.update()
@overrides
def set_instance(
self,
instance: Instance,
model: Any = None,
) -> None:
self._raise_if_callback()
if model is None:
model = instance.to_model()
assert isinstance(model, self.gp.Model)
self.instance = instance
self.model = model
self.model.update()
self._update_vars()
self._update()
def _raise_if_callback(self):
if self.cb_where is not None:
raise Exception("method cannot be called from a callback")
def _update_vars(self):
self._all_vars = {}
self._bin_vars = {}
for var in self.model.getVars():
m = re.search(r"([^[]*)\[(.*)\]", var.varName)
if m is None:
name = var.varName
idx = [0]
else:
name = m.group(1)
idx = tuple(int(k) if k.isdecimal() else k
for k in m.group(2).split(","))
if len(idx) == 1:
idx = idx[0]
if name not in self._all_vars:
self._all_vars[name] = {}
self._all_vars[name][idx] = var
if var.vtype != 'C':
if name not in self._bin_vars:
self._bin_vars[name] = {}
self._bin_vars[name][idx] = var
def _apply_params(self):
for (name, value) in self.params.items():
self.model.setParam(name, value)
def solve_lp(self, tee=False):
@overrides
def set_warm_start(self, solution: Solution) -> None:
self._raise_if_callback()
self._apply_params()
streams = [StringIO()]
if tee:
streams += [sys.stdout]
for (varname, vardict) in self._bin_vars.items():
for (idx, var) in vardict.items():
var.vtype = self.GRB.CONTINUOUS
var.lb = 0.0
var.ub = 1.0
with RedirectOutput(streams):
self.model.optimize()
for (varname, vardict) in self._bin_vars.items():
for (idx, var) in vardict.items():
var.vtype = self.GRB.BINARY
log = streams[0].getvalue()
return {
"Optimal value": self.model.objVal,
"Log": log
}
self._clear_warm_start()
for (var_name, value) in solution.items():
var = self._varname_to_var[var_name]
if value is not None:
var.start = value
def solve(self,
tee=False,
iteration_cb=None,
lazy_cb=None):
@overrides
def solve(
self,
tee: bool = False,
iteration_cb: Optional[IterationCallback] = None,
lazy_cb: Optional[LazyCallback] = None,
user_cut_cb: Optional[UserCutCallback] = None,
) -> MIPSolveStats:
self._raise_if_callback()
assert self.model is not None
if iteration_cb is None:
iteration_cb = lambda: False
callback_exceptions = []
def cb_wrapper(cb_model, cb_where):
# Create callback wrapper
def cb_wrapper(cb_model: Any, cb_where: int) -> None:
try:
self.cb_where = cb_where
if cb_where in self.lazy_cb_where:
if lazy_cb is not None and cb_where in self.lazy_cb_where:
lazy_cb(self, self.model)
except:
if user_cut_cb is not None and cb_where == self.gp.GRB.Callback.MIPNODE:
user_cut_cb(self, self.model)
except Exception as e:
logger.exception("callback error")
callback_exceptions.append(e)
finally:
self.cb_where = None
if lazy_cb:
# Configure Gurobi
if lazy_cb is not None:
self.params["LazyConstraints"] = 1
self._apply_params()
if user_cut_cb is not None:
self.params["PreCrush"] = 1
# Solve problem
total_wallclock_time = 0
total_nodes = 0
streams = [StringIO()]
streams: List[Any] = [StringIO()]
if tee:
streams += [sys.stdout]
if iteration_cb is None:
iteration_cb = lambda : False
self._apply_params(streams)
while True:
logger.debug("Solving MIP...")
with RedirectOutput(streams):
if lazy_cb is None:
self.model.optimize()
else:
with _RedirectOutput(streams):
self.model.optimize(cb_wrapper)
self._dirty = False
if len(callback_exceptions) > 0:
raise callback_exceptions[0]
total_wallclock_time += self.model.runtime
total_nodes += int(self.model.nodeCount)
should_repeat = iteration_cb()
if not should_repeat:
break
self._has_lp_solution = False
self._has_mip_solution = self.model.solCount > 0
# Fetch results and stats
log = streams[0].getvalue()
ub, lb = None, None
sense = "min" if self.model.modelSense == 1 else "max"
if self.model.solCount > 0:
if self.model.modelSense == 1:
sense = "min"
lb = self.model.objBound
ub = self.model.objVal
else:
sense = "max"
lb = self.model.objVal
ub = self.model.objBound
return {
"Lower bound": lb,
"Upper bound": ub,
"Wallclock time": total_wallclock_time,
"Nodes": total_nodes,
"Sense": sense,
"Log": log,
"Warm start value": self._extract_warm_start_value(log),
}
ws_value = self._extract_warm_start_value(log)
return MIPSolveStats(
mip_lower_bound=lb,
mip_upper_bound=ub,
mip_wallclock_time=total_wallclock_time,
mip_nodes=total_nodes,
mip_sense=sense,
mip_log=log,
mip_warm_start_value=ws_value,
)
def get_solution(self):
@overrides
def solve_lp(
self,
tee: bool = False,
) -> LPSolveStats:
self._raise_if_callback()
streams: List[Any] = [StringIO()]
if tee:
streams += [sys.stdout]
self._apply_params(streams)
assert self.model is not None
for (i, var) in enumerate(self._gp_vars):
if self._var_types[i] == b"B":
var.vtype = self.gp.GRB.CONTINUOUS
var.lb = 0.0
var.ub = 1.0
with _RedirectOutput(streams):
self.model.optimize()
self._dirty = False
for (i, var) in enumerate(self._gp_vars):
if self._var_types[i] == b"B":
var.vtype = self.gp.GRB.BINARY
log = streams[0].getvalue()
self._has_lp_solution = self.model.solCount > 0
self._has_mip_solution = False
opt_value = None
if not self.is_infeasible():
opt_value = self.model.objVal
return LPSolveStats(
lp_value=opt_value,
lp_log=log,
lp_wallclock_time=self.model.runtime,
)
solution = {}
for (varname, vardict) in self._all_vars.items():
solution[varname] = {}
for (idx, var) in vardict.items():
solution[varname][idx] = var.x
return solution
def _apply_params(self, streams: List[Any]) -> None:
assert self.model is not None
with _RedirectOutput(streams):
for (name, value) in self.params.items():
self.model.setParam(name, value)
def get_value(self, var_name, index):
var = self._all_vars[var_name][index]
return self._get_value(var)
def _clear_warm_start(self) -> None:
for var in self._varname_to_var.values():
var.start = self.gp.GRB.UNDEFINED
def _get_value(self, var):
if self.cb_where == self.GRB.Callback.MIPSOL:
return self.model.cbGetSolution(var)
elif self.cb_where == self.GRB.Callback.MIPNODE:
return self.model.cbGetNodeRel(var)
elif self.cb_where is None:
return var.x
else:
raise Exception("get_value cannot be called from cb_where=%s" % self.cb_where)
def get_variables(self):
self._raise_if_callback()
variables = {}
for (varname, vardict) in self._all_vars.items():
variables[varname] = []
for (idx, var) in vardict.items():
variables[varname] += [idx]
return variables
def add_constraint(self, constraint, name=""):
if type(constraint) is tuple:
lhs, sense, rhs, name = constraint
if self.cb_where in [self.GRB.Callback.MIPSOL, self.GRB.Callback.MIPNODE]:
self.model.cbLazy(lhs, sense, rhs)
else:
self.model.addConstr(lhs, sense, rhs, name)
else:
if self.cb_where in [self.GRB.Callback.MIPSOL, self.GRB.Callback.MIPNODE]:
self.model.cbLazy(constraint)
else:
self.model.addConstr(constraint, name=name)
def set_warm_start(self, solution):
self._raise_if_callback()
count_fixed, count_total = 0, 0
for (varname, vardict) in solution.items():
for (idx, value) in vardict.items():
count_total += 1
if value is not None:
count_fixed += 1
self._all_vars[varname][idx].start = value
logger.info("Setting start values for %d variables (out of %d)" %
(count_fixed, count_total))
def clear_warm_start(self):
self._raise_if_callback()
for (varname, vardict) in self._all_vars:
for (idx, var) in vardict.items():
var[idx].start = self.GRB.UNDEFINED
def fix(self, solution):
self._raise_if_callback()
for (varname, vardict) in solution.items():
for (idx, value) in vardict.items():
if value is None:
continue
var = self._all_vars[varname][idx]
var.vtype = self.GRB.CONTINUOUS
var.lb = value
var.ub = value
def get_constraint_ids(self):
self._raise_if_callback()
self.model.update()
return [c.ConstrName for c in self.model.getConstrs()]
def extract_constraint(self, cid):
self._raise_if_callback()
constr = self.model.getConstrByName(cid)
cobj = (self.model.getRow(constr),
constr.sense,
constr.RHS,
constr.ConstrName)
self.model.remove(constr)
return cobj
def is_constraint_satisfied(self, cobj, tol=1e-5):
lhs, sense, rhs, name = cobj
if self.cb_where is not None:
lhs_value = lhs.getConstant()
for i in range(lhs.size()):
var = lhs.getVar(i)
coeff = lhs.getCoeff(i)
lhs_value += self._get_value(var) * coeff
else:
lhs_value = lhs.getValue()
if sense == "<":
return lhs_value <= rhs + tol
elif sense == ">":
return lhs_value >= rhs - tol
elif sense == "=":
return abs(rhs - lhs_value) < abs(tol)
else:
raise Exception("Unknown sense: %s" % sense)
def get_constraint_slacks(self):
return {c.ConstrName: c.Slack for c in self.model.getConstrs()}
def relax(self):
self.model = self.model.relax()
self._update_vars()
def set_branching_priorities(self, priorities):
self._raise_if_callback()
logger.warning("set_branching_priorities not implemented")
def set_threads(self, threads):
self._raise_if_callback()
self.params["Threads"] = threads
def set_time_limit(self, time_limit):
self._raise_if_callback()
self.params["TimeLimit"] = time_limit
def set_node_limit(self, node_limit):
self._raise_if_callback()
self.params["NodeLimit"] = node_limit
def set_gap_tolerance(self, gap_tolerance):
self._raise_if_callback()
self.params["MIPGap"] = gap_tolerance
def _extract_warm_start_value(self, log):
ws = self.__extract(log, "MIP start with objective ([0-9.e+-]*)")
if ws is not None:
ws = float(ws)
return ws
def __extract(self, log, regexp, default=None):
@staticmethod
def _extract(
log: str,
regexp: str,
default: Optional[str] = None,
) -> Optional[str]:
value = default
for line in log.splitlines():
matches = re.findall(regexp, line)
@@ -316,19 +561,158 @@ class GurobiSolver(InternalSolver):
value = matches[0]
return value
def __getstate__(self):
def _extract_warm_start_value(self, log: str) -> Optional[float]:
ws = self._extract(log, "MIP start with objective ([0-9.e+-]*)")
if ws is None:
return None
return float(ws)
def _get_value(self, var: Any) -> float:
assert self.model is not None
if self.cb_where == self.gp.GRB.Callback.MIPSOL:
return self.model.cbGetSolution(var)
elif self.cb_where == self.gp.GRB.Callback.MIPNODE:
return self.model.cbGetNodeRel(var)
elif self.cb_where is None:
return var.x
else:
raise Exception(
"get_value cannot be called from cb_where=%s" % self.cb_where
)
def _raise_if_callback(self) -> None:
if self.cb_where is not None:
raise Exception("method cannot be called from a callback")
def _update(self) -> None:
assert self.model is not None
gp_vars: List["gurobipy.Var"] = self.model.getVars()
gp_constrs: List["gurobipy.Constr"] = self.model.getConstrs()
var_names: np.ndarray = np.array(
self.model.getAttr("varName", gp_vars),
dtype="S",
)
var_types: np.ndarray = np.array(
self.model.getAttr("vtype", gp_vars),
dtype="S",
)
var_ubs: np.ndarray = np.array(
self.model.getAttr("ub", gp_vars),
dtype=float,
)
var_lbs: np.ndarray = np.array(
self.model.getAttr("lb", gp_vars),
dtype=float,
)
var_obj_coeffs: np.ndarray = np.array(
self.model.getAttr("obj", gp_vars),
dtype=float,
)
constr_names: List[str] = self.model.getAttr("constrName", gp_constrs)
varname_to_var: Dict[bytes, "gurobipy.Var"] = {}
cname_to_constr: Dict = {}
for (i, gp_var) in enumerate(gp_vars):
assert var_names[i] not in varname_to_var, (
f"Duplicated variable name detected: {var_names[i]}. "
f"Unique variable names are currently required."
)
if var_types[i] == b"I":
assert var_ubs[i] == 1.0, (
"Only binary and continuous variables are currently supported. "
f"Integer variable {var_names[i]} has upper bound {var_ubs[i]}."
)
assert var_lbs[i] == 0.0, (
"Only binary and continuous variables are currently supported. "
f"Integer variable {var_names[i]} has lower bound {var_ubs[i]}."
)
var_types[i] = b"B"
assert var_types[i] in [b"B", b"C"], (
"Only binary and continuous variables are currently supported. "
f"Variable {var_names[i]} has type {var_types[i]}."
)
varname_to_var[var_names[i]] = gp_var
for (i, gp_constr) in enumerate(gp_constrs):
assert constr_names[i] not in cname_to_constr, (
f"Duplicated constraint name detected: {constr_names[i]}. "
f"Unique constraint names are currently required."
)
cname_to_constr[constr_names[i]] = gp_constr
self._varname_to_var = varname_to_var
self._cname_to_constr = cname_to_constr
self._gp_vars = gp_vars
self._gp_constrs = gp_constrs
self._var_names = var_names
self._constr_names = constr_names
self._var_types = var_types
self._var_lbs = var_lbs
self._var_ubs = var_ubs
self._var_obj_coeffs = var_obj_coeffs
def __getstate__(self) -> Dict:
return {
"params": self.params,
"lazy_cb_where": self.lazy_cb_where,
}
def __setstate__(self, state):
from gurobipy import GRB
def __setstate__(self, state: Dict) -> None:
self.params = state["params"]
self.lazy_cb_where = state["lazy_cb_where"]
self.GRB = GRB
self.instance = None
self.model = None
self._all_vars = None
self._bin_vars = None
self.cb_where = None
class GurobiTestInstanceInfeasible(Instance):
@overrides
def to_model(self) -> Any:
import gurobipy as gp
from gurobipy import GRB
model = gp.Model()
x = model.addVars(1, vtype=GRB.BINARY, name="x")
model.addConstr(x[0] >= 2)
model.setObjective(x[0])
return model
class GurobiTestInstanceKnapsack(PyomoTestInstanceKnapsack):
"""
Simpler (one-dimensional) knapsack instance, implemented directly in Gurobi
instead of Pyomo, used for testing.
"""
def __init__(
self,
weights: List[float],
prices: List[float],
capacity: float,
) -> None:
super().__init__(weights, prices, capacity)
@overrides
def to_model(self) -> Any:
import gurobipy as gp
from gurobipy import GRB
model = gp.Model("Knapsack")
n = len(self.weights)
x = model.addVars(n, vtype=GRB.BINARY, name="x")
z = model.addVar(vtype=GRB.CONTINUOUS, name="z", ub=self.capacity)
model.addConstr(
gp.quicksum(x[i] * self.weights[i] for i in range(n)) == z,
"eq_capacity",
)
model.setObjective(
gp.quicksum(x[i] * self.prices[i] for i in range(n)), GRB.MAXIMIZE
)
return model
@overrides
def enforce_lazy_constraint(
self,
solver: InternalSolver,
model: Any,
violation: str,
) -> None:
x0 = model.getVarByName("x[0]")
model.cbLazy(x0 <= 0)

View File

@@ -1,15 +1,127 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Optional, List, Tuple, TYPE_CHECKING
import numpy as np
from miplearn.instance.base import Instance
from miplearn.types import (
IterationCallback,
LazyCallback,
UserCutCallback,
Solution,
)
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from miplearn.features.sample import Sample
class ExtractedConstraint(ABC):
pass
@dataclass
class LPSolveStats:
lp_log: Optional[str] = None
lp_value: Optional[float] = None
lp_wallclock_time: Optional[float] = None
def to_list(self) -> List[float]:
features: List[float] = []
for attr in ["lp_value", "lp_wallclock_time"]:
if getattr(self, attr) is not None:
features.append(getattr(self, attr))
return features
@dataclass
class MIPSolveStats:
mip_lower_bound: Optional[float] = None
mip_log: Optional[str] = None
mip_nodes: Optional[int] = None
mip_sense: Optional[str] = None
mip_upper_bound: Optional[float] = None
mip_wallclock_time: Optional[float] = None
mip_warm_start_value: Optional[float] = None
@dataclass
class Variables:
names: Optional[np.ndarray] = None
basis_status: Optional[np.ndarray] = None
lower_bounds: Optional[np.ndarray] = None
obj_coeffs: Optional[np.ndarray] = None
reduced_costs: Optional[np.ndarray] = None
sa_lb_down: Optional[np.ndarray] = None
sa_lb_up: Optional[np.ndarray] = None
sa_obj_down: Optional[np.ndarray] = None
sa_obj_up: Optional[np.ndarray] = None
sa_ub_down: Optional[np.ndarray] = None
sa_ub_up: Optional[np.ndarray] = None
types: Optional[np.ndarray] = None
upper_bounds: Optional[np.ndarray] = None
values: Optional[np.ndarray] = None
@dataclass
class Constraints:
basis_status: Optional[np.ndarray] = None
dual_values: Optional[np.ndarray] = None
lazy: Optional[np.ndarray] = None
lhs: Optional[List[List[Tuple[bytes, float]]]] = None
names: Optional[np.ndarray] = None
rhs: Optional[np.ndarray] = None
sa_rhs_down: Optional[np.ndarray] = None
sa_rhs_up: Optional[np.ndarray] = None
senses: Optional[np.ndarray] = None
slacks: Optional[np.ndarray] = None
@staticmethod
def from_sample(sample: "Sample") -> "Constraints":
return Constraints(
basis_status=sample.get_array("lp_constr_basis_status"),
dual_values=sample.get_array("lp_constr_dual_values"),
lazy=sample.get_array("static_constr_lazy"),
# lhs=sample.get_vector("static_constr_lhs"),
names=sample.get_array("static_constr_names"),
rhs=sample.get_array("static_constr_rhs"),
sa_rhs_down=sample.get_array("lp_constr_sa_rhs_down"),
sa_rhs_up=sample.get_array("lp_constr_sa_rhs_up"),
senses=sample.get_array("static_constr_senses"),
slacks=sample.get_array("lp_constr_slacks"),
)
def __getitem__(self, selected: List[bool]) -> "Constraints":
return Constraints(
basis_status=(
None if self.basis_status is None else self.basis_status[selected]
),
dual_values=(
None if self.dual_values is None else self.dual_values[selected]
),
names=(None if self.names is None else self.names[selected]),
lazy=(None if self.lazy is None else self.lazy[selected]),
lhs=self._filter(self.lhs, selected),
rhs=(None if self.rhs is None else self.rhs[selected]),
sa_rhs_down=(
None if self.sa_rhs_down is None else self.sa_rhs_down[selected]
),
sa_rhs_up=(None if self.sa_rhs_up is None else self.sa_rhs_up[selected]),
senses=(None if self.senses is None else self.senses[selected]),
slacks=(None if self.slacks is None else self.slacks[selected]),
)
def _filter(
self,
obj: Optional[List],
selected: List[bool],
) -> Optional[List]:
if obj is None:
return None
return [obj[i] for (i, selected_i) in enumerate(selected) if selected_i]
class InternalSolver(ABC):
@@ -18,67 +130,152 @@ class InternalSolver(ABC):
"""
@abstractmethod
def solve_lp(self, tee=False):
def add_constraints(self, cf: Constraints) -> None:
"""Adds the given constraints to the model."""
pass
@abstractmethod
def are_constraints_satisfied(
self,
cf: Constraints,
tol: float = 1e-5,
) -> List[bool]:
"""
Solves the LP relaxation of the currently loaded instance. After this
method finishes, the solution can be retrieved by calling `get_solution`.
Checks whether the current solution satisfies the given constraints.
"""
pass
Parameters
----------
tee: bool
If true, prints the solver log to the screen.
def are_callbacks_supported(self) -> bool:
"""
Returns True if this solver supports native callbacks, such as lazy constraints
callback or user cuts callback.
"""
return False
Returns
-------
dict
A dictionary of solver statistics containing the following keys:
"Optimal value".
@abstractmethod
def build_test_instance_infeasible(self) -> Instance:
"""
Returns an infeasible instance, for testing purposes.
"""
pass
@abstractmethod
def get_solution(self):
def build_test_instance_knapsack(self) -> Instance:
"""
Returns an instance corresponding to the following MIP, for testing purposes:
maximize 505 x0 + 352 x1 + 458 x2 + 220 x3
s.t. eq_capacity: z = 23 x0 + 26 x1 + 20 x2 + 18 x3
x0, x1, x2, x3 binary
0 <= z <= 67 continuous
"""
pass
@abstractmethod
def clone(self) -> "InternalSolver":
"""
Returns a new copy of this solver with identical parameters, but otherwise
completely unitialized.
"""
pass
@abstractmethod
def fix(self, solution: Solution) -> None:
"""
Fixes the values of a subset of decision variables. Missing values in the
solution indicate variables that should be left free.
"""
pass
@abstractmethod
def get_solution(self) -> Optional[Solution]:
"""
Returns current solution found by the solver.
If called after `solve`, returns the best primal solution found during
the search. If called after `solve_lp`, returns the optimal solution
to the LP relaxation.
The solution is a dictionary `sol`, where the optimal value of `var[idx]`
is given by `sol[var][idx]`.
to the LP relaxation. If no primal solution is available, return None.
"""
pass
@abstractmethod
def set_warm_start(self, solution):
def get_constraint_attrs(self) -> List[str]:
"""
Returns a list of constraint attributes supported by this solver. Used for
testing purposes only.
"""
Sets the warm start to be used by the solver.
The solution should be a dictionary following the same format as the
one produced by `get_solution`. Only one warm start is supported.
Calling this function when a warm start already exists will
remove the previous warm start.
pass
@abstractmethod
def get_constraints(
self,
with_static: bool = True,
with_sa: bool = True,
with_lhs: bool = True,
) -> Constraints:
pass
@abstractmethod
def get_variable_attrs(self) -> List[str]:
"""
Returns a list of variable attributes supported by this solver. Used for
testing purposes only.
"""
pass
@abstractmethod
def clear_warm_start(self):
def get_variables(
self,
with_static: bool = True,
with_sa: bool = True,
) -> Variables:
"""
Removes any existing warm start from the solver.
Returns a description of the decision variables in the problem.
Parameters
----------
with_static: bool
If True, include features that do not change during the solution process,
such as variable types and names. This parameter is used to reduce the
amount of duplicated data collected by LearningSolver. Features that do
not change are only collected once.
with_sa: bool
If True, collect sensitivity analysis information. For large models,
collecting this information may be expensive, so this parameter is useful
for reducing running times.
"""
pass
@abstractmethod
def set_instance(self, instance, model=None):
def is_infeasible(self) -> bool:
"""
Returns True if the model has been proved to be infeasible.
Must be called after solve.
"""
pass
@abstractmethod
def remove_constraints(self, names: np.ndarray) -> None:
"""
Removes the given constraints from the model.
"""
pass
@abstractmethod
def set_instance(
self,
instance: Instance,
model: Any = None,
) -> None:
"""
Loads the given instance into the solver.
Parameters
----------
instance: miplearn.Instance
instance: Instance
The instance to be loaded.
model:
model: Any
The concrete optimization model corresponding to this instance
(e.g. JuMP.Model or pyomo.core.ConcreteModel). If not provided,
it will be generated by calling `instance.to_model()`.
@@ -86,140 +283,66 @@ class InternalSolver(ABC):
pass
@abstractmethod
def fix(self, solution):
def set_warm_start(self, solution: Solution) -> None:
"""
Fixes the values of a subset of decision variables.
Sets the warm start to be used by the solver.
The values should be provided in the dictionary format generated by
`get_solution`. Missing values in the solution indicate variables
that should be left free.
Only one warm start is supported. Calling this function when a warm start
already exists will remove the previous warm start.
"""
pass
@abstractmethod
def set_branching_priorities(self, priorities):
"""
Sets the branching priorities for the given decision variables.
When the MIP solver needs to decide on which variable to branch, variables
with higher priority are picked first, given that they are fractional.
Ties are solved arbitrarily. By default, all variables have priority zero.
The priorities should be provided in the dictionary format generated by
`get_solution`. Missing values indicate variables whose priorities
should not be modified.
"""
pass
@abstractmethod
def add_constraint(self, constraint):
"""
Adds a single constraint to the model.
"""
pass
@abstractmethod
def solve(self, tee=False, iteration_cb=None, lazy_cb=None):
def solve(
self,
tee: bool = False,
iteration_cb: Optional[IterationCallback] = None,
lazy_cb: Optional[LazyCallback] = None,
user_cut_cb: Optional[UserCutCallback] = None,
) -> MIPSolveStats:
"""
Solves the currently loaded instance. After this method finishes,
the best solution found can be retrieved by calling `get_solution`.
Parameters
----------
iteration_cb: () -> Bool
iteration_cb: IterationCallback
By default, InternalSolver makes a single call to the native `solve`
method and returns the result. If an iteration callback is provided
instead, InternalSolver enters a loop, where `solve` and `iteration_cb`
are called alternatively. To stop the loop, `iteration_cb` should
return False. Any other result causes the solver to loop again.
lazy_cb: (internal_solver, model) -> None
are called alternatively. To stop the loop, `iteration_cb` should return
False. Any other result causes the solver to loop again.
lazy_cb: LazyCallback
This function is called whenever the solver finds a new candidate
solution and can be used to add lazy constraints to the model. Only
two operations within the callback are allowed:
- Querying the value of a variable, through `get_value(var, idx)`
- Querying if a constraint is satisfied, through `is_constraint_satisfied(cobj)`
- Adding a new constraint to the problem, through `add_constraint`
solution and can be used to add lazy constraints to the model. Only the
following operations within the callback are allowed:
- Querying the value of a variable
- Querying if a constraint is satisfied
- Adding a new constraint to the problem
Additional operations may be allowed by specific subclasses.
tee: Bool
user_cut_cb: UserCutCallback
This function is called whenever the solver found a new integer-infeasible
solution and needs to generate cutting planes to cut it off.
tee: bool
If true, prints the solver log to the screen.
Returns
-------
dict
A dictionary of solver statistics containing the following keys:
"Lower bound", "Upper bound", "Wallclock time", "Nodes", "Sense",
"Log" and "Warm start value".
"""
pass
@abstractmethod
def get_value(self, var_name, index):
def solve_lp(
self,
tee: bool = False,
) -> LPSolveStats:
"""
Returns the current value of a decision variable.
Solves the LP relaxation of the currently loaded instance. After this
method finishes, the solution can be retrieved by calling `get_solution`.
This method should not permanently modify the problem. That is, subsequent
calls to `solve` should solve the original MIP, not the LP relaxation.
Parameters
----------
tee
If true, prints the solver log to the screen.
"""
pass
@abstractmethod
def get_constraint_ids(self):
"""
Returns a list of ids, which uniquely identify each constraint in the model.
"""
pass
@abstractmethod
def extract_constraint(self, cid):
"""
Removes a given constraint from the model and returns an object `cobj` which
can be used to verify if the removed constraint is still satisfied by
the current solution, using `is_constraint_satisfied(cobj)`, and can potentially
be re-added to the model using `add_constraint(cobj)`.
"""
pass
@abstractmethod
def relax(self):
"""
Drops all integrality constraints from the model.
"""
pass
@abstractmethod
def get_constraint_slacks(self):
"""
Returns a dictionary mapping constraint name to the constraint slack
in the current solution.
"""
pass
@abstractmethod
def is_constraint_satisfied(self, cobj):
pass
@abstractmethod
def set_threads(self, threads):
pass
@abstractmethod
def set_time_limit(self, time_limit):
pass
@abstractmethod
def set_node_limit(self, node_limit):
pass
@abstractmethod
def set_gap_tolerance(self, gap_tolerance):
pass
@abstractmethod
def get_variables(self):
pass
def get_empty_solution(self):
solution = {}
for (var, indices) in self.get_variables().items():
solution[var] = {}
for idx in indices:
solution[var][idx] = 0.0
return solution

View File

@@ -1,58 +1,65 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from copy import deepcopy
from typing import Optional, List
import time
import traceback
from typing import Optional, List, Any, cast, Dict, Tuple
from p_tqdm import p_map
from .. import (ObjectiveValueComponent,
PrimalSolutionComponent,
DynamicLazyConstraintsComponent,
UserCutsComponent)
from .pyomo.cplex import CplexPyomoSolver
from .pyomo.gurobi import GurobiPyomoSolver
from miplearn.components.component import Component
from miplearn.components.dynamic_lazy import DynamicLazyConstraintsComponent
from miplearn.components.dynamic_user_cuts import UserCutsComponent
from miplearn.components.objective import ObjectiveValueComponent
from miplearn.components.primal import PrimalSolutionComponent
from miplearn.features.extractor import FeaturesExtractor
from miplearn.instance.base import Instance
from miplearn.instance.picklegz import PickleGzInstance
from miplearn.solvers import _RedirectOutput
from miplearn.solvers.internal import InternalSolver
from miplearn.solvers.pyomo.gurobi import GurobiPyomoSolver
from miplearn.types import LearningSolveStats
logger = logging.getLogger(__name__)
# Global memory for multiprocessing
SOLVER = [None] # type: List[Optional[LearningSolver]]
INSTANCES = [None] # type: List[Optional[dict]]
class _GlobalVariables:
def __init__(self) -> None:
self.solver: Optional[LearningSolver] = None
self.instances: Optional[List[Instance]] = None
self.discard_outputs: bool = False
def _parallel_solve(instance_idx):
solver = deepcopy(SOLVER[0])
instance = INSTANCES[0][instance_idx]
if not hasattr(instance, "found_violated_lazy_constraints"):
instance.found_violated_lazy_constraints = []
if not hasattr(instance, "found_violated_user_cuts"):
instance.found_violated_user_cuts = []
if not hasattr(instance, "slacks"):
instance.slacks = {}
solver_results = solver.solve(instance)
return {
"solver_results": solver_results,
"solution": instance.solution,
"lp_solution": instance.lp_solution,
"found_violated_lazy_constraints": instance.found_violated_lazy_constraints,
"found_violated_user_cuts": instance.found_violated_user_cuts,
"slacks": instance.slacks
}
# Global variables used for multiprocessing. Global variables are copied by the
# operating system when the process forks. Local variables are copied through
# serialization, which is a much slower process.
_GLOBAL = [_GlobalVariables()]
def _parallel_solve(
idx: int,
) -> Tuple[Optional[LearningSolveStats], Optional[Instance]]:
solver = _GLOBAL[0].solver
instances = _GLOBAL[0].instances
discard_outputs = _GLOBAL[0].discard_outputs
assert solver is not None
assert instances is not None
try:
stats = solver.solve(
instances[idx],
discard_output=discard_outputs,
)
instances[idx].free()
return stats, instances[idx]
except Exception as e:
traceback.print_exc()
logger.exception(f"Exception while solving {instances[idx]}. Ignoring.")
return None, None
class LearningSolver:
def __init__(self,
components=None,
gap_tolerance=1e-4,
mode="exact",
solver="gurobi",
threads=None,
time_limit=None,
node_limit=None,
solve_lp_first=True,
use_lazy_cb=False):
"""
Mixed-Integer Linear Programming (MIP) solver that extracts information
from previous runs and uses Machine Learning methods to accelerate the
@@ -60,122 +67,271 @@ class LearningSolver:
Parameters
----------
components
Set of components in the solver. By default, includes:
- ObjectiveValueComponent
- PrimalSolutionComponent
- DynamicLazyConstraintsComponent
- UserCutsComponent
gap_tolerance
Relative MIP gap tolerance. By default, 1e-4.
mode
components: List[Component]
Set of components in the solver. By default, includes
`ObjectiveValueComponent`, `PrimalSolutionComponent`,
`DynamicLazyConstraintsComponent` and `UserCutsComponent`.
mode: str
If "exact", solves problem to optimality, keeping all optimality
guarantees provided by the MIP solver. If "heuristic", uses machine
learning more agressively, and may return suboptimal solutions.
solver
The internal MIP solver to use. Can be either "cplex", "gurobi", a
solver class such as GurobiSolver, or a solver instance such as
GurobiSolver().
threads
Maximum number of threads to use. If None, uses solver default.
time_limit
Maximum running time in seconds. If None, uses solver default.
node_limit
Maximum number of branch-and-bound nodes to explore. If None, uses
solver default.
use_lazy_cb
If True, uses lazy callbacks to enforce lazy constraints, instead of
a simple solver loop. This functionality may not supported by
all internal MIP solvers.
solve_lp_first: bool
If true, solve LP relaxation first, then solve original MILP. This
learning more aggressively, and may return suboptimal solutions.
solver: Callable[[], InternalSolver]
A callable that constructs the internal solver. If None is provided,
use GurobiPyomoSolver.
use_lazy_cb: bool
If true, use native solver callbacks for enforcing lazy constraints,
instead of a simple loop. May not be supported by all solvers.
solve_lp: bool
If true, solve the root LP relaxation before solving the MIP. This
option should be activated if the LP relaxation is not very
expensive to solve and if it provides good hints for the integer
solution.
simulate_perfect: bool
If true, each call to solve actually performs three actions: solve
the original problem, train the ML models on the data that was just
collected, and solve the problem again. This is useful for evaluating
the theoretical performance of perfect ML models.
"""
self.components = {}
self.mode = mode
self.internal_solver = None
self.internal_solver_factory = solver
self.threads = threads
self.time_limit = time_limit
self.gap_tolerance = gap_tolerance
self.tee = False
self.node_limit = node_limit
self.solve_lp_first = solve_lp_first
self.use_lazy_cb = use_lazy_cb
def __init__(
self,
components: Optional[List[Component]] = None,
mode: str = "exact",
solver: Optional[InternalSolver] = None,
use_lazy_cb: bool = False,
solve_lp: bool = True,
simulate_perfect: bool = False,
extractor: Optional[FeaturesExtractor] = None,
extract_lhs: bool = True,
extract_sa: bool = True,
) -> None:
if solver is None:
solver = GurobiPyomoSolver()
if extractor is None:
extractor = FeaturesExtractor(
with_sa=extract_sa,
with_lhs=extract_lhs,
)
assert isinstance(solver, InternalSolver)
self.components: Dict[str, Component] = {}
self.internal_solver: Optional[InternalSolver] = None
self.internal_solver_prototype: InternalSolver = solver
self.mode: str = mode
self.simulate_perfect: bool = simulate_perfect
self.solve_lp: bool = solve_lp
self.tee = False
self.use_lazy_cb: bool = use_lazy_cb
self.extractor = extractor
if components is not None:
for comp in components:
self.add(comp)
self._add_component(comp)
else:
self.add(ObjectiveValueComponent())
self.add(PrimalSolutionComponent())
self.add(DynamicLazyConstraintsComponent())
self.add(UserCutsComponent())
self._add_component(ObjectiveValueComponent())
self._add_component(PrimalSolutionComponent(mode=mode))
self._add_component(DynamicLazyConstraintsComponent())
self._add_component(UserCutsComponent())
assert self.mode in ["exact", "heuristic"]
for component in self.components.values():
component.mode = self.mode
def _create_internal_solver(self):
logger.debug("Initializing %s" % self.internal_solver_factory)
if self.internal_solver_factory == "cplex":
solver = CplexPyomoSolver()
elif self.internal_solver_factory == "gurobi":
solver = GurobiPyomoSolver()
elif callable(self.internal_solver_factory):
solver = self.internal_solver_factory()
else:
solver = self.internal_solver_factory
if self.threads is not None:
logger.info("Setting threads to %d" % self.threads)
solver.set_threads(self.threads)
if self.time_limit is not None:
logger.info("Setting time limit to %f" % self.time_limit)
solver.set_time_limit(self.time_limit)
if self.gap_tolerance is not None:
logger.info("Setting gap tolerance to %f" % self.gap_tolerance)
solver.set_gap_tolerance(self.gap_tolerance)
if self.node_limit is not None:
logger.info("Setting node limit to %d" % self.node_limit)
solver.set_node_limit(self.node_limit)
return solver
def _solve(
self,
instance: Instance,
model: Any = None,
discard_output: bool = False,
tee: bool = False,
) -> LearningSolveStats:
def solve(self,
# Generate model
# -------------------------------------------------------
instance.load()
if model is None:
with _RedirectOutput([]):
model = instance.to_model()
# Initialize training sample
# -------------------------------------------------------
sample = instance.create_sample()
# Initialize stats
# -------------------------------------------------------
stats: LearningSolveStats = {}
# Initialize internal solver
# -------------------------------------------------------
self.tee = tee
self.internal_solver = self.internal_solver_prototype.clone()
assert self.internal_solver is not None
assert isinstance(self.internal_solver, InternalSolver)
self.internal_solver.set_instance(instance, model)
# Extract features (after-load)
# -------------------------------------------------------
logger.info("Extracting features (after-load)...")
initial_time = time.time()
self.extractor.extract_after_load_features(
instance, self.internal_solver, sample
)
logger.info(
"Features (after-load) extracted in %.2f seconds"
% (time.time() - initial_time)
)
callback_args = (
self,
instance,
model=None,
tee=False):
model,
stats,
sample,
)
# Solve root LP relaxation
# -------------------------------------------------------
lp_stats = None
if self.solve_lp:
logger.debug("Running before_solve_lp callbacks...")
for component in self.components.values():
component.before_solve_lp(*callback_args)
logger.info("Solving root LP relaxation...")
lp_stats = self.internal_solver.solve_lp(tee=tee)
stats.update(cast(LearningSolveStats, lp_stats.__dict__))
assert lp_stats.lp_wallclock_time is not None
logger.info(
"LP relaxation solved in %.2f seconds" % lp_stats.lp_wallclock_time
)
logger.debug("Running after_solve_lp callbacks...")
for component in self.components.values():
component.after_solve_lp(*callback_args)
# Extract features (after-lp)
# -------------------------------------------------------
logger.info("Extracting features (after-lp)...")
initial_time = time.time()
self.extractor.extract_after_lp_features(
self.internal_solver, sample, lp_stats
)
logger.info(
"Features (after-lp) extracted in %.2f seconds"
% (time.time() - initial_time)
)
# Callback wrappers
# -------------------------------------------------------
def iteration_cb_wrapper() -> bool:
should_repeat = False
for comp in self.components.values():
if comp.iteration_cb(self, instance, model):
should_repeat = True
return should_repeat
def lazy_cb_wrapper(
cb_solver: InternalSolver,
cb_model: Any,
) -> None:
for comp in self.components.values():
comp.lazy_cb(self, instance, model)
def user_cut_cb_wrapper(
cb_solver: InternalSolver,
cb_model: Any,
) -> None:
for comp in self.components.values():
comp.user_cut_cb(self, instance, model)
lazy_cb = None
if self.use_lazy_cb:
lazy_cb = lazy_cb_wrapper
user_cut_cb = None
if instance.has_user_cuts():
user_cut_cb = user_cut_cb_wrapper
# Before-solve callbacks
# -------------------------------------------------------
logger.debug("Running before_solve_mip callbacks...")
for component in self.components.values():
component.before_solve_mip(*callback_args)
# Solve MIP
# -------------------------------------------------------
logger.info("Solving MIP...")
mip_stats = self.internal_solver.solve(
tee=tee,
iteration_cb=iteration_cb_wrapper,
user_cut_cb=user_cut_cb,
lazy_cb=lazy_cb,
)
assert mip_stats.mip_wallclock_time is not None
logger.info("MIP solved in %.2f seconds" % mip_stats.mip_wallclock_time)
stats.update(cast(LearningSolveStats, mip_stats.__dict__))
stats["Solver"] = "default"
stats["Gap"] = self._compute_gap(
ub=mip_stats.mip_upper_bound,
lb=mip_stats.mip_lower_bound,
)
stats["Mode"] = self.mode
# Extract features (after-mip)
# -------------------------------------------------------
logger.info("Extracting features (after-mip)...")
initial_time = time.time()
for (k, v) in mip_stats.__dict__.items():
sample.put_scalar(k, v)
self.extractor.extract_after_mip_features(self.internal_solver, sample)
logger.info(
"Features (after-mip) extracted in %.2f seconds"
% (time.time() - initial_time)
)
# After-solve callbacks
# -------------------------------------------------------
logger.debug("Calling after_solve_mip callbacks...")
for component in self.components.values():
component.after_solve_mip(*callback_args)
# Flush
# -------------------------------------------------------
if not discard_output:
instance.flush()
return stats
def solve(
self,
instance: Instance,
model: Any = None,
discard_output: bool = False,
tee: bool = False,
) -> LearningSolveStats:
"""
Solves the given instance. If trained machine-learning models are
available, they will be used to accelerate the solution process.
This method modifies the instance object. Specifically, the following
properties are set:
- instance.lp_solution
- instance.lp_value
- instance.lower_bound
- instance.upper_bound
- instance.solution
- instance.solver_log
Additional solver components may set additional properties. Please
see their documentation for more details.
The argument `instance` may be either an Instance object or a
filename pointing to a pickled Instance object.
This method adds a new training sample to `instance.training_sample`.
If a filename is provided, then the file is modified in-place. That is,
the original file is overwritten.
If `solver.solve_lp_first` is False, the properties lp_solution and
lp_value will be set to dummy values.
Parameters
----------
instance: miplearn.Instance
The instance to be solved
model: pyomo.core.ConcreteModel
instance: Instance
The instance to be solved.
model: Any
The corresponding Pyomo model. If not provided, it will be created.
discard_output: bool
If True, do not write the modified instances anywhere; simply discard
them. Useful during benchmarking.
tee: bool
If true, prints solver log to screen.
Returns
-------
dict
LearningSolveStats
A dictionary of solver statistics containing at least the following
keys: "Lower bound", "Upper bound", "Wallclock time", "Nodes",
"Sense", "Log", "Warm start value" and "LP value".
@@ -185,106 +341,118 @@ class LearningSolver:
"Predicted UB". See the documentation of each component for more
details.
"""
if self.simulate_perfect:
if not isinstance(instance, PickleGzInstance):
raise Exception("Not implemented")
self._solve(
instance=instance,
model=model,
tee=tee,
)
self.fit([instance])
instance.instance = None
return self._solve(
instance=instance,
model=model,
discard_output=discard_output,
tee=tee,
)
if model is None:
model = instance.to_model()
def parallel_solve(
self,
instances: List[Instance],
n_jobs: int = 4,
label: str = "Solve",
discard_outputs: bool = False,
) -> List[LearningSolveStats]:
"""
Solves multiple instances in parallel.
self.tee = tee
self.internal_solver = self._create_internal_solver()
self.internal_solver.set_instance(instance, model)
This method is equivalent to calling `solve` for each item on the list,
but it processes multiple instances at the same time. Like `solve`, this
method modifies each instance in place. Also like `solve`, a list of
filenames may be provided.
if self.solve_lp_first:
logger.info("Solving LP relaxation...")
results = self.internal_solver.solve_lp(tee=tee)
instance.lp_solution = self.internal_solver.get_solution()
instance.lp_value = results["Optimal value"]
Parameters
----------
discard_outputs: bool
If True, do not write the modified instances anywhere; simply discard
them instead. Useful during benchmarking.
label: str
Label to show in the progress bar.
instances: List[Instance]
The instances to be solved.
n_jobs: int
Number of instances to solve in parallel at a time.
Returns
-------
List[LearningSolveStats]
List of solver statistics, with one entry for each provided instance.
The list is the same you would obtain by calling
`[solver.solve(p) for p in instances]`
"""
if n_jobs == 1:
return [self.solve(p) for p in instances]
else:
instance.lp_solution = self.internal_solver.get_empty_solution()
instance.lp_value = 0.0
logger.debug("Running before_solve callbacks...")
for component in self.components.values():
component.before_solve(self, instance, model)
def iteration_cb():
should_repeat = False
for comp in self.components.values():
if comp.after_iteration(self, instance, model):
should_repeat = True
return should_repeat
def lazy_cb_wrapper(cb_solver, cb_model):
for comp in self.components.values():
comp.on_lazy_callback(self, instance, model)
lazy_cb = None
if self.use_lazy_cb:
lazy_cb = lazy_cb_wrapper
logger.info("Solving MILP...")
results = self.internal_solver.solve(tee=tee,
iteration_cb=iteration_cb,
lazy_cb=lazy_cb)
results["LP value"] = instance.lp_value
# Read MIP solution and bounds
instance.lower_bound = results["Lower bound"]
instance.upper_bound = results["Upper bound"]
instance.solver_log = results["Log"]
instance.solution = self.internal_solver.get_solution()
logger.debug("Calling after_solve callbacks...")
for component in self.components.values():
component.after_solve(self, instance, model, results)
return results
def parallel_solve(self,
instances,
n_jobs=4,
label="Solve"):
self.internal_solver = None
self._silence_miplearn_logger()
SOLVER[0] = self
INSTANCES[0] = instances
p_map_results = p_map(_parallel_solve,
_GLOBAL[0].solver = self
_GLOBAL[0].instances = instances
_GLOBAL[0].discard_outputs = discard_outputs
results = p_map(
_parallel_solve,
list(range(len(instances))),
num_cpus=n_jobs,
desc=label)
results = [p["solver_results"] for p in p_map_results]
for (idx, r) in enumerate(p_map_results):
instances[idx].solution = r["solution"]
instances[idx].lp_solution = r["lp_solution"]
instances[idx].lp_value = r["solver_results"]["LP value"]
instances[idx].lower_bound = r["solver_results"]["Lower bound"]
instances[idx].upper_bound = r["solver_results"]["Upper bound"]
instances[idx].found_violated_lazy_constraints = r["found_violated_lazy_constraints"]
instances[idx].found_violated_user_cuts = r["found_violated_user_cuts"]
instances[idx].slacks = r["slacks"]
instances[idx].solver_log = r["solver_results"]["Log"]
desc=label,
)
results = [r for r in results if r[0]]
stats = []
for (idx, (s, instance)) in enumerate(results):
stats.append(s)
instances[idx] = instance
self._restore_miplearn_logger()
return results
return stats
def fit(self, training_instances):
def fit(
self,
training_instances: List[Instance],
n_jobs: int = 1,
) -> None:
if len(training_instances) == 0:
logger.warning("Empty list of training instances provided. Skipping.")
return
for component in self.components.values():
component.fit(training_instances)
Component.fit_multiple(
list(self.components.values()),
training_instances,
n_jobs=n_jobs,
)
def add(self, component):
def _add_component(self, component: Component) -> None:
name = component.__class__.__name__
self.components[name] = component
def _silence_miplearn_logger(self):
def _silence_miplearn_logger(self) -> None:
miplearn_logger = logging.getLogger("miplearn")
self.prev_log_level = miplearn_logger.getEffectiveLevel()
miplearn_logger.setLevel(logging.WARNING)
def _restore_miplearn_logger(self):
def _restore_miplearn_logger(self) -> None:
miplearn_logger = logging.getLogger("miplearn")
miplearn_logger.setLevel(self.prev_log_level)
def __getstate__(self):
def __getstate__(self) -> Dict:
self.internal_solver = None
return self.__dict__
@staticmethod
def _compute_gap(ub: Optional[float], lb: Optional[float]) -> Optional[float]:
if lb is None or ub is None or lb * ub < 0:
# solver did not find a solution and/or bound
return None
elif abs(ub - lb) < 1e-6:
# avoid division by zero when ub = lb = 0
return 0.0
else:
# divide by max(abs(ub),abs(lb)) to ensure gap <= 1
return (ub - lb) / max(abs(ub), abs(lb))

View File

@@ -1,3 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

View File

@@ -1,19 +1,40 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
import re
import sys
import logging
import pyomo
from abc import abstractmethod
from io import StringIO
from pyomo import environ as pe
from pyomo.core import Var, Constraint
from typing import Any, List, Dict, Optional, Tuple
from .. import RedirectOutput
from ..internal import InternalSolver
from ...instance import Instance
import numpy as np
import pyomo
from overrides import overrides
from pyomo import environ as pe
from pyomo.core import Var, Suffix, Objective
from pyomo.core.base import _GeneralVarData
from pyomo.core.base.constraint import ConstraintList
from pyomo.core.expr.numeric_expr import SumExpression, MonomialTermExpression
from pyomo.opt import TerminationCondition
from pyomo.opt.base.solvers import SolverFactory
from miplearn.instance.base import Instance
from miplearn.solvers import _RedirectOutput, _none_if_empty
from miplearn.solvers.internal import (
InternalSolver,
LPSolveStats,
IterationCallback,
LazyCallback,
MIPSolveStats,
Variables,
Constraints,
)
from miplearn.types import (
SolverParams,
UserCutCallback,
Solution,
)
logger = logging.getLogger(__name__)
@@ -23,160 +44,453 @@ class BasePyomoSolver(InternalSolver):
Base class for all Pyomo solvers.
"""
def __init__(self):
self.instance = None
self.model = None
self._all_vars = None
self._bin_vars = None
self._is_warm_start_available = False
self._pyomo_solver = None
self._obj_sense = None
self._varname_to_var = {}
self._cname_to_constr = {}
def __init__(
self,
solver_factory: SolverFactory,
params: SolverParams,
) -> None:
self.instance: Optional[Instance] = None
self.model: Optional[pe.ConcreteModel] = None
self.params = params
self._all_vars: List[pe.Var] = []
self._bin_vars: List[pe.Var] = []
self._is_warm_start_available: bool = False
self._pyomo_solver: SolverFactory = solver_factory
self._obj_sense: str = "min"
self._varname_to_var: Dict[bytes, pe.Var] = {}
self._cname_to_constr: Dict[str, pe.Constraint] = {}
self._termination_condition: str = ""
self._has_lp_solution = False
self._has_mip_solution = False
self._obj: Dict[str, float] = {}
def solve_lp(self, tee=False):
for var in self._bin_vars:
lb, ub = var.bounds
var.setlb(lb)
var.setub(ub)
var.domain = pyomo.core.base.set_types.Reals
self._pyomo_solver.update_var(var)
results = self._pyomo_solver.solve(tee=tee)
for var in self._bin_vars:
var.domain = pyomo.core.base.set_types.Binary
self._pyomo_solver.update_var(var)
return {
"Optimal value": results["Problem"][0]["Lower bound"],
}
for (key, value) in params.items():
self._pyomo_solver.options[key] = value
def get_solution(self):
solution = {}
def add_constraint(
self,
constr: Any,
) -> None:
assert self.model is not None
self._pyomo_solver.add_constraint(constr)
self._termination_condition = ""
self._has_lp_solution = False
self._has_mip_solution = False
@overrides
def add_constraints(self, cf: Constraints) -> None:
assert cf.names is not None
assert cf.senses is not None
assert cf.lhs is not None
assert cf.rhs is not None
assert self.model is not None
for (i, name) in enumerate(cf.names):
lhs = 0.0
for (varname, coeff) in cf.lhs[i]:
var = self._varname_to_var[varname]
lhs += var * coeff
if cf.senses[i] == b"=":
expr = lhs == cf.rhs[i]
elif cf.senses[i] == b"<":
expr = lhs <= cf.rhs[i]
elif cf.senses[i] == b">":
expr = lhs >= cf.rhs[i]
else:
raise Exception(f"Unknown sense: {cf.senses[i]}")
cl = pe.Constraint(expr=expr, name=name)
self.model.add_component(name.decode(), cl)
self._pyomo_solver.add_constraint(cl)
self._cname_to_constr[name] = cl
self._termination_condition = ""
self._has_lp_solution = False
self._has_mip_solution = False
@overrides
def are_callbacks_supported(self) -> bool:
return False
@overrides
def are_constraints_satisfied(
self,
cf: Constraints,
tol: float = 1e-5,
) -> List[bool]:
assert cf.names is not None
assert cf.lhs is not None
assert cf.rhs is not None
assert cf.senses is not None
result = []
for (i, name) in enumerate(cf.names):
lhs = 0.0
for (varname, coeff) in cf.lhs[i]:
var = self._varname_to_var[varname]
lhs += var.value * coeff
if cf.senses[i] == "<":
result.append(lhs <= cf.rhs[i] + tol)
elif cf.senses[i] == ">":
result.append(lhs >= cf.rhs[i] - tol)
else:
result.append(abs(cf.rhs[i] - lhs) < tol)
return result
@overrides
def build_test_instance_infeasible(self) -> Instance:
return PyomoTestInstanceInfeasible()
@overrides
def build_test_instance_knapsack(self) -> Instance:
return PyomoTestInstanceKnapsack(
weights=[23.0, 26.0, 20.0, 18.0],
prices=[505.0, 352.0, 458.0, 220.0],
capacity=67.0,
)
@overrides
def fix(self, solution: Solution) -> None:
for (varname, value) in solution.items():
if value is None:
continue
var = self._varname_to_var[varname]
var.fix(value)
self._pyomo_solver.update_var(var)
@overrides
def get_constraints(
self,
with_static: bool = True,
with_sa: bool = True,
with_lhs: bool = True,
) -> Constraints:
"""Extract all model constraints as a Constraints bundle.

Static data (names, senses, rhs, lhs) is collected when with_static is
set; dual values / slacks only when an LP / any solution is available.
NOTE(review): with_sa is accepted but unused here — sensitivity data is
not produced by the Pyomo backend (see get_constraint_attrs).
"""
model = self.model
assert model is not None
names: List[str] = []
rhs: List[float] = []
lhs: List[List[Tuple[bytes, float]]] = []
senses: List[str] = []
dual_values: List[float] = []
slacks: List[float] = []
def _parse_constraint(c: pe.Constraint) -> None:
# Append c's data to the accumulator lists above.
assert model is not None
if with_static:
# Extract RHS and sense
has_ub = c.has_ub()
has_lb = c.has_lb()
assert (
(not has_lb) or (not has_ub) or c.upper() == c.lower()
), "range constraints not supported"
if not has_ub:
senses.append(">")
rhs.append(float(c.lower()))
elif not has_lb:
senses.append("<")
rhs.append(float(c.upper()))
else:
# has both bounds and they are equal: equality constraint
senses.append("=")
rhs.append(float(c.upper()))
if with_lhs:
# Extract LHS as (encoded var name, coefficient) pairs; only
# linear expressions (sums of monomials / bare vars) supported.
lhsc = []
expr = c.body
if isinstance(expr, SumExpression):
for term in expr._args_:
if isinstance(term, MonomialTermExpression):
lhsc.append(
(
term._args_[1].name.encode(),
float(term._args_[0]),
)
)
elif isinstance(term, _GeneralVarData):
lhsc.append((term.name.encode(), 1.0))
else:
raise Exception(
f"Unknown term type: {term.__class__.__name__}"
)
elif isinstance(expr, _GeneralVarData):
lhsc.append((expr.name.encode(), 1.0))
else:
raise Exception(
f"Unknown expression type: {expr.__class__.__name__}"
)
lhs.append(lhsc)
# Extract dual values
if self._has_lp_solution:
dual_values.append(model.dual[c])
# Extract slacks
if self._has_mip_solution or self._has_lp_solution:
slacks.append(model.slack[c])
for constr in model.component_objects(pyomo.core.Constraint):
if isinstance(constr, pe.ConstraintList):
# Expand constraint lists into "name[idx]" entries.
for idx in constr:
names.append(f"{constr.name}[{idx}]")
_parse_constraint(constr[idx])
else:
names.append(constr.name)
_parse_constraint(constr)
return Constraints(
names=_none_if_empty(np.array(names, dtype="S")),
rhs=_none_if_empty(np.array(rhs, dtype=float)),
senses=_none_if_empty(np.array(senses, dtype="S")),
lhs=_none_if_empty(lhs),
slacks=_none_if_empty(np.array(slacks, dtype=float)),
dual_values=_none_if_empty(np.array(dual_values, dtype=float)),
)
@overrides
def get_constraint_attrs(self) -> List[str]:
    """Names of the Constraints fields this solver can populate."""
    return ["dual_values", "lhs", "names", "rhs", "senses", "slacks"]
@overrides
def get_solution(self) -> Optional[Solution]:
    """Return the current solution as {encoded "var[idx]" name: value},
    or None if the model is infeasible.

    Bug fix: merge residue also inserted str-keyed nested dicts
    (solution[str(var)][index] = ...) ahead of the `fixed` check, mixing
    two incompatible key schemes in one Solution.  Only the bytes-keyed
    flat entries (the scheme used by fix/set_warm_start and the shared
    tests) are produced now, and fixed variables are skipped.
    """
    assert self.model is not None
    if self.is_infeasible():
        return None
    solution: Solution = {}
    for var in self.model.component_objects(Var):
        for index in var:
            if var[index].fixed:
                continue
            solution[f"{var}[{index}]".encode()] = var[index].value
    return solution
def get_value(self, var_name, index):
# NOTE(review): pre-0.2 helper; looks up an indexed variable's current
# value.  _varname_to_var is keyed by encoded "name[idx]" elsewhere in
# this file — confirm which key type callers pass here.
var = self._varname_to_var[var_name]
return var[index].value
@overrides
def get_variables(
    self,
    with_static: bool = True,
    with_sa: bool = True,
) -> Variables:
    """Extract all model variables as a Variables bundle.

    Static data (types, bounds, objective coefficients) is collected when
    with_static is set; reduced costs only after an LP solve and values
    after any solve.  NOTE(review): with_sa is accepted but unused —
    sensitivity data is not produced by the Pyomo backend.

    Bug fix: this method was corrupted by merge residue — the bodies of
    pre-0.2 methods (a nested get_variables, set_warm_start,
    clear_warm_start, set_instance) were interleaved into it, leaving
    `v`/`idx` referenced outside any loop.  The residue has been removed
    and the extraction loop restored around the surviving per-variable
    logic.
    """
    assert self.model is not None
    names: List[str] = []
    types: List[str] = []
    upper_bounds: List[float] = []
    lower_bounds: List[float] = []
    obj_coeffs: List[float] = []
    reduced_costs: List[float] = []
    values: List[float] = []
    for var in self.model.component_objects(pyomo.core.Var):
        for idx in var:
            v = var[idx]
            # Variable name ("name" for scalar vars, "name[idx]" otherwise)
            if idx is None:
                names.append(str(var))
            else:
                names.append(f"{var}[{idx}]")
            if with_static:
                # Variable type
                if v.domain == pyomo.core.Binary:
                    types.append("B")
                elif v.domain in [
                    pyomo.core.Reals,
                    pyomo.core.NonNegativeReals,
                    pyomo.core.NonPositiveReals,
                    pyomo.core.NegativeReals,
                    pyomo.core.PositiveReals,
                ]:
                    types.append("C")
                else:
                    raise Exception(f"unknown variable domain: {v.domain}")
                # Bounds
                lb, ub = v.bounds
                upper_bounds.append(float(ub))
                lower_bounds.append(float(lb))
                # Objective coefficient (0.0 if absent from the objective)
                if v.name in self._obj:
                    obj_coeffs.append(self._obj[v.name])
                else:
                    obj_coeffs.append(0.0)
            # Reduced costs (LP only)
            if self._has_lp_solution:
                reduced_costs.append(self.model.rc[v])
            # Values (any solution)
            if self._has_lp_solution or self._has_mip_solution:
                values.append(v.value)
    return Variables(
        names=_none_if_empty(np.array(names, dtype="S")),
        types=_none_if_empty(np.array(types, dtype="S")),
        upper_bounds=_none_if_empty(np.array(upper_bounds, dtype=float)),
        lower_bounds=_none_if_empty(np.array(lower_bounds, dtype=float)),
        obj_coeffs=_none_if_empty(np.array(obj_coeffs, dtype=float)),
        reduced_costs=_none_if_empty(np.array(reduced_costs, dtype=float)),
        values=_none_if_empty(np.array(values, dtype=float)),
    )
@overrides
def get_variable_attrs(self) -> List[str]:
    """Names of the Variables fields this solver can populate.

    Basis status and sensitivity-analysis fields (sa_*) are not available
    through the Pyomo interface and are therefore omitted.
    """
    attrs = [
        "names",
        "categories",
        "lower_bounds",
        "obj_coeffs",
        "reduced_costs",
        "types",
        "upper_bounds",
        "user_features",
        "values",
    ]
    return attrs
@overrides
def is_infeasible(self) -> bool:
# True only after a solve ended with TerminationCondition.infeasible.
return self._termination_condition == TerminationCondition.infeasible
@overrides
def remove_constraints(self, names: List[str]) -> None:
"""Remove the named constraints from the model and persistent solver.

NOTE(review): _cname_to_constr is keyed with bytes in add_constraints
but with str in _update_constrs — confirm which key type callers pass.
"""
assert self.model is not None
for name in names:
constr = self._cname_to_constr[name]
del self._cname_to_constr[name]
self.model.del_component(constr)
self._pyomo_solver.remove_constraint(constr)
@overrides
def set_instance(
self,
instance: Instance,
model: Any = None,
) -> None:
"""Load an instance (building its Pyomo model if not given) into the
persistent solver and rebuild all cached lookups."""
if model is None:
model = instance.to_model()
assert isinstance(instance, Instance)
assert isinstance(model, pe.ConcreteModel)
self.instance = instance
self.model = model
self.model.extra_constraints = ConstraintList()
# Suffixes let Pyomo import duals, reduced costs and slacks after solves.
self.model.dual = Suffix(direction=Suffix.IMPORT)
self.model.rc = Suffix(direction=Suffix.IMPORT)
self.model.slack = Suffix(direction=Suffix.IMPORT)
self._pyomo_solver.set_instance(model)
# Rebuild cached objective sense/coefficients, variables, constraints.
self._update_obj()
self._update_vars()
self._update_constrs()
def _update_obj(self):
# NOTE(review): duplicate of the typed _update_obj defined later in this
# file; appears to be merge residue from the pre-0.2 version.
self._obj_sense = "max"
if self._pyomo_solver._objective.sense == pyomo.core.kernel.objective.minimize:
self._obj_sense = "min"
def _update_vars(self):
# NOTE(review): duplicate of the typed _update_vars defined later, which
# keys _varname_to_var by encoded "name[idx]" instead of the bare name
# used here; appears to be merge residue.
self._all_vars = []
self._bin_vars = []
self._varname_to_var = {}
for var in self.model.component_objects(Var):
self._varname_to_var[var.name] = var
for idx in var:
self._all_vars += [var[idx]]
if var[idx].domain == pyomo.core.base.set_types.Binary:
self._bin_vars += [var[idx]]
def _update_constrs(self):
# NOTE(review): duplicate of the typed _update_constrs defined later,
# which also expands ConstraintLists; appears to be merge residue.
self._cname_to_constr = {}
for constr in self.model.component_objects(Constraint):
self._cname_to_constr[constr.name] = constr
@overrides
def set_warm_start(self, solution: Solution) -> None:
    """Load `solution` as a MIP warm start.

    Entries whose value is None are skipped.  The warm start is only
    flagged as available when at least one value was set.

    Bug fix: this region contained two methods interleaved by a bad merge
    — a truncated pre-0.2 `fix` (its body cut off mid-`if`) spliced into
    the new `set_warm_start`, whose loop also referenced the old method's
    `index`/`varname` locals.  The residue has been removed, restoring the
    coherent warm-start implementation.
    """
    self._clear_warm_start()
    count_fixed = 0
    for (var_name, value) in solution.items():
        if value is None:
            continue
        var = self._varname_to_var[var_name]
        var.value = solution[var_name]
        count_fixed += 1
    if count_fixed > 0:
        self._is_warm_start_available = True
def add_constraint(self, constraint):
# NOTE(review): pre-0.2 API; adds a single already-built Pyomo constraint
# to the persistent solver and refreshes the constraint-name cache.
self._pyomo_solver.add_constraint(constraint)
self._update_constrs()
@overrides
def solve(
    self,
    tee: bool = False,
    iteration_cb: Optional[IterationCallback] = None,
    lazy_cb: Optional[LazyCallback] = None,
    user_cut_cb: Optional[UserCutCallback] = None,
) -> MIPSolveStats:
    """Solve the MIP, re-solving while iteration_cb requests another round.

    Parameters
    ----------
    tee: bool
        If True, echo the solver log to stdout while also capturing it.
    iteration_cb: Optional[IterationCallback]
        Called after every solve; return True to solve again.
    lazy_cb, user_cut_cb
        Not supported by the Pyomo backend; must be None.

    Bug fix: the pre-0.2 `solve` was interleaved with this one by a bad
    merge — a duplicate untyped header, a duplicate untyped `streams`
    assignment, a second solve call through the removed RedirectOutput
    class, and a stray stats-dict `return` in the middle of the loop.
    The residue has been removed, restoring one coherent solve loop.
    """
    assert lazy_cb is None, "callbacks are not currently supported"
    assert user_cut_cb is None, "callbacks are not currently supported"
    total_wallclock_time = 0
    streams: List[Any] = [StringIO()]
    if tee:
        streams += [sys.stdout]
    if iteration_cb is None:
        iteration_cb = lambda: False
    while True:
        logger.debug("Solving MIP...")
        with _RedirectOutput(streams):
            results = self._pyomo_solver.solve(
                tee=True,
                warmstart=self._is_warm_start_available,
            )
        total_wallclock_time += results["Solver"][0]["Wallclock time"]
        should_repeat = iteration_cb()
        if not should_repeat:
            break
    log = streams[0].getvalue()
    node_count = self._extract_node_count(log)
    ws_value = self._extract_warm_start_value(log)
    self._termination_condition = results["Solver"][0]["Termination condition"]
    lb, ub = None, None
    self._has_mip_solution = False
    self._has_lp_solution = False
    if not self.is_infeasible():
        self._has_mip_solution = True
        lb = results["Problem"][0]["Lower bound"]
        ub = results["Problem"][0]["Upper bound"]
    return MIPSolveStats(
        mip_lower_bound=lb,
        mip_upper_bound=ub,
        mip_wallclock_time=total_wallclock_time,
        mip_sense=self._obj_sense,
        mip_log=log,
        mip_nodes=node_count,
        mip_warm_start_value=ws_value,
    )
@overrides
def solve_lp(
    self,
    tee: bool = False,
) -> LPSolveStats:
    """Solve the LP relaxation and restore integrality afterwards.

    Binary variables are temporarily made continuous (_relax), the LP is
    solved with output captured, then the variables are made binary again.
    """
    self._relax()
    output_streams: List[Any] = [StringIO()]
    if tee:
        output_streams.append(sys.stdout)
    with _RedirectOutput(output_streams):
        results = self._pyomo_solver.solve(tee=True)
    self._termination_condition = results["Solver"][0]["Termination condition"]
    self._restore_integrality()
    opt_value = None
    self._has_lp_solution = False
    self._has_mip_solution = False
    if not self.is_infeasible():
        opt_value = results["Problem"][0]["Lower bound"]
        self._has_lp_solution = True
    return LPSolveStats(
        lp_value=opt_value,
        lp_log=output_streams[0].getvalue(),
        lp_wallclock_time=results["Solver"][0]["Wallclock time"],
    )
def _clear_warm_start(self) -> None:
    """Erase previously loaded warm-start values, leaving fixed variables
    untouched, and mark the warm start as unavailable."""
    for variable in self._all_vars:
        if variable.fixed:
            continue
        variable.value = None
    self._is_warm_start_available = False
@staticmethod
def __extract(log, regexp, default=None):
def _extract(
log: str,
regexp: Optional[str],
default: Optional[str] = None,
) -> Optional[str]:
if regexp is None:
return default
value = default
for line in log.splitlines():
matches = re.findall(regexp, line)
@@ -185,68 +499,150 @@ class BasePyomoSolver(InternalSolver):
value = matches[0]
return value
def _extract_warm_start_value(self, log):
# NOTE(review): pre-0.2 variant; calls the removed name-mangled __extract
# helper and is superseded by the typed version below — merge residue.
value = self.__extract(log, self._get_warm_start_regexp())
if value is not None:
value = float(value)
return value
def _extract_node_count(self, log: str) -> Optional[int]:
# Parse the explored node count from the solver log; None if unavailable.
value = self._extract(log, self._get_node_count_regexp())
if value is None:
return None
return int(value)
def _extract_node_count(self, log):
# NOTE(review): pre-0.2 duplicate of the method above (different default
# behavior: falls back to 1); merge residue.
return int(self.__extract(log,
self._get_node_count_regexp(),
default=1))
def _extract_warm_start_value(self, log: str) -> Optional[float]:
# Parse the warm-start objective value from the solver log, if reported.
value = self._extract(log, self._get_warm_start_regexp())
if value is None:
return None
return float(value)
def set_threads(self, threads):
# NOTE(review): pre-0.2 API; writes the solver-specific thread option.
key = self._get_threads_option_name()
self._pyomo_solver.options[key] = threads
def _get_node_count_regexp(self) -> Optional[str]:
# Default: no node-count regexp; subclasses override when their log
# format exposes one.
return None
def set_time_limit(self, time_limit):
# NOTE(review): pre-0.2 API; writes the solver-specific time-limit option.
key = self._get_time_limit_option_name()
self._pyomo_solver.options[key] = time_limit
def _get_warm_start_regexp(self) -> Optional[str]:
# Default: no warm-start regexp; subclasses override when their log
# format exposes one.
return None
def set_node_limit(self, node_limit):
# NOTE(review): pre-0.2 API; writes the solver-specific node-limit option.
key = self._get_node_limit_option_name()
self._pyomo_solver.options[key] = node_limit
def _parse_pyomo_expr(self, expr: Any) -> Dict[str, float]:
"""Convert a linear Pyomo expression into {variable name: coefficient}.

Supports sums of monomial terms and bare variables (coefficient 1.0);
raises for any other expression type.
"""
lhs = {}
if isinstance(expr, SumExpression):
for term in expr._args_:
if isinstance(term, MonomialTermExpression):
# _args_ is (coefficient, variable) for monomial terms.
lhs[term._args_[1].name] = float(term._args_[0])
elif isinstance(term, _GeneralVarData):
lhs[term.name] = 1.0
else:
raise Exception(f"Unknown term type: {term.__class__.__name__}")
elif isinstance(expr, _GeneralVarData):
lhs[expr.name] = 1.0
else:
raise Exception(f"Unknown expression type: {expr.__class__.__name__}")
return lhs
def set_gap_tolerance(self, gap_tolerance):
# NOTE(review): pre-0.2 API; writes the solver-specific MIP-gap option.
key = self._get_gap_tolerance_option_name()
self._pyomo_solver.options[key] = gap_tolerance
def _relax(self) -> None:
# Temporarily make every binary variable continuous (keeping its current
# bounds) so that solve_lp solves the LP relaxation.
for var in self._bin_vars:
lb, ub = var.bounds
var.setlb(lb)
var.setub(ub)
var.domain = pyomo.core.base.set_types.Reals
self._pyomo_solver.update_var(var)
def get_constraint_ids(self):
# NOTE(review): pre-0.2 API; returns the cached constraint names.
return list(self._cname_to_constr.keys())
def _restore_integrality(self) -> None:
# Undo _relax: make the relaxed variables binary again.
for var in self._bin_vars:
var.domain = pyomo.core.base.set_types.Binary
self._pyomo_solver.update_var(var)
def extract_constraint(self, cid):
# Not supported by the Pyomo backend.
raise Exception("Not implemented")
def _update_obj(self) -> None:
# Cache the objective sense ("min"/"max") from the persistent solver.
self._obj_sense = "max"
if self._pyomo_solver._objective.sense == pyomo.core.kernel.objective.minimize:
self._obj_sense = "min"
def is_constraint_satisfied(self, cobj):
# Not supported by the Pyomo backend.
raise Exception("Not implemented")
def _update_vars(self) -> None:
    """Rebuild the variable caches (_all_vars, _bin_vars, _varname_to_var)
    and the objective-coefficient map (_obj) from the current model.

    Lookup keys are encoded names: b"name[idx]" for indexed variables and
    the bare b"name" for scalar (idx is None) variables.
    """
    assert self.model is not None
    self._all_vars = []
    self._bin_vars = []
    self._varname_to_var = {}
    for var in self.model.component_objects(Var):
        for idx in var:
            vdata = var[idx]
            if idx is None:
                key = var.name.encode()
            else:
                key = f"{var.name}[{idx}]".encode()
            self._varname_to_var[key] = vdata
            self._all_vars.append(vdata)
            if vdata.domain == pyomo.core.base.set_types.Binary:
                self._bin_vars.append(vdata)
    # Cache coefficients from the first objective found.
    for obj in self.model.component_objects(Objective):
        self._obj = self._parse_pyomo_expr(obj.expr)
        break
@abstractmethod
def _get_warm_start_regexp(self):
# Subclasses return a regexp matching the warm-start value in the log.
pass
def _update_constrs(self) -> None:
"""Rebuild the name -> constraint cache, expanding ConstraintLists into
"name[idx]" entries."""
assert self.model is not None
self._cname_to_constr.clear()
for constr in self.model.component_objects(pyomo.core.Constraint):
if isinstance(constr, pe.ConstraintList):
for idx in constr:
self._cname_to_constr[f"{constr.name}[{idx}]"] = constr[idx]
else:
self._cname_to_constr[constr.name] = constr
@abstractmethod
def _get_node_count_regexp(self):
# Subclasses return a regexp matching the node count in the log.
pass
@abstractmethod
def _get_threads_option_name(self):
# Subclasses return the solver option name controlling thread count.
pass
class PyomoTestInstanceInfeasible(Instance):
    """Tiny test instance that is infeasible by construction: a single
    binary variable constrained to be at least 2."""

    @overrides
    def to_model(self) -> pe.ConcreteModel:
        m = pe.ConcreteModel()
        m.x = pe.Var([0], domain=pe.Binary)
        m.OBJ = pe.Objective(expr=m.x[0], sense=pe.maximize)
        m.eq = pe.Constraint(expr=m.x[0] >= 2)
        return m
@abstractmethod
def _get_time_limit_option_name(self):
# NOTE(review): this and the method below appear misplaced between the
# test-instance classes — presumably they belong to BasePyomoSolver
# (merge residue); confirm placement.
pass
@abstractmethod
def _get_node_limit_option_name(self):
# NOTE(review): see placement note above.
pass
class PyomoTestInstanceKnapsack(Instance):
    """
    Simpler (one-dimensional) Knapsack Problem, used for testing.

    Bug fix: a bad merge spliced foreign methods into this class body (an
    abstract _get_gap_tolerance_option_name and the pre-0.2 relax /
    get_constraint_slacks stubs, which belong to the solver class).  They
    have been removed; the instance's own methods are unchanged.
    """

    def __init__(
        self,
        weights: List[float],
        prices: List[float],
        capacity: float,
    ) -> None:
        super().__init__()
        self.weights = weights
        self.prices = prices
        self.capacity = capacity
        self.n = len(weights)

    @overrides
    def to_model(self) -> pe.ConcreteModel:
        model = pe.ConcreteModel()
        items = range(len(self.weights))
        model.x = pe.Var(items, domain=pe.Binary)
        model.z = pe.Var(domain=pe.Reals, bounds=(0, self.capacity))
        model.OBJ = pe.Objective(
            expr=sum(model.x[v] * self.prices[v] for v in items),
            sense=pe.maximize,
        )
        model.eq_capacity = pe.Constraint(
            expr=sum(model.x[v] * self.weights[v] for v in items) == model.z
        )
        return model

    @overrides
    def get_instance_features(self) -> np.ndarray:
        # Instance-level features: capacity and mean item weight.
        return np.array(
            [
                self.capacity,
                np.average(self.weights),
            ]
        )

    @overrides
    def get_variable_features(self, names: np.ndarray) -> np.ndarray:
        # One (weight, price) row per x variable, plus a zero row for z.
        return np.vstack(
            [
                [[self.weights[i], self.prices[i]] for i in range(self.n)],
                [0.0, 0.0],
            ]
        )

    @overrides
    def get_variable_categories(self, names: np.ndarray) -> np.ndarray:
        # Only the x variables get a prediction category; z is skipped.
        return np.array(
            ["default" if n.decode().startswith("x") else "" for n in names],
            dtype="S",
        )

View File

@@ -1,49 +1,49 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from typing import Optional
from overrides import overrides
from pyomo import environ as pe
from scipy.stats import randint
from .base import BasePyomoSolver
from miplearn.solvers.pyomo.base import BasePyomoSolver
from miplearn.types import SolverParams
class CplexPyomoSolver(BasePyomoSolver):
    """
    An InternalSolver that uses CPLEX and the Pyomo modeling language.

    Parameters
    ----------
    params: dict
        Dictionary of options to pass to the Pyomo solver. For example,
        {"mip_display": 5} to increase the log verbosity.

    Bug fix: a bad merge interleaved the pre-0.2 ``__init__(options=...)``
    (which built the SolverFactory directly) with the new params-based
    ``__init__``, splitting the docstring between them.  The residue has
    been removed; the option-name helpers are kept for backward
    compatibility with the pre-0.2 setter API.
    """

    def __init__(
        self,
        params: Optional[SolverParams] = None,
    ) -> None:
        if params is None:
            params = {}
        # Randomize the seed so repeated runs explore different trees.
        params["randomseed"] = randint(low=0, high=1000).rvs()
        if "mip_display" not in params.keys():
            params["mip_display"] = 4
        super().__init__(
            solver_factory=pe.SolverFactory("cplex_persistent"),
            params=params,
        )

    @overrides
    def clone(self) -> "CplexPyomoSolver":
        return CplexPyomoSolver(params=self.params)

    @overrides
    def _get_warm_start_regexp(self) -> str:
        return "MIP start .* with objective ([0-9.e+-]*)\\."

    @overrides
    def _get_node_count_regexp(self) -> str:
        return "^[ *] *([0-9]+)"

    def _get_threads_option_name(self):
        return "threads"

    def _get_time_limit_option_name(self):
        return "timelimit"

    def _get_node_limit_option_name(self):
        return "mip_limits_nodes"

    def _get_gap_tolerance_option_name(self):
        return "mip_tolerances_mipgap"

    def set_branching_priorities(self, priorities):
        # Not supported through the CPLEX/Pyomo interface.
        raise NotImplementedError

View File

@@ -1,63 +1,55 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import sys
import logging
from io import StringIO
from typing import Optional
from overrides import overrides
from pyomo import environ as pe
from scipy.stats import randint
from .base import BasePyomoSolver
from .. import RedirectOutput
from miplearn.solvers.pyomo.base import BasePyomoSolver
from miplearn.types import SolverParams
logger = logging.getLogger(__name__)
class GurobiPyomoSolver(BasePyomoSolver):
    """
    An InternalSolver that uses Gurobi and the Pyomo modeling language.

    Parameters
    ----------
    params: dict
        Dictionary of options to pass to the Pyomo solver. For example,
        {"Threads": 4} to set the number of threads.

    Bug fix: a bad merge interleaved the pre-0.2 ``__init__(options=...)``
    with the new params-based ``__init__``, splitting the docstring
    between them.  The residue has been removed; the option-name helpers
    and set_branching_priorities are kept for backward compatibility.
    """

    def __init__(
        self,
        params: Optional[SolverParams] = None,
    ) -> None:
        if params is None:
            params = {}
        # Randomize the seed so repeated runs explore different trees.
        params["seed"] = randint(low=0, high=1000).rvs()
        super().__init__(
            solver_factory=pe.SolverFactory("gurobi_persistent"),
            params=params,
        )

    @overrides
    def clone(self) -> "GurobiPyomoSolver":
        return GurobiPyomoSolver(params=self.params)

    @overrides
    def _extract_node_count(self, log: str) -> int:
        # Gurobi exposes the node count through its API, not the log.
        return max(1, int(self._pyomo_solver._solver_model.getAttr("NodeCount")))

    @overrides
    def _get_warm_start_regexp(self) -> str:
        return "MIP start with objective ([0-9.e+-]*)"

    @overrides
    def _get_node_count_regexp(self) -> Optional[str]:
        return None

    def _get_threads_option_name(self):
        return "Threads"

    def _get_time_limit_option_name(self):
        return "TimeLimit"

    def _get_node_limit_option_name(self):
        return "NodeLimit"

    def _get_gap_tolerance_option_name(self):
        return "MIPGap"

    def set_branching_priorities(self, priorities):
        # Set Gurobi branching priorities directly on the native model.
        from gurobipy import GRB

        for varname in priorities.keys():
            var = self._varname_to_var[varname]
            for (index, priority) in priorities[varname].items():
                gvar = self._pyomo_solver._pyomo_var_to_solver_var_map[var[index]]
                gvar.setAttr(GRB.Attr.BranchPriority, int(round(priority)))

View File

@@ -0,0 +1,43 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from typing import Optional
from overrides import overrides
from pyomo import environ as pe
from scipy.stats import randint
from miplearn.solvers.pyomo.base import BasePyomoSolver
from miplearn.types import SolverParams
logger = logging.getLogger(__name__)
class XpressPyomoSolver(BasePyomoSolver):
    """
    An InternalSolver that uses XPRESS and the Pyomo modeling language.

    Parameters
    ----------
    params: dict
        Dictionary of options to pass to the Pyomo solver. For example,
        {"Threads": 4} to set the number of threads.
    """

    def __init__(
        self,
        params: Optional[SolverParams] = None,
    ) -> None:
        options = {} if params is None else params
        # Randomize the seed so repeated runs explore different trees.
        options["randomseed"] = randint(low=0, high=1000).rvs()
        super().__init__(
            solver_factory=pe.SolverFactory("xpress_persistent"),
            params=options,
        )

    @overrides
    def clone(self) -> "XpressPyomoSolver":
        return XpressPyomoSolver(params=self.params)

View File

@@ -1,26 +1,299 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn import BasePyomoSolver, GurobiSolver, GurobiPyomoSolver, CplexPyomoSolver
from miplearn.problems.knapsack import KnapsackInstance, GurobiKnapsackInstance
from typing import Any, List
import numpy as np
from miplearn.solvers.internal import InternalSolver, Variables, Constraints
inf = float("inf")
# NOTE:
# This file is in the main source folder, so that it can be called from Julia.


def _filter_attrs(allowed_keys: List[str], obj: Any) -> Any:
    """Set to None every attribute of `obj` whose name is not in
    `allowed_keys`, and return `obj` (mutated in place).

    Used by the test-suite to compare only the attributes a given solver
    claims to support.

    Bug fix: a bad merge left the start of the deleted pre-0.2
    ``_get_instance`` helper (with an unclosed call to KnapsackInstance)
    in front of this function, breaking the file syntactically.  The
    residue has been removed.
    """
    for key in obj.__dict__.keys():
        if key not in allowed_keys:
            setattr(obj, key, None)
    return obj
def run_internal_solver_tests(solver: InternalSolver) -> None:
    """Run the full InternalSolver test battery, each suite on a fresh
    clone of `solver`; the lazy-callback suite runs only when supported."""
    for suite in (
        run_basic_usage_tests,
        run_warm_start_tests,
        run_infeasibility_tests,
        run_iteration_cb_tests,
    ):
        suite(solver.clone())
    if solver.are_callbacks_supported():
        run_lazy_cb_tests(solver.clone())
def run_basic_usage_tests(solver: InternalSolver) -> None:
    """Exercise the InternalSolver API end-to-end on the knapsack instance:
    load, LP relaxation, MIP solve, constraint add/check/remove.

    Bug fix: a bad merge embedded fragments of the deleted pre-0.2
    ``_get_instance`` helper (with an unclosed call) and a stray
    ``assert False`` in the middle of this function; the residue has been
    removed, leaving the intended test flow intact.
    """
    # Create and set instance
    instance = solver.build_test_instance_knapsack()
    model = instance.to_model()
    solver.set_instance(instance, model)

    # Fetch variables (after-load)
    assert_equals(
        solver.get_variables(),
        Variables(
            names=np.array(["x[0]", "x[1]", "x[2]", "x[3]", "z"], dtype="S"),
            lower_bounds=np.array([0.0, 0.0, 0.0, 0.0, 0.0]),
            upper_bounds=np.array([1.0, 1.0, 1.0, 1.0, 67.0]),
            types=np.array(["B", "B", "B", "B", "C"], dtype="S"),
            obj_coeffs=np.array([505.0, 352.0, 458.0, 220.0, 0.0]),
        ),
    )

    # Fetch constraints (after-load)
    assert_equals(
        solver.get_constraints(),
        Constraints(
            names=np.array(["eq_capacity"], dtype="S"),
            rhs=np.array([0.0]),
            lhs=[
                [
                    (b"x[0]", 23.0),
                    (b"x[1]", 26.0),
                    (b"x[2]", 20.0),
                    (b"x[3]", 18.0),
                    (b"z", -1.0),
                ],
            ],
            senses=np.array(["="], dtype="S"),
        ),
    )

    # Solve linear programming relaxation
    lp_stats = solver.solve_lp()
    assert not solver.is_infeasible()
    assert lp_stats.lp_value is not None
    assert_equals(round(lp_stats.lp_value, 3), 1287.923)
    assert lp_stats.lp_log is not None
    assert len(lp_stats.lp_log) > 100
    assert lp_stats.lp_wallclock_time is not None
    assert lp_stats.lp_wallclock_time > 0

    # Fetch variables (after-lp)
    assert_equals(
        solver.get_variables(with_static=False),
        _filter_attrs(
            solver.get_variable_attrs(),
            Variables(
                names=np.array(["x[0]", "x[1]", "x[2]", "x[3]", "z"], dtype="S"),
                basis_status=np.array(["U", "B", "U", "L", "U"], dtype="S"),
                reduced_costs=np.array(
                    [193.615385, 0.0, 187.230769, -23.692308, 13.538462]
                ),
                sa_lb_down=np.array([-inf, -inf, -inf, -0.111111, -inf]),
                sa_lb_up=np.array([1.0, 0.923077, 1.0, 1.0, 67.0]),
                sa_obj_down=np.array(
                    [311.384615, 317.777778, 270.769231, -inf, -13.538462]
                ),
                sa_obj_up=np.array([inf, 570.869565, inf, 243.692308, inf]),
                sa_ub_down=np.array([0.913043, 0.923077, 0.9, 0.0, 43.0]),
                sa_ub_up=np.array([2.043478, inf, 2.2, inf, 69.0]),
                values=np.array([1.0, 0.923077, 1.0, 0.0, 67.0]),
            ),
        ),
    )

    # Fetch constraints (after-lp)
    assert_equals(
        solver.get_constraints(with_static=False),
        _filter_attrs(
            solver.get_constraint_attrs(),
            Constraints(
                basis_status=np.array(["N"], dtype="S"),
                dual_values=np.array([13.538462]),
                names=np.array(["eq_capacity"], dtype="S"),
                sa_rhs_down=np.array([-24.0]),
                sa_rhs_up=np.array([2.0]),
                slacks=np.array([0.0]),
            ),
        ),
    )

    # Solve MIP
    mip_stats = solver.solve(
        tee=True,
    )
    assert not solver.is_infeasible()
    assert mip_stats.mip_log is not None
    assert len(mip_stats.mip_log) > 100
    assert mip_stats.mip_lower_bound is not None
    assert_equals(mip_stats.mip_lower_bound, 1183.0)
    assert mip_stats.mip_upper_bound is not None
    assert_equals(mip_stats.mip_upper_bound, 1183.0)
    assert mip_stats.mip_sense is not None
    assert_equals(mip_stats.mip_sense, "max")
    assert mip_stats.mip_wallclock_time is not None
    assert isinstance(mip_stats.mip_wallclock_time, float)
    assert mip_stats.mip_wallclock_time > 0

    # Fetch variables (after-mip)
    assert_equals(
        solver.get_variables(with_static=False),
        _filter_attrs(
            solver.get_variable_attrs(),
            Variables(
                names=np.array(["x[0]", "x[1]", "x[2]", "x[3]", "z"], dtype="S"),
                values=np.array([1.0, 0.0, 1.0, 1.0, 61.0]),
            ),
        ),
    )

    # Fetch constraints (after-mip)
    assert_equals(
        solver.get_constraints(with_static=False),
        _filter_attrs(
            solver.get_constraint_attrs(),
            Constraints(
                names=np.array(["eq_capacity"], dtype="S"),
                slacks=np.array([0.0]),
            ),
        ),
    )

    # Build new constraint and verify that it is violated
    cf = Constraints(
        names=np.array(["cut"], dtype="S"),
        lhs=[[(b"x[0]", 1.0)]],
        rhs=np.array([0.0]),
        senses=np.array(["<"], dtype="S"),
    )
    assert_equals(solver.are_constraints_satisfied(cf), [False])

    # Add constraint and verify it affects solution
    solver.add_constraints(cf)
    assert_equals(
        solver.get_constraints(with_static=True),
        _filter_attrs(
            solver.get_constraint_attrs(),
            Constraints(
                names=np.array(["eq_capacity", "cut"], dtype="S"),
                rhs=np.array([0.0, 0.0]),
                lhs=[
                    [
                        (b"x[0]", 23.0),
                        (b"x[1]", 26.0),
                        (b"x[2]", 20.0),
                        (b"x[3]", 18.0),
                        (b"z", -1.0),
                    ],
                    [
                        (b"x[0]", 1.0),
                    ],
                ],
                senses=np.array(["=", "<"], dtype="S"),
            ),
        ),
    )
    stats = solver.solve()
    assert_equals(stats.mip_lower_bound, 1030.0)
    assert_equals(solver.are_constraints_satisfied(cf), [True])

    # Remove the new constraint
    solver.remove_constraints(np.array(["cut"], dtype="S"))

    # New constraint should no longer affect solution
    stats = solver.solve()
    assert_equals(stats.mip_lower_bound, 1183.0)
def _get_internal_solvers():
# NOTE(review): leftover from the pre-0.2 test helpers; references solver
# classes that may no longer be imported by this module — confirm callers.
return [GurobiPyomoSolver, CplexPyomoSolver, GurobiSolver]
def run_warm_start_tests(solver: InternalSolver) -> None:
    """Check warm-start loading and variable fixing on the knapsack instance."""
    instance = solver.build_test_instance_knapsack()
    model = instance.to_model()
    solver.set_instance(instance, model)

    # A feasible warm start; not every solver reports its value in the log.
    solver.set_warm_start({b"x[0]": 1.0, b"x[1]": 0.0, b"x[2]": 0.0, b"x[3]": 1.0})
    mip_stats = solver.solve(tee=True)
    if mip_stats.mip_warm_start_value is not None:
        assert_equals(mip_stats.mip_warm_start_value, 725.0)

    # An infeasible warm start must be rejected (no warm-start value).
    solver.set_warm_start({b"x[0]": 1.0, b"x[1]": 1.0, b"x[2]": 1.0, b"x[3]": 1.0})
    mip_stats = solver.solve(tee=True)
    assert mip_stats.mip_warm_start_value is None

    # Fixing variables pins the optimum to the fixed solution's value.
    solver.fix({b"x[0]": 1.0, b"x[1]": 0.0, b"x[2]": 0.0, b"x[3]": 1.0})
    mip_stats = solver.solve(tee=True)
    assert_equals(mip_stats.mip_lower_bound, 725.0)
    assert_equals(mip_stats.mip_upper_bound, 725.0)
def run_infeasibility_tests(solver: InternalSolver) -> None:
    """Infeasible instances must report no solution and no bounds, for
    both the MIP solve and the LP relaxation."""
    infeasible_instance = solver.build_test_instance_infeasible()
    solver.set_instance(infeasible_instance)

    mip_stats = solver.solve()
    assert solver.is_infeasible()
    assert solver.get_solution() is None
    assert mip_stats.mip_upper_bound is None
    assert mip_stats.mip_lower_bound is None

    lp_stats = solver.solve_lp()
    assert solver.get_solution() is None
    assert lp_stats.lp_value is None
def run_iteration_cb_tests(solver: InternalSolver) -> None:
    """The iteration callback must be invoked after each solve until it
    returns False (here: exactly 5 times)."""
    instance = solver.build_test_instance_knapsack()
    solver.set_instance(instance)
    n_calls = 0

    def _count_and_continue() -> bool:
        nonlocal n_calls
        n_calls += 1
        return n_calls < 5

    solver.solve(iteration_cb=_count_and_continue)
    assert_equals(n_calls, 5)
def run_lazy_cb_tests(solver: InternalSolver) -> None:
# Exercises the lazy-constraint callback: while x[0] is positive in the
# relaxation, the instance's "cut" lazy constraint is enforced, which
# drives x[0] to zero in the final solution.
instance = solver.build_test_instance_knapsack()
model = instance.to_model()
def lazy_cb(cb_solver: InternalSolver, cb_model: Any) -> None:
relsol = cb_solver.get_solution()
assert relsol is not None
assert relsol[b"x[0]"] is not None
if relsol[b"x[0]"] > 0:
instance.enforce_lazy_constraint(cb_solver, cb_model, b"cut")
solver.set_instance(instance, model)
solver.solve(lazy_cb=lazy_cb)
solution = solver.get_solution()
assert solution is not None
assert_equals(solution[b"x[0]"], 0.0)
def _equals_preprocess(obj: Any) -> Any:
    """Normalize `obj` for equality comparison: numpy arrays become lists
    (float64 rounded to 6 decimals), floats are rounded to 6 decimals,
    containers are processed recursively, and arbitrary objects have their
    __dict__ values normalized in place."""
    if isinstance(obj, np.ndarray):
        if obj.dtype == "float64":
            return np.round(obj, decimals=6).tolist()
        return obj.tolist()
    if isinstance(obj, (int, str, bool, np.bool_, np.bytes_, bytes, bytearray)):
        return obj
    if isinstance(obj, float):
        return round(obj, 6)
    if isinstance(obj, list):
        return [_equals_preprocess(item) for item in obj]
    if isinstance(obj, tuple):
        return tuple(_equals_preprocess(item) for item in obj)
    if obj is None:
        return None
    if isinstance(obj, dict):
        return {key: _equals_preprocess(val) for (key, val) in obj.items()}
    # Fallback: normalize the object's attributes in place.
    for key in obj.__dict__.keys():
        obj.__dict__[key] = _equals_preprocess(obj.__dict__[key])
    return obj
def assert_equals(left: Any, right: Any) -> None:
    """Assert equality of `left` and `right` after normalizing both sides
    with _equals_preprocess (rounded floats, numpy arrays as lists)."""
    left = _equals_preprocess(left)
    right = _equals_preprocess(right)
    if left != right:
        raise AssertionError(f"left:\n{left}\nright:\n{right}")

View File

@@ -1,156 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from io import StringIO
import pyomo.environ as pe
from miplearn import BasePyomoSolver, GurobiSolver
from miplearn.solvers import RedirectOutput
from . import _get_instance, _get_internal_solvers
logger = logging.getLogger(__name__)
def test_redirect_output():
# NOTE(review): test from the deleted pre-0.2 test module.  Verifies that
# RedirectOutput reroutes print output to the given streams and restores
# sys.stdout on exit.
import sys
original_stdout = sys.stdout
io = StringIO()
with RedirectOutput([io]):
print("Hello world")
assert sys.stdout == original_stdout
assert io.getvalue() == "Hello world\n"
def test_internal_solver_warm_starts():
# NOTE(review): test from the deleted pre-0.2 test module (nested-dict
# solutions, stats dict API); superseded by run_warm_start_tests in the
# shared test-suite module.
for solver_class in _get_internal_solvers():
logger.info("Solver: %s" % solver_class)
instance = _get_instance(solver_class)
model = instance.to_model()
solver = solver_class()
solver.set_instance(instance, model)
# Feasible warm start: its objective value should be reported.
solver.set_warm_start({
"x": {
0: 1.0,
1: 0.0,
2: 0.0,
3: 1.0,
}
})
stats = solver.solve(tee=True)
assert stats["Warm start value"] == 725.0
# Infeasible warm start: must be rejected.
solver.set_warm_start({
"x": {
0: 1.0,
1: 1.0,
2: 1.0,
3: 1.0,
}
})
stats = solver.solve(tee=True)
assert stats["Warm start value"] is None
# Fixing variables pins both bounds to the fixed solution's value.
solver.fix({
"x": {
0: 1.0,
1: 0.0,
2: 0.0,
3: 1.0,
}
})
stats = solver.solve(tee=True)
assert stats["Lower bound"] == 725.0
assert stats["Upper bound"] == 725.0
def test_internal_solver():
    """End-to-end check of each internal solver.

    Exercises: LP relaxation solve, MIP solve, solution retrieval, and
    dynamic addition/extraction/re-addition of a constraint named "cut".
    """
    for solver_class in _get_internal_solvers():
        logger.info("Solver: %s" % solver_class)
        instance = _get_instance(solver_class)
        model = instance.to_model()
        solver = solver_class()
        solver.set_instance(instance, model)
        # Solve the LP relaxation; the optimal value and the (fractional)
        # solution are fixed properties of the shared test instance.
        stats = solver.solve_lp()
        assert round(stats["Optimal value"], 3) == 1287.923
        solution = solver.get_solution()
        assert round(solution["x"][0], 3) == 1.000
        assert round(solution["x"][1], 3) == 0.923
        assert round(solution["x"][2], 3) == 1.000
        assert round(solution["x"][3], 3) == 0.000
        # Solve the MIP and verify the reported statistics.
        stats = solver.solve(tee=True)
        assert len(stats["Log"]) > 100
        assert stats["Lower bound"] == 1183.0
        assert stats["Upper bound"] == 1183.0
        assert stats["Sense"] == "max"
        assert isinstance(stats["Wallclock time"], float)
        assert isinstance(stats["Nodes"], int)
        # The integer-optimal solution.
        solution = solver.get_solution()
        assert solution["x"][0] == 1.0
        assert solution["x"][1] == 0.0
        assert solution["x"][2] == 1.0
        assert solution["x"][3] == 1.0
        # Add a brand new constraint (API differs per solver backend).
        if isinstance(solver, BasePyomoSolver):
            model.cut = pe.Constraint(expr=model.x[0] <= 0.0, name="cut")
            solver.add_constraint(model.cut)
        elif isinstance(solver, GurobiSolver):
            x = model.getVarByName("x[0]")
            solver.add_constraint(x <= 0.0, name="cut")
        else:
            raise Exception("Illegal state")
        # New constraint should affect solution and should be listed in
        # constraint ids
        assert solver.get_constraint_ids() == ["eq_capacity", "cut"]
        stats = solver.solve()
        assert stats["Lower bound"] == 1030.0
        # Constraint extraction is only supported by the Gurobi backend.
        if isinstance(solver, GurobiSolver):
            # Extract new constraint
            cobj = solver.extract_constraint("cut")
            # New constraint should no longer affect solution and should no longer
            # be listed in constraint ids
            assert solver.get_constraint_ids() == ["eq_capacity"]
            stats = solver.solve()
            assert stats["Lower bound"] == 1183.0
            # New constraint should not be satisfied by current solution
            assert not solver.is_constraint_satisfied(cobj)
            # Re-add constraint
            solver.add_constraint(cobj)
            # Constraint should affect solution again
            assert solver.get_constraint_ids() == ["eq_capacity", "cut"]
            stats = solver.solve()
            assert stats["Lower bound"] == 1030.0
            # New constraint should now be satisfied
            assert solver.is_constraint_satisfied(cobj)
def test_iteration_cb():
    """The iteration callback should be invoked until it returns False."""
    for solver_class in _get_internal_solvers():
        logger.info("Solver: %s" % solver_class)
        solver = solver_class()
        solver.set_instance(_get_instance(solver_class))
        n_calls = 0

        def custom_iteration_cb():
            # Request another iteration until the fifth call.
            nonlocal n_calls
            n_calls += 1
            return n_calls < 5

        solver.solve(iteration_cb=custom_iteration_cb)
        # The callback must have been invoked exactly five times.
        assert n_calls == 5

View File

@@ -1,27 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from . import _get_instance
from ... import GurobiSolver
logger = logging.getLogger(__name__)
def test_lazy_cb():
    """A lazy-constraint callback should be able to cut off candidate solutions."""
    solver = GurobiSolver()
    instance = _get_instance(solver)
    model = instance.to_model()

    def lazy_cb(cb_solver, cb_model):
        # Log the candidate value of x[0], then add the cut x[0] <= 0
        # whenever the candidate violates it.
        logger.info("x[0] = %.f" % cb_solver.get_value("x", 0))
        cut = (cb_model.getVarByName("x[0]") * 1.0, "<", 0.0, "cut")
        if not cb_solver.is_constraint_satisfied(cut):
            cb_solver.add_constraint(cut)

    solver.set_instance(instance, model)
    solver.solve(lazy_cb=lazy_cb)
    # The lazy cut forces x[0] to zero in the final solution.
    assert solver.get_solution()["x"][0] == 0.0

View File

@@ -1,67 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
import pickle
import tempfile
from miplearn import DynamicLazyConstraintsComponent
from miplearn import LearningSolver
from . import _get_instance, _get_internal_solvers
logger = logging.getLogger(__name__)
def test_learning_solver():
    """Solve the shared knapsack instance with LearningSolver in both modes
    and with every internal solver, then verify all recorded training data.
    """
    for mode in ["exact", "heuristic"]:
        for internal_solver in _get_internal_solvers():
            logger.info("Solver: %s" % internal_solver)
            instance = _get_instance(internal_solver)
            solver = LearningSolver(time_limit=300,
                                    gap_tolerance=1e-3,
                                    threads=1,
                                    solver=internal_solver,
                                    mode=mode)
            solver.solve(instance)
            # Optimal MIP solution and matching bounds.
            assert instance.solution["x"][0] == 1.0
            assert instance.solution["x"][1] == 0.0
            assert instance.solution["x"][2] == 1.0
            assert instance.solution["x"][3] == 1.0
            assert instance.lower_bound == 1183.0
            assert instance.upper_bound == 1183.0
            # LP relaxation solution and objective value.
            assert round(instance.lp_solution["x"][0], 3) == 1.000
            assert round(instance.lp_solution["x"][1], 3) == 0.923
            assert round(instance.lp_solution["x"][2], 3) == 1.000
            assert round(instance.lp_solution["x"][3], 3) == 0.000
            assert round(instance.lp_value, 3) == 1287.923
            # This instance has no lazy constraints or user cuts.
            assert instance.found_violated_lazy_constraints == []
            assert instance.found_violated_user_cuts == []
            assert len(instance.solver_log) > 100
            # Fitting on the solved instance and re-solving must not fail.
            solver.fit([instance])
            solver.solve(instance)
            # Assert solver is picklable
            with tempfile.TemporaryFile() as file:
                pickle.dump(solver, file)
def test_parallel_solve():
    """parallel_solve should return one result per instance and fill in solutions."""
    for internal_solver in _get_internal_solvers():
        instances = [_get_instance(internal_solver) for _ in range(10)]
        solver = LearningSolver(solver=internal_solver)
        results = solver.parallel_solve(instances, n_jobs=3)
        # One result per submitted instance.
        assert len(results) == 10
        # Each instance carries a complete solution over all four variables.
        for solved in instances:
            assert len(solved.solution["x"]) == 4
def test_add_components():
    """Adding the same component type twice should keep a single copy,
    keyed by the component's class name."""
    solver = LearningSolver(components=[])
    for _ in range(2):
        solver.add(DynamicLazyConstraintsComponent())
    assert len(solver.components) == 1
    assert "DynamicLazyConstraintsComponent" in solver.components

View File

@@ -1,25 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn import LearningSolver
from miplearn.problems.knapsack import KnapsackInstance
def get_test_pyomo_instances():
    """Build and solve two small knapsack instances.

    Returns:
        Tuple ``(instances, models)``, where ``instances`` is a list of two
        solved KnapsackInstance objects and ``models`` holds the
        corresponding Pyomo models, in matching order.
    """
    instances = [
        KnapsackInstance(
            weights=[23., 26., 20., 18.],
            prices=[505., 352., 458., 220.],
            capacity=67.,
        ),
        KnapsackInstance(
            weights=[25., 30., 22., 18.],
            prices=[500., 365., 420., 150.],
            capacity=70.,
        ),
    ]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
    # zip pairs each instance with its model directly, avoiding the
    # index-based range(len(...)) loop.
    for instance, model in zip(instances, models):
        solver.solve(instance, model)
    return instances, models

View File

@@ -1,36 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import os.path
from miplearn import LearningSolver, BenchmarkRunner
from miplearn.problems.stab import MaxWeightStableSetGenerator
from scipy.stats import randint
def test_benchmark():
    """End-to-end check of BenchmarkRunner: fit, solve, save and reload results."""
    import tempfile

    # Generate training and test instances
    train_instances = MaxWeightStableSetGenerator(n=randint(low=25, high=26)).generate(5)
    test_instances = MaxWeightStableSetGenerator(n=randint(low=25, high=26)).generate(3)
    # Training phase...
    training_solver = LearningSolver()
    training_solver.parallel_solve(train_instances, n_jobs=10)
    # Test phase...
    test_solvers = {
        "Strategy A": LearningSolver(),
        "Strategy B": LearningSolver(),
    }
    benchmark = BenchmarkRunner(test_solvers)
    benchmark.fit(train_instances)
    benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
    # 3 instances x 2 solvers x 2 trials = 12 rows; 16 statistic columns.
    assert benchmark.raw_results().values.shape == (12, 16)
    # Results should round-trip through CSV. A temporary directory replaces
    # the previous hard-coded /tmp path, which is not portable and could
    # collide with other processes.
    with tempfile.TemporaryDirectory() as tmpdir:
        csv_file = os.path.join(tmpdir, "benchmark.csv")
        benchmark.save_results(csv_file)
        assert os.path.isfile(csv_file)
        benchmark = BenchmarkRunner(test_solvers)
        benchmark.load_results(csv_file)
        assert benchmark.raw_results().values.shape == (12, 16)

View File

@@ -1,62 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn.problems.knapsack import KnapsackInstance
from miplearn import (LearningSolver,
SolutionExtractor,
InstanceFeaturesExtractor,
VariableFeaturesExtractor,
)
import numpy as np
import pyomo.environ as pe
def _get_instances():
    """Build and solve two tiny knapsack instances for the extractor tests.

    Returns:
        Tuple ``(instances, models)`` of solved KnapsackInstance objects and
        their Pyomo models, in matching order.
    """
    instances = [
        KnapsackInstance(weights=[1., 2., 3.],
                         prices=[10., 20., 30.],
                         capacity=2.5,
                         ),
        KnapsackInstance(weights=[3., 4., 5.],
                         prices=[20., 30., 40.],
                         capacity=4.5,
                         ),
    ]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
    # The original loop enumerated but ignored the loop variable and indexed
    # both lists; zip pairs instance and model directly.
    for instance, model in zip(instances, models):
        solver.solve(instance, model)
    return instances, models
def test_solution_extractor():
    """SolutionExtractor should map each category to a (n_vars, 2) one-hot array."""
    instances, models = _get_instances()
    features = SolutionExtractor().extract(instances)
    assert isinstance(features, dict)
    assert "default" in features
    default = features["default"]
    assert isinstance(default, np.ndarray)
    # Six variables across the two instances, two columns (one-hot 0/1).
    assert default.shape == (6, 2)
    expected = [
        1., 0.,
        0., 1.,
        1., 0.,
        1., 0.,
        0., 1.,
        1., 0.,
    ]
    assert default.ravel().tolist() == expected
def test_instance_features_extractor():
    """InstanceFeaturesExtractor should return a (n_instances, 3) matrix."""
    instances, _ = _get_instances()
    extracted = InstanceFeaturesExtractor().extract(instances)
    assert extracted.shape == (2, 3)
def test_variable_features_extractor():
    """VariableFeaturesExtractor should return a dict whose 'default' entry
    is a (6, 5) feature matrix."""
    instances, _ = _get_instances()
    extracted = VariableFeaturesExtractor().extract(instances)
    assert isinstance(extracted, dict)
    assert "default" in extracted
    assert extracted["default"].shape == (6, 5)

52
miplearn/types.py Normal file
View File

@@ -0,0 +1,52 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from typing import Optional, Dict, Callable, Any, Union, TYPE_CHECKING
from mypy_extensions import TypedDict
if TYPE_CHECKING:
# noinspection PyUnresolvedReferences
from miplearn.solvers.learning import InternalSolver
# Identifiers are stored as bytes rather than str.
# NOTE(review): presumably to match how names/categories are serialized in
# the HDF5 sample files — confirm against miplearn.features.sample.
Category = bytes
ConstraintName = bytes
ConstraintCategory = bytes

# Callback invoked between solver iterations; returning False stops iterating.
IterationCallback = Callable[[], bool]
# Callback receiving (solver, model), used for lazy-constraint generation.
LazyCallback = Callable[[Any, Any], None]
# Free-form keyword parameters forwarded to the underlying MIP solver.
SolverParams = Dict[str, Any]
# Callback receiving (internal solver, model) for user-cut generation.
UserCutCallback = Callable[["InternalSolver", Any], None]
# Maps variable name (bytes) to its assigned value, or None if unassigned.
Solution = Dict[bytes, Optional[float]]

# Statistics collected during LearningSolver.solve. With total=False every
# key is optional. Key naming mixes two conventions ("Gap" vs "lp_value");
# NOTE(review): this looks like an in-progress migration of key names.
LearningSolveStats = TypedDict(
    "LearningSolveStats",
    {
        "Gap": Optional[float],
        "Instance": Union[str, int],
        "lp_log": str,
        "lp_value": Optional[float],
        "lp_wallclock_time": Optional[float],
        "mip_lower_bound": Optional[float],
        "mip_log": str,
        "Mode": str,
        "mip_nodes": Optional[int],
        "Objective: Predicted lower bound": float,
        "Objective: Predicted upper bound": float,
        "Primal: Free": int,
        "Primal: One": int,
        "Primal: Zero": int,
        "Sense": str,
        "Solver": str,
        "mip_upper_bound": Optional[float],
        "mip_wallclock_time": float,
        "mip_warm_start_value": Optional[float],
        "LazyStatic: Removed": int,
        "LazyStatic: Kept": int,
        "LazyStatic: Restored": int,
        "LazyStatic: Iterations": int,
        "UserCuts: Added ahead-of-time": int,
        "UserCuts: Added in callback": int,
    },
    total=False,
)

Some files were not shown because too many files have changed in this diff Show More