From 2dc66696f9046e01410812626da4416a54397b74 Mon Sep 17 00:00:00 2001 From: Alinson S Xavier Date: Tue, 4 Feb 2020 13:29:53 -0600 Subject: [PATCH 1/8] Initial version of TSP generator --- miplearn/problems/tsp.py | 90 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 miplearn/problems/tsp.py diff --git a/miplearn/problems/tsp.py b/miplearn/problems/tsp.py new file mode 100644 index 0000000..faa9194 --- /dev/null +++ b/miplearn/problems/tsp.py @@ -0,0 +1,90 @@ +# MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2019-2020 Argonne National Laboratory. All rights reserved. +# Written by Alinson S. Xavier + +import numpy as np +import pyomo.environ as pe +from miplearn import Instance +import random + + +class TravelingSalesmanChallengeA: + """Fixed set of cities, small perturbation to travel speed.""" + def __init__(): + self.generator = TravelingSalesmanGenerator(speed=uniform(loc=0.9, scale=0.2), + x=uniform(loc=0.0, loc=1000.0), + y=uniform(loc=0.0, loc=1000.0), + pn=0.0, + n=randint(low=100, high=100), + fix_cities=True) + + def get_training_instances(): + return self.generator.generate(500) + + def get_test_instances(): + return self.generator.generate(100) + + +class TravelingSalesmanGenerator: + """Random generator for the Traveling Salesman Problem. + + The generator starts by randomly selecing n points with coordinates (x_i, y_i), where n, x_i + and y_i are random variables. The time required to travel from a pair of cities is calculated + by: (i) computing the euclidean distance between the cities, (ii) sampling a random variable + speed_i, (iii) dividing the two numbers. + + If fix_cities is True, the cities and travel times will be calculated only once, during the + constructor. Each time an instance is generated, however, each city will have probability pv + of being removed from the list. If fix_cities if False, then the cities and travel times will + be resampled each time an instance is generated. The probability pn is not used in this case. + + All random variables are independent. + """ + + def __init__(self, + speed=uniform(loc=0.75, scale=0.5), + x=uniform(loc=0.0, loc=1000.0), + y=uniform(loc=0.0, loc=1000.0), + pn=0.0, + n=randint(low=100, high=100), + fix_cities=True): + """Initializes the problem generator. + + Arguments + --------- + speed: rv_continuous + Probability distribution for travel speed. + x: rv_continuous + Probability distribution for the x-coordinate of each city. + y: rv_continuous + Probability distribution for the y-coordinate of each city. + pn: float + Probability of a city being removed from the list. Only used if fix_cities=True. + n: rv_discrete + Probability distribution for the number of cities. + fix_cities: bool + If true, cities will be resampled for every generated instance. Otherwise, list of + cities will be computed once, during the constructor. + """ + pass + + def generate(self, n_samples): + pass + + +class TravelingSalesmanInstance(Instance): + """An instance ot the Traveling Salesman Problem. + + Given a list of cities and the distance between each pair of cities, the problem asks for the + shortest route starting at the first city, visiting each other city exactly once, then + returning to the first city. This problem is a generalization of the Hamiltonian path problem, + one of Karp's 21 NP-complete problems. 
+ """ + + def __init__(self, n_cities, distances): + assert isinstance(distances, np.array) + assert distances.shape == (n_cities, n_cities) + self.n_cities = n_cities + self.distances = distances + pass + From f22aecb14dd2a7bf08aeeb719deefff32d8ed5d2 Mon Sep 17 00:00:00 2001 From: Alinson S Xavier Date: Mon, 24 Feb 2020 14:22:04 -0600 Subject: [PATCH 2/8] Add missing docs files --- docs-src/css/custom.css | 0 docs/css/custom.css | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 docs-src/css/custom.css create mode 100644 docs/css/custom.css diff --git a/docs-src/css/custom.css b/docs-src/css/custom.css new file mode 100644 index 0000000..e69de29 diff --git a/docs/css/custom.css b/docs/css/custom.css new file mode 100644 index 0000000..e69de29 From 7a01d9cbcf18c952c4ee8a6dd35b039ed9b269cf Mon Sep 17 00:00:00 2001 From: Alinson S Xavier Date: Mon, 24 Feb 2020 21:56:52 -0600 Subject: [PATCH 3/8] Implement TSP generator and LazyConstraintsComponent --- miplearn/__init__.py | 1 + miplearn/components/branching.py | 26 ----- miplearn/components/component.py | 4 - miplearn/components/lazy.py | 48 +++++++++ miplearn/components/objective.py | 3 - miplearn/components/primal.py | 3 - miplearn/problems/tests/test_tsp.py | 68 ++++++++++++ miplearn/problems/tsp.py | 162 ++++++++++++++++++++-------- miplearn/solvers.py | 41 ++++--- 9 files changed, 266 insertions(+), 90 deletions(-) create mode 100644 miplearn/components/lazy.py create mode 100644 miplearn/problems/tests/test_tsp.py diff --git a/miplearn/__init__.py b/miplearn/__init__.py index 8ee19d4..97443b8 100644 --- a/miplearn/__init__.py +++ b/miplearn/__init__.py @@ -10,6 +10,7 @@ from .extractors import (SolutionExtractor, ) from .components.component import Component from .components.objective import ObjectiveValueComponent +from .components.lazy import LazyConstraintsComponent from .components.primal import (PrimalSolutionComponent, AdaptivePredictor, ) diff --git a/miplearn/components/branching.py b/miplearn/components/branching.py index 8d29396..f35886e 100644 --- a/miplearn/components/branching.py +++ b/miplearn/components/branching.py @@ -118,29 +118,3 @@ class BranchPriorityComponent(Component): instance_features = instance.get_instance_features() var_features = instance.get_variable_features(var, index) return np.hstack([instance_features, var_features]) - - def merge(self, other_components): - keys = set(self.x_train.keys()) - for comp in other_components: - self.pending_instances += comp.pending_instances - keys = keys.union(set(comp.x_train.keys())) - - # Merge x_train and y_train - for key in keys: - x_train_submatrices = [comp.x_train[key] - for comp in other_components - if key in comp.x_train.keys()] - y_train_submatrices = [comp.y_train[key] - for comp in other_components - if key in comp.y_train.keys()] - if key in self.x_train.keys(): - x_train_submatrices += [self.x_train[key]] - y_train_submatrices += [self.y_train[key]] - self.x_train[key] = np.vstack(x_train_submatrices) - self.y_train[key] = np.vstack(y_train_submatrices) - - # Merge trained ML predictors - for comp in other_components: - for key in comp.predictors.keys(): - if key not in self.predictors.keys(): - self.predictors[key] = comp.predictors[key] \ No newline at end of file diff --git a/miplearn/components/component.py b/miplearn/components/component.py index fe4ca6a..fba3bf1 100644 --- a/miplearn/components/component.py +++ b/miplearn/components/component.py @@ -18,10 +18,6 @@ class Component(ABC): def after_solve(self, solver, instance, 
model): pass - @abstractmethod - def merge(self, other): - pass - @abstractmethod def fit(self, training_instances): pass diff --git a/miplearn/components/lazy.py b/miplearn/components/lazy.py new file mode 100644 index 0000000..06df042 --- /dev/null +++ b/miplearn/components/lazy.py @@ -0,0 +1,48 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. + +from .component import Component +from ..extractors import * + +from abc import ABC, abstractmethod +from copy import deepcopy +import numpy as np +from sklearn.pipeline import make_pipeline +from sklearn.linear_model import LogisticRegression +from sklearn.preprocessing import StandardScaler +from sklearn.model_selection import cross_val_score +from sklearn.metrics import roc_curve +from sklearn.neighbors import KNeighborsClassifier +from tqdm.auto import tqdm +import pyomo.environ as pe +import logging +logger = logging.getLogger(__name__) + + +class LazyConstraintsComponent(Component): + """ + A component that predicts which lazy constraints to enforce. + """ + + def __init__(self): + self.violations = set() + + def before_solve(self, solver, instance, model): + logger.info("Enforcing %d lazy constraints" % len(self.violations)) + for v in self.violations: + cut = instance.build_lazy_constraint(model, v) + solver.internal_solver.add_constraint(cut) + + def after_solve(self, solver, instance, model): + pass + + def fit(self, training_instances): + for instance in training_instances: + if not hasattr(instance, "found_violations"): + continue + for v in instance.found_violations: + self.violations.add(v) + + def predict(self, instance, model=None): + return self.violations diff --git a/miplearn/components/objective.py b/miplearn/components/objective.py index ecf2f72..bc32097 100644 --- a/miplearn/components/objective.py +++ b/miplearn/components/objective.py @@ -30,9 +30,6 @@ class ObjectiveValueComponent(Component): def after_solve(self, solver, instance, model): pass - def merge(self, other): - pass - def fit(self, training_instances): features = InstanceFeaturesExtractor().extract(training_instances) ub = ObjectiveValueExtractor(kind="upper bound").extract(training_instances) diff --git a/miplearn/components/primal.py b/miplearn/components/primal.py index 92d7542..ba2f956 100644 --- a/miplearn/components/primal.py +++ b/miplearn/components/primal.py @@ -200,6 +200,3 @@ class PrimalSolutionComponent(Component): if ws[i, 1] >= self.thresholds[category, label]: solution[var][index] = label return solution - - def merge(self, other_components): - pass diff --git a/miplearn/problems/tests/test_tsp.py b/miplearn/problems/tests/test_tsp.py new file mode 100644 index 0000000..d6182b5 --- /dev/null +++ b/miplearn/problems/tests/test_tsp.py @@ -0,0 +1,68 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. 
+ +from miplearn import LearningSolver +from miplearn.problems.tsp import TravelingSalesmanGenerator, TravelingSalesmanInstance +import numpy as np +from numpy.linalg import norm +from scipy.spatial.distance import pdist, squareform +from scipy.stats import uniform, randint + + +def test_generator(): + instances = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), + y=uniform(loc=0.0, scale=1000.0), + n=randint(low=100, high=101), + gamma=uniform(loc=0.95, scale=0.1), + fix_cities=True).generate(100) + assert len(instances) == 100 + assert instances[0].n_cities == 100 + assert norm(instances[0].distances - instances[0].distances.T) < 1e-6 + d = [instance.distances[0,1] for instance in instances] + assert np.std(d) > 0 + + +def test_instance(): + n_cities = 4 + distances = np.array([ + [0., 1., 2., 1.], + [1., 0., 1., 2.], + [2., 1., 0., 1.], + [1., 2., 1., 0.], + ]) + instance = TravelingSalesmanInstance(n_cities, distances) + solver = LearningSolver() + solver.solve(instance) + x = instance.solution["x"] + assert x[0,1] == 1.0 + assert x[0,2] == 0.0 + assert x[0,3] == 1.0 + assert x[1,2] == 1.0 + assert x[1,3] == 0.0 + assert x[2,3] == 1.0 + assert instance.lower_bound == 4.0 + assert instance.upper_bound == 4.0 + + +def test_subtour(): + n_cities = 6 + cities = np.array([ + [0., 0.], + [1., 0.], + [2., 0.], + [3., 0.], + [0., 1.], + [3., 1.], + ]) + distances = squareform(pdist(cities)) + instance = TravelingSalesmanInstance(n_cities, distances) + solver = LearningSolver() + solver.solve(instance) + x = instance.solution["x"] + assert x[0,1] == 1.0 + assert x[0,4] == 1.0 + assert x[1,2] == 1.0 + assert x[2,3] == 1.0 + assert x[3,5] == 1.0 + assert x[4,5] == 1.0 \ No newline at end of file diff --git a/miplearn/problems/tsp.py b/miplearn/problems/tsp.py index faa9194..4091287 100644 --- a/miplearn/problems/tsp.py +++ b/miplearn/problems/tsp.py @@ -5,71 +5,109 @@ import numpy as np import pyomo.environ as pe from miplearn import Instance +from scipy.stats import uniform, randint +from scipy.spatial.distance import pdist, squareform +from scipy.stats.distributions import rv_frozen +import networkx as nx import random -class TravelingSalesmanChallengeA: - """Fixed set of cities, small perturbation to travel speed.""" - def __init__(): - self.generator = TravelingSalesmanGenerator(speed=uniform(loc=0.9, scale=0.2), - x=uniform(loc=0.0, loc=1000.0), - y=uniform(loc=0.0, loc=1000.0), - pn=0.0, - n=randint(low=100, high=100), - fix_cities=True) +class ChallengeA: + def __init__(self, + seed=42, + n_training_instances=500, + n_test_instances=50, + ): - def get_training_instances(): - return self.generator.generate(500) - - def get_test_instances(): - return self.generator.generate(100) + np.random.seed(seed) + self.generator = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), + y=uniform(loc=0.0, scale=1000.0), + n=randint(low=350, high=351), + gamma=uniform(loc=0.95, scale=0.1), + fix_cities=True, + ) + + np.random.seed(seed + 1) + self.training_instances = self.generator.generate(n_training_instances) + + np.random.seed(seed + 2) + self.test_instances = self.generator.generate(n_test_instances) class TravelingSalesmanGenerator: - """Random generator for the Traveling Salesman Problem. + """Random generator for the Traveling Salesman Problem.""" - The generator starts by randomly selecing n points with coordinates (x_i, y_i), where n, x_i - and y_i are random variables. 
The time required to travel from a pair of cities is calculated - by: (i) computing the euclidean distance between the cities, (ii) sampling a random variable - speed_i, (iii) dividing the two numbers. + def __init__(self, + x=uniform(loc=0.0, scale=1000.0), + y=uniform(loc=0.0, scale=1000.0), + n=randint(low=100, high=101), + gamma=uniform(loc=1.0, scale=0.0), + fix_cities=True, + round=True, + ): + """Initializes the problem generator. + + Initially, the generator creates n cities (x_1,y_1),...,(x_n,y_n) where n, x_i and y_i are + sampled independently from the provided probability distributions `n`, `x` and `y`. For each + (unordered) pair of cities (i,j), the distance d[i,j] between them is set to: - If fix_cities is True, the cities and travel times will be calculated only once, during the - constructor. Each time an instance is generated, however, each city will have probability pv - of being removed from the list. If fix_cities if False, then the cities and travel times will - be resampled each time an instance is generated. The probability pn is not used in this case. + d[i,j] = gamma[i,j] \sqrt{(x_i - x_j)^2 + (y_i - y_j)^2} - All random variables are independent. - """ + where gamma is sampled from the provided probability distribution `gamma`. - def __init__(self, - speed=uniform(loc=0.75, scale=0.5), - x=uniform(loc=0.0, loc=1000.0), - y=uniform(loc=0.0, loc=1000.0), - pn=0.0, - n=randint(low=100, high=100), - fix_cities=True): - """Initializes the problem generator. + If fix_cities=True, the list of cities is kept the same for all generated instances. The + gamma values, and therefore also the distances, are still different. + + By default, all distances d[i,j] are rounded to the nearest integer. If `round=False` + is provided, this rounding will be disabled. Arguments --------- - speed: rv_continuous - Probability distribution for travel speed. x: rv_continuous Probability distribution for the x-coordinate of each city. y: rv_continuous Probability distribution for the y-coordinate of each city. - pn: float - Probability of a city being removed from the list. Only used if fix_cities=True. n: rv_discrete Probability distribution for the number of cities. fix_cities: bool - If true, cities will be resampled for every generated instance. Otherwise, list of + If False, cities will be resampled for every generated instance. Otherwise, list of cities will be computed once, during the constructor. + round: bool + If True, distances are rounded to the nearest integer. 
""" - pass + assert isinstance(x, rv_frozen), "x should be a SciPy probability distribution" + assert isinstance(y, rv_frozen), "y should be a SciPy probability distribution" + assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution" + assert isinstance(gamma, rv_frozen), "gamma should be a SciPy probability distribution" + self.x = x + self.y = y + self.n = n + self.gamma = gamma + self.round = round + + if fix_cities: + self.fixed_n, self.fixed_cities = self._generate_cities() + else: + self.fixed_n = None + self.fixed_cities = None def generate(self, n_samples): - pass + def _sample(): + if self.fixed_cities is not None: + n, cities = self.fixed_n, self.fixed_cities + else: + n, cities = self._generate_cities() + distances = squareform(pdist(cities)) * self.gamma.rvs(size=(n, n)) + distances = np.tril(distances) + np.triu(distances.T, 1) + if self.round: + distances = distances.round() + return TravelingSalesmanInstance(n, distances) + return [_sample() for _ in range(n_samples)] + + def _generate_cities(self): + n = self.n.rvs() + cities = np.array([(self.x.rvs(), self.y.rvs()) for _ in range(n)]) + return n, cities class TravelingSalesmanInstance(Instance): @@ -82,9 +120,49 @@ class TravelingSalesmanInstance(Instance): """ def __init__(self, n_cities, distances): - assert isinstance(distances, np.array) + assert isinstance(distances, np.ndarray) assert distances.shape == (n_cities, n_cities) self.n_cities = n_cities self.distances = distances - pass + def to_model(self): + self.model = model = pe.ConcreteModel() + self.edges = edges = [(i,j) + for i in range(self.n_cities) + for j in range(i+1, self.n_cities)] + model.x = pe.Var(edges, domain=pe.Binary) + model.obj = pe.Objective(rule=lambda m : sum(m.x[i,j] * self.distances[i,j] + for (i,j) in edges), + sense=pe.minimize) + model.eq_degree = pe.ConstraintList() + model.eq_subtour = pe.ConstraintList() + for i in range(self.n_cities): + model.eq_degree.add(sum(model.x[min(i,j), max(i,j)] + for j in range(self.n_cities) if i != j) == 2) + return model + + def get_instance_features(self): + return np.array([1]) + + def get_variable_features(self, var, index): + return np.array([1]) + + def get_variable_category(self, var, index): + return index + + def find_violations(self, model): + selected_edges = [e for e in self.edges if model.x[e].value > 0.5] + graph = nx.Graph() + graph.add_edges_from(selected_edges) + components = [frozenset(c) for c in list(nx.connected_components(graph))] + violations = [] + for c in components: + if len(c) < self.n_cities: + violations += [c] + return violations + + def build_lazy_constraint(self, model, component): + cut_edges = [e for e in self.edges + if (e[0] in component and e[1] not in component) or + (e[0] not in component and e[1] in component)] + return model.eq_subtour.add(sum(model.x[e] for e in cut_edges) >= 2) diff --git a/miplearn/solvers.py b/miplearn/solvers.py index 7ff976c..d410cab 100644 --- a/miplearn/solvers.py +++ b/miplearn/solvers.py @@ -2,7 +2,7 @@ # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. # Released under the modified BSD license. See COPYING.md for more details. -from . import ObjectiveValueComponent, PrimalSolutionComponent +from . 
import ObjectiveValueComponent, PrimalSolutionComponent, LazyConstraintsComponent import pyomo.environ as pe from pyomo.core import Var from copy import deepcopy @@ -88,6 +88,9 @@ class InternalSolver: self.solver.update_var(var[index]) logger.info("Fixing values for %d variables (out of %d)" % (count_fixed, count_total)) + + def add_constraint(self, cut): + self.solver.add_constraint(cut) class GurobiSolver(InternalSolver): @@ -198,6 +201,7 @@ class LearningSolver: self.components = { "ObjectiveValue": ObjectiveValueComponent(), "PrimalSolution": PrimalSolutionComponent(), + "LazyConstraints": LazyConstraintsComponent(), } assert self.mode in ["exact", "heuristic"] @@ -231,27 +235,44 @@ class LearningSolver: self.internal_solver = self._create_internal_solver() self.internal_solver.set_model(model) - # Solve LP relaxation + logger.debug("Solving LP relaxation...") results = self.internal_solver.solve_lp(tee=tee) instance.lp_solution = self.internal_solver.get_solution() instance.lp_value = results["Optimal value"] - # Invoke before_solve callbacks + logger.debug("Running before_solve callbacks...") for component in self.components.values(): component.before_solve(self, instance, model) if relaxation_only: return results - # Solver original MIP - results = self.internal_solver.solve(tee=tee) + total_wallclock_time = 0 + instance.found_violations = [] + while True: + logger.debug("Solving MIP...") + results = self.internal_solver.solve(tee=tee) + logger.debug(" %.2f s" % results["Wallclock time"]) + total_wallclock_time += results["Wallclock time"] + if not hasattr(instance, "find_violations"): + break + logger.debug("Finding violated constraints...") + violations = instance.find_violations(model) + if len(violations) == 0: + break + instance.found_violations += violations + logger.debug(" %d violations found" % len(violations)) + for v in violations: + cut = instance.build_lazy_constraint(model, v) + self.internal_solver.add_constraint(cut) + results["Wallclock time"] = total_wallclock_time # Read MIP solution and bounds instance.lower_bound = results["Lower bound"] instance.upper_bound = results["Upper bound"] instance.solution = self.internal_solver.get_solution() - # Invoke after_solve callbacks + logger.debug("Calling after_solve callbacks...") for component in self.components.values(): component.after_solve(self, instance, model) @@ -282,6 +303,7 @@ class LearningSolver: "LP value": instance.lp_value, "Upper bound": instance.upper_bound, "Lower bound": instance.lower_bound, + "Violations": instance.found_violations, } p_map_results = p_map(_process, instances, num_cpus=n_jobs, desc=label) @@ -294,12 +316,7 @@ class LearningSolver: instances[idx].lp_value = r["LP value"] instances[idx].lower_bound = r["Lower bound"] instances[idx].upper_bound = r["Upper bound"] - - for (name, component) in self.components.items(): - subcomponents = [subsolver.components[name] - for subsolver in subsolvers - if name in subsolver.components.keys()] - self.components[name].merge(subcomponents) + instances[idx].found_violations = r["Violations"] return results From 68e972c6351b74279bab7a8dc192f85e0ef92445 Mon Sep 17 00:00:00 2001 From: Alinson S Xavier Date: Mon, 24 Feb 2020 22:02:16 -0600 Subject: [PATCH 4/8] Docs: Add TSP --- docs-src/problems.md | 32 ++++++++++++++++++++++++++ docs/404.html | 16 +++++-------- docs/about/index.html | 16 +++++-------- docs/benchmark/index.html | 16 +++++-------- docs/css/cinder.css | 18 +++++---------- docs/css/cinder.min.css | 2 +- docs/customization/index.html | 16 
+++++--------
 docs/index.html | 18 ++++++---------
 docs/problems/index.html | 41 +++++++++++++++++++++++++---------
 docs/search/search_index.json | 2 +-
 docs/sitemap.xml.gz | Bin 198 -> 198 bytes
 docs/usage/index.html | 16 +++++--------
 12 files changed, 107 insertions(+), 86 deletions(-)

diff --git a/docs-src/problems.md b/docs-src/problems.md
index c1e9872..57da711 100644
--- a/docs-src/problems.md
+++ b/docs-src/problems.md
@@ -20,6 +20,8 @@ To illustrate the performance of `LearningSolver`, and to set a baseline for new
 All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz). All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously at a time.
 
+
+
 ## Maximum Weight Stable Set Problem
 
 ### Problem definition
@@ -45,6 +47,8 @@ MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.),
 
 ![alt](figures/benchmark_stab_a.png)
 
+
+
 ## Multidimensional 0-1 Knapsack Problem
 
 ### Problem definition
@@ -115,3 +119,31 @@ MultiKnapsackGenerator(n=randint(low=250, high=251),
 ```
 
 ![alt](figures/benchmark_knapsack_a.png)
+
+
+
+## Traveling Salesman Problem
+
+### Problem definition
+
+Given a list of cities and the distance between each pair of cities, the problem asks for the
+shortest route starting at the first city, visiting each other city exactly once, then returning
+to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's
+21 NP-complete problems.
+
+### Random problem generator
+
+The class `TravelingSalesmanGenerator` can be used to generate random instances of this
+problem. Initially, the generator creates $n$ cities $(x_1,y_1),\ldots,(x_n,y_n) \in \mathbb{R}^2$,
+where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions `n`,
+`x` and `y`. For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to:
+$$
+    d_{i,j} = \gamma_{i,j} \sqrt{(x_i-x_j)^2 + (y_i - y_j)^2}
+$$
+where $\gamma_{i,j}$ is sampled from the distribution `gamma`.
+
+If `fix_cities=True` is provided, the list of cities is kept the same for all generated instances.
+The $\gamma$ values, and therefore also the distances, are still different.
+
+By default, all distances $d_{i,j}$ are rounded to the nearest integer. If `round=False`
+is provided, this rounding will be disabled.
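
For reference, the Pyomo model assembled in `to_model`, together with the cuts added lazily by
`build_lazy_constraint`, corresponds to the standard undirected TSP formulation below. The notation
is introduced here only for this note: V denotes the set of cities, d the distance matrix, and the
cut-set constraints are generated only when `find_violations` detects a subtour.

$$
\min \sum_{\{i,j\} \subseteq V} d_{ij} x_{ij}
\quad \text{s.t.} \quad
\sum_{j \neq i} x_{ij} = 2 \;\; \forall i \in V,
\qquad
\sum_{i \in S,\, j \notin S} x_{ij} \ge 2 \;\; \forall\, \emptyset \neq S \subsetneq V,
\qquad
x_{ij} \in \{0,1\}.
$$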
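
The pieces introduced in this series fit together roughly as in the sketch below. It is
illustrative only: it assumes the package is installed with a MIP solver available to Pyomo, and
the distribution parameters are arbitrary rather than the benchmark settings used in ChallengeA.

    from scipy.stats import uniform, randint
    from miplearn import LearningSolver
    from miplearn.problems.tsp import TravelingSalesmanGenerator

    # Sample a few instances with 50 cities each; with fix_cities=True the
    # coordinates stay the same across instances, while the gamma perturbation
    # (and therefore the distance matrix) is resampled every time.
    generator = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
                                           y=uniform(loc=0.0, scale=1000.0),
                                           n=randint(low=50, high=51),
                                           gamma=uniform(loc=0.95, scale=0.1),
                                           fix_cities=True,
                                           round=True)
    instances = generator.generate(5)

    # Solve one instance. LearningSolver.solve re-solves the MIP in a loop,
    # calling instance.find_violations after each solve and adding the
    # corresponding subtour cuts until no violation remains.
    solver = LearningSolver()
    solver.solve(instances[0])
    print(instances[0].lower_bound, instances[0].upper_bound)
    print(instances[0].found_violations)   # subtours found along the way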
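
Similarly, the new LazyConstraintsComponent does little more than pool the violations recorded on
solved training instances and enforce them up front on later solves. The sketch below uses a
stand-in class, which is hypothetical and only serves to show the fit/predict contract:

    from miplearn import LazyConstraintsComponent

    class FakeInstance:
        """Hypothetical stand-in for a solved training instance."""
        pass

    a, b = FakeInstance(), FakeInstance()
    a.found_violations = [frozenset({0, 1, 2})]   # subtours seen while solving a
    b.found_violations = [frozenset({3, 4, 5})]   # subtours seen while solving b

    component = LazyConstraintsComponent()
    component.fit([a, b])

    # predict() returns the union of everything seen during training; these are
    # the violations that before_solve() turns into lazy constraints via
    # instance.build_lazy_constraint on the next model it sees.
    print(component.predict(a))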