From ef180b6f374d839d9746c9719588aad62ce0b7a3 Mon Sep 17 00:00:00 2001 From: Alinson S Xavier Date: Sat, 29 Aug 2020 18:20:22 -0500 Subject: [PATCH] Update --- 0.1/404.html | 12 +- 0.1/about/index.html | 14 +- 0.1/benchmark/index.html | 14 +- 0.1/customization/index.html | 14 +- 0.1/index.html | 18 +- 0.1/problems/index.html | 14 +- 0.1/search/lunr.js | 779 ++++++++++++++++++++++++++++------- 0.1/search/main.js | 4 +- 0.1/search/search_index.json | 2 +- 0.1/search/worker.js | 2 + 0.1/sitemap.xml | 18 +- 0.1/sitemap.xml.gz | Bin 198 -> 197 bytes 0.1/usage/index.html | 30 +- dev/404.html | 12 +- dev/about/index.html | 14 +- dev/benchmark/index.html | 14 +- dev/customization/index.html | 14 +- dev/index.html | 18 +- dev/problems/index.html | 14 +- dev/search/lunr.js | 779 ++++++++++++++++++++++++++++------- dev/search/main.js | 4 +- dev/search/search_index.json | 2 +- dev/search/worker.js | 2 + dev/sitemap.xml | 18 +- dev/sitemap.xml.gz | Bin 198 -> 197 bytes dev/usage/index.html | 14 +- 26 files changed, 1465 insertions(+), 361 deletions(-) diff --git a/0.1/404.html b/0.1/404.html index 7dbe612..8f5179d 100644 --- a/0.1/404.html +++ b/0.1/404.html @@ -8,7 +8,7 @@ - + MIPLearn @@ -24,6 +24,10 @@ + + + + @@ -143,7 +147,13 @@ + + + + + + diff --git a/0.1/about/index.html b/0.1/about/index.html index c71ae86..9347b63 100644 --- a/0.1/about/index.html +++ b/0.1/about/index.html @@ -8,7 +8,7 @@ - + About - MIPLearn @@ -24,6 +24,10 @@ + + + + @@ -118,7 +122,7 @@
  • - Edit on GitHub + Edit on GitHub
  • @@ -201,7 +205,13 @@ POSSIBILITY OF SUCH DAMAGE. + + + + + + diff --git a/0.1/benchmark/index.html b/0.1/benchmark/index.html index 2f672ad..e7b5fa6 100644 --- a/0.1/benchmark/index.html +++ b/0.1/benchmark/index.html @@ -8,7 +8,7 @@ - + Benchmark - MIPLearn @@ -24,6 +24,10 @@ + + + + @@ -118,7 +122,7 @@
  • - Edit on GitHub + Edit on GitHub
  • @@ -207,7 +211,13 @@ benchmark.parallel_solve(test_instances) + + + + + + diff --git a/0.1/customization/index.html b/0.1/customization/index.html index b43e1c4..6769cff 100644 --- a/0.1/customization/index.html +++ b/0.1/customization/index.html @@ -8,7 +8,7 @@ - + Customization - MIPLearn @@ -24,6 +24,10 @@ + + + + @@ -118,7 +122,7 @@
  • - Edit on GitHub + Edit on GitHub
  • @@ -301,7 +305,13 @@ comp.fit(train_instances) + + + + + + diff --git a/0.1/index.html b/0.1/index.html index 59c9bc8..535a337 100644 --- a/0.1/index.html +++ b/0.1/index.html @@ -8,7 +8,7 @@ - + Home - MIPLearn @@ -24,6 +24,10 @@ + + + + @@ -118,7 +122,7 @@
  • - Edit on GitHub + Edit on GitHub
  • @@ -189,7 +193,13 @@ + + + + + + @@ -268,6 +278,6 @@ diff --git a/0.1/problems/index.html b/0.1/problems/index.html index 9ccf991..70c25eb 100644 --- a/0.1/problems/index.html +++ b/0.1/problems/index.html @@ -8,7 +8,7 @@ - + Problems - MIPLearn @@ -24,6 +24,10 @@ + + + + @@ -118,7 +122,7 @@
  • - Edit on GitHub + Edit on GitHub
  • @@ -301,7 +305,13 @@ from the provided probability distributions K and u. + + + + + + diff --git a/0.1/search/lunr.js b/0.1/search/lunr.js index c218cc8..c353765 100644 --- a/0.1/search/lunr.js +++ b/0.1/search/lunr.js @@ -1,6 +1,6 @@ /** - * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.1.6 - * Copyright (C) 2018 Oliver Nightingale + * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.8 + * Copyright (C) 2019 Oliver Nightingale * @license MIT */ @@ -54,14 +54,15 @@ var lunr = function (config) { return builder.build() } -lunr.version = "2.1.6" +lunr.version = "2.3.8" /*! * lunr.utils - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** * A namespace containing utils for the rest of the lunr library + * @namespace lunr.utils */ lunr.utils = {} @@ -69,7 +70,8 @@ lunr.utils = {} * Print a warning message to the console. * * @param {String} message The message to be printed. - * @memberOf Utils + * @memberOf lunr.utils + * @function */ lunr.utils.warn = (function (global) { /* eslint-disable no-console */ @@ -90,7 +92,7 @@ lunr.utils.warn = (function (global) { * * @param {Any} obj The object to convert to a string. * @return {String} string representation of the passed object. - * @memberOf Utils + * @memberOf lunr.utils */ lunr.utils.asString = function (obj) { if (obj === void 0 || obj === null) { @@ -99,6 +101,52 @@ lunr.utils.asString = function (obj) { return obj.toString() } } + +/** + * Clones an object. + * + * Will create a copy of an existing object such that any mutations + * on the copy cannot affect the original. + * + * Only shallow objects are supported, passing a nested object to this + * function will cause a TypeError. + * + * Objects with primitives, and arrays of primitives are supported. + * + * @param {Object} obj The object to clone. + * @return {Object} a clone of the passed object. + * @throws {TypeError} when a nested object is passed. + * @memberOf Utils + */ +lunr.utils.clone = function (obj) { + if (obj === null || obj === undefined) { + return obj + } + + var clone = Object.create(null), + keys = Object.keys(obj) + + for (var i = 0; i < keys.length; i++) { + var key = keys[i], + val = obj[key] + + if (Array.isArray(val)) { + clone[key] = val.slice() + continue + } + + if (typeof val === 'string' || + typeof val === 'number' || + typeof val === 'boolean') { + clone[key] = val + continue + } + + throw new TypeError("clone is not deep and does not support nested objects") + } + + return clone +} lunr.FieldRef = function (docRef, fieldName, stringValue) { this.docRef = docRef this.fieldName = fieldName @@ -127,6 +175,139 @@ lunr.FieldRef.prototype.toString = function () { return this._stringValue } +/*! + * lunr.Set + * Copyright (C) 2019 Oliver Nightingale + */ + +/** + * A lunr set. + * + * @constructor + */ +lunr.Set = function (elements) { + this.elements = Object.create(null) + + if (elements) { + this.length = elements.length + + for (var i = 0; i < this.length; i++) { + this.elements[elements[i]] = true + } + } else { + this.length = 0 + } +} + +/** + * A complete set that contains all elements. + * + * @static + * @readonly + * @type {lunr.Set} + */ +lunr.Set.complete = { + intersect: function (other) { + return other + }, + + union: function (other) { + return other + }, + + contains: function () { + return true + } +} + +/** + * An empty set that contains no elements. 
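+ * As an illustrative sketch of the contract (not itself part of the
+ * upstream change): empty.intersect(s) is again the empty set, while
+ * empty.union(s) returns s unchanged.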
+ * + * @static + * @readonly + * @type {lunr.Set} + */ +lunr.Set.empty = { + intersect: function () { + return this + }, + + union: function (other) { + return other + }, + + contains: function () { + return false + } +} + +/** + * Returns true if this set contains the specified object. + * + * @param {object} object - Object whose presence in this set is to be tested. + * @returns {boolean} - True if this set contains the specified object. + */ +lunr.Set.prototype.contains = function (object) { + return !!this.elements[object] +} + +/** + * Returns a new set containing only the elements that are present in both + * this set and the specified set. + * + * @param {lunr.Set} other - set to intersect with this set. + * @returns {lunr.Set} a new set that is the intersection of this and the specified set. + */ + +lunr.Set.prototype.intersect = function (other) { + var a, b, elements, intersection = [] + + if (other === lunr.Set.complete) { + return this + } + + if (other === lunr.Set.empty) { + return other + } + + if (this.length < other.length) { + a = this + b = other + } else { + a = other + b = this + } + + elements = Object.keys(a.elements) + + for (var i = 0; i < elements.length; i++) { + var element = elements[i] + if (element in b.elements) { + intersection.push(element) + } + } + + return new lunr.Set (intersection) +} + +/** + * Returns a new set combining the elements of this and the specified set. + * + * @param {lunr.Set} other - set to union with this set. + * @return {lunr.Set} a new set that is the union of this and the specified set. + */ + +lunr.Set.prototype.union = function (other) { + if (other === lunr.Set.complete) { + return lunr.Set.complete + } + + if (other === lunr.Set.empty) { + return this + } + + return new lunr.Set(Object.keys(this.elements).concat(Object.keys(other.elements))) +} /** * A function to calculate the inverse document frequency for * a posting. This is shared between the builder and the index @@ -208,7 +389,7 @@ lunr.Token.prototype.clone = function (fn) { } /*! * lunr.tokenizer - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -220,22 +401,30 @@ lunr.Token.prototype.clone = function (fn) { * then will split this string on the character in `lunr.tokenizer.separator`. * Arrays will have their elements converted to strings and wrapped in a lunr.Token. * + * Optional metadata can be passed to the tokenizer, this metadata will be cloned and + * added as metadata to every token that is created from the object to be tokenized. 
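+ * For instance (a hypothetical call, shown with the position and index
+ * metadata the tokenizer itself records):
+ *
+ *     lunr.tokenizer("foo bar", {field: "title"})[0].metadata
+ *     // => {field: "title", position: [0, 3], index: 0}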
+ * * @static * @param {?(string|object|object[])} obj - The object to convert into tokens + * @param {?object} metadata - Optional metadata to associate with every token * @returns {lunr.Token[]} + * @see {@link lunr.Pipeline} */ -lunr.tokenizer = function (obj) { +lunr.tokenizer = function (obj, metadata) { if (obj == null || obj == undefined) { return [] } if (Array.isArray(obj)) { return obj.map(function (t) { - return new lunr.Token(lunr.utils.asString(t).toLowerCase()) + return new lunr.Token( + lunr.utils.asString(t).toLowerCase(), + lunr.utils.clone(metadata) + ) }) } - var str = obj.toString().trim().toLowerCase(), + var str = obj.toString().toLowerCase(), len = str.length, tokens = [] @@ -246,11 +435,15 @@ lunr.tokenizer = function (obj) { if ((char.match(lunr.tokenizer.separator) || sliceEnd == len)) { if (sliceLength > 0) { + var tokenMetadata = lunr.utils.clone(metadata) || {} + tokenMetadata["position"] = [sliceStart, sliceLength] + tokenMetadata["index"] = tokens.length + tokens.push( - new lunr.Token (str.slice(sliceStart, sliceEnd), { - position: [sliceStart, sliceLength], - index: tokens.length - }) + new lunr.Token ( + str.slice(sliceStart, sliceEnd), + tokenMetadata + ) ) } @@ -272,7 +465,7 @@ lunr.tokenizer = function (obj) { lunr.tokenizer.separator = /[\s\-]+/ /*! * lunr.Pipeline - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -316,8 +509,8 @@ lunr.Pipeline.registeredFunctions = Object.create(null) * or mutate (or add) metadata for a given token. * * A pipeline function can indicate that the passed token should be discarded by returning - * null. This token will not be passed to any downstream pipeline functions and will not be - * added to the index. + * null, undefined or an empty string. This token will not be passed to any downstream pipeline + * functions and will not be added to the index. * * Multiple tokens can be returned by returning an array of tokens. Each token will be passed * to any downstream pipeline functions and all will returned tokens will be added to the index. @@ -480,9 +673,9 @@ lunr.Pipeline.prototype.run = function (tokens) { for (var j = 0; j < tokens.length; j++) { var result = fn(tokens[j], j, tokens) - if (result === void 0 || result === '') continue + if (result === null || result === void 0 || result === '') continue - if (result instanceof Array) { + if (Array.isArray(result)) { for (var k = 0; k < result.length; k++) { memo.push(result[k]) } @@ -503,10 +696,12 @@ lunr.Pipeline.prototype.run = function (tokens) { * token and mapping the resulting tokens back to strings. * * @param {string} str - The string to pass through the pipeline. + * @param {?object} metadata - Optional metadata to associate with the token + * passed to the pipeline. * @returns {string[]} */ -lunr.Pipeline.prototype.runString = function (str) { - var token = new lunr.Token (str) +lunr.Pipeline.prototype.runString = function (str, metadata) { + var token = new lunr.Token (str, metadata) return this.run([token]).map(function (t) { return t.toString() @@ -537,7 +732,7 @@ lunr.Pipeline.prototype.toJSON = function () { } /*! * lunr.Vector - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -698,15 +893,14 @@ lunr.Vector.prototype.dot = function (otherVector) { } /** - * Calculates the cosine similarity between this vector and another - * vector. + * Calculates the similarity between this vector and another vector. 
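+ * Note: only this vector's magnitude is divided out; the other
+ * vector's magnitude is not normalised away, and a zero magnitude
+ * yields 0 rather than NaN (the `|| 0` guard).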
* * @param {lunr.Vector} otherVector - The other vector to calculate the * similarity with. * @returns {Number} */ lunr.Vector.prototype.similarity = function (otherVector) { - return this.dot(otherVector) / (this.magnitude() * otherVector.magnitude()) + return this.dot(otherVector) / this.magnitude() || 0 } /** @@ -735,7 +929,7 @@ lunr.Vector.prototype.toJSON = function () { /* eslint-disable */ /*! * lunr.stemmer - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt */ @@ -748,6 +942,7 @@ lunr.Vector.prototype.toJSON = function () { * @param {lunr.Token} token - The string to stem * @returns {lunr.Token} * @see {@link lunr.Pipeline} + * @function */ lunr.stemmer = (function(){ var step2list = { @@ -956,7 +1151,7 @@ lunr.stemmer = (function(){ lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer') /*! * lunr.stopWordFilter - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -966,6 +1161,7 @@ lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer') * The built in lunr.stopWordFilter is built using this generator and can be used * to generate custom stopWordFilters for applications or non English languages. * + * @function * @param {Array} token The token to pass through the filter * @returns {lunr.PipelineFunction} * @see lunr.Pipeline @@ -989,6 +1185,7 @@ lunr.generateStopWordFilter = function (stopWords) { * This is intended to be used in the Pipeline. If the token does not pass the * filter then undefined will be returned. * + * @function * @implements {lunr.PipelineFunction} * @params {lunr.Token} token - A token to check for being a stop word. * @returns {lunr.Token} @@ -1119,7 +1316,7 @@ lunr.stopWordFilter = lunr.generateStopWordFilter([ lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter') /*! * lunr.trimmer - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -1146,7 +1343,7 @@ lunr.trimmer = function (token) { lunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer') /*! 
* lunr.TokenSet - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -1263,50 +1460,58 @@ lunr.TokenSet.fromFuzzyString = function (str, editDistance) { if (frame.str.length == 1) { noEditNode.final = true - } else { - stack.push({ - node: noEditNode, - editsRemaining: frame.editsRemaining, - str: frame.str.slice(1) - }) } + + stack.push({ + node: noEditNode, + editsRemaining: frame.editsRemaining, + str: frame.str.slice(1) + }) + } + + if (frame.editsRemaining == 0) { + continue + } + + // insertion + if ("*" in frame.node.edges) { + var insertionNode = frame.node.edges["*"] + } else { + var insertionNode = new lunr.TokenSet + frame.node.edges["*"] = insertionNode } + if (frame.str.length == 0) { + insertionNode.final = true + } + + stack.push({ + node: insertionNode, + editsRemaining: frame.editsRemaining - 1, + str: frame.str + }) + // deletion // can only do a deletion if we have enough edits remaining // and if there are characters left to delete in the string - if (frame.editsRemaining > 0 && frame.str.length > 1) { - var char = frame.str.charAt(1), - deletionNode - - if (char in frame.node.edges) { - deletionNode = frame.node.edges[char] - } else { - deletionNode = new lunr.TokenSet - frame.node.edges[char] = deletionNode - } - - if (frame.str.length <= 2) { - deletionNode.final = true - } else { - stack.push({ - node: deletionNode, - editsRemaining: frame.editsRemaining - 1, - str: frame.str.slice(2) - }) - } + if (frame.str.length > 1) { + stack.push({ + node: frame.node, + editsRemaining: frame.editsRemaining - 1, + str: frame.str.slice(1) + }) } // deletion // just removing the last character from the str - if (frame.editsRemaining > 0 && frame.str.length == 1) { + if (frame.str.length == 1) { frame.node.final = true } // substitution // can only do a substitution if we have enough edits remaining // and if there are characters left to substitute - if (frame.editsRemaining > 0 && frame.str.length >= 1) { + if (frame.str.length >= 1) { if ("*" in frame.node.edges) { var substitutionNode = frame.node.edges["*"] } else { @@ -1316,40 +1521,19 @@ lunr.TokenSet.fromFuzzyString = function (str, editDistance) { if (frame.str.length == 1) { substitutionNode.final = true - } else { - stack.push({ - node: substitutionNode, - editsRemaining: frame.editsRemaining - 1, - str: frame.str.slice(1) - }) } - } - // insertion - // can only do insertion if there are edits remaining - if (frame.editsRemaining > 0) { - if ("*" in frame.node.edges) { - var insertionNode = frame.node.edges["*"] - } else { - var insertionNode = new lunr.TokenSet - frame.node.edges["*"] = insertionNode - } - - if (frame.str.length == 0) { - insertionNode.final = true - } else { - stack.push({ - node: insertionNode, - editsRemaining: frame.editsRemaining - 1, - str: frame.str - }) - } + stack.push({ + node: substitutionNode, + editsRemaining: frame.editsRemaining - 1, + str: frame.str.slice(1) + }) } // transposition // can only do a transposition if there are edits remaining // and there are enough characters to transpose - if (frame.editsRemaining > 0 && frame.str.length > 1) { + if (frame.str.length > 1) { var charA = frame.str.charAt(0), charB = frame.str.charAt(1), transposeNode @@ -1363,13 +1547,13 @@ lunr.TokenSet.fromFuzzyString = function (str, editDistance) { if (frame.str.length == 1) { transposeNode.final = true - } else { - stack.push({ - node: transposeNode, - editsRemaining: frame.editsRemaining - 1, - str: charA + frame.str.slice(2) - }) } + + stack.push({ + node: 
transposeNode, + editsRemaining: frame.editsRemaining - 1, + str: charA + frame.str.slice(2) + }) } } @@ -1388,14 +1572,13 @@ lunr.TokenSet.fromFuzzyString = function (str, editDistance) { */ lunr.TokenSet.fromString = function (str) { var node = new lunr.TokenSet, - root = node, - wildcardFound = false + root = node /* * Iterates through all characters within the passed string * appending a node for each character. * - * As soon as a wildcard character is found then a self + * When a wildcard character is found then a self * referencing edge is introduced to continually match * any number of any characters. */ @@ -1404,7 +1587,6 @@ lunr.TokenSet.fromString = function (str) { final = (i == len - 1) if (char == "*") { - wildcardFound = true node.edges[char] = node node.final = final @@ -1414,11 +1596,6 @@ lunr.TokenSet.fromString = function (str) { node.edges[char] = next node = next - - // TODO: is this needed anymore? - if (wildcardFound) { - node.edges["*"] = root - } } } @@ -1429,6 +1606,10 @@ lunr.TokenSet.fromString = function (str) { * Converts this TokenSet into an array of strings * contained within the TokenSet. * + * This is not intended to be used on a TokenSet that + * contains wildcards, in these cases the results are + * undefined and are likely to cause an infinite loop. + * * @returns {string[]} */ lunr.TokenSet.prototype.toArray = function () { @@ -1445,6 +1626,11 @@ lunr.TokenSet.prototype.toArray = function () { len = edges.length if (frame.node.final) { + /* In Safari, at this point the prefix is sometimes corrupted, see: + * https://github.com/olivernn/lunr.js/issues/279 Calling any + * String.prototype method forces Safari to "cast" this string to what + * it's supposed to be, fixing the bug. */ + frame.prefix.charAt(0) words.push(frame.prefix) } @@ -1641,7 +1827,7 @@ lunr.TokenSet.Builder.prototype.minimize = function (downTo) { } /*! * lunr.Index - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -1655,7 +1841,7 @@ lunr.TokenSet.Builder.prototype.minimize = function (downTo) { * @constructor * @param {Object} attrs - The attributes of the built search index. * @param {Object} attrs.invertedIndex - An index of term/field to document reference. - * @param {Object} attrs.documentVectors - Document vectors keyed by document reference. + * @param {Object} attrs.fieldVectors - Field vectors * @param {lunr.TokenSet} attrs.tokenSet - An set of all corpus tokens. * @param {string[]} attrs.fields - The names of indexed document fields. * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms. @@ -1701,6 +1887,12 @@ lunr.Index = function (attrs) { * to provide fuzzy matching, e.g. 'hello~2' will match documents with hello with an edit distance of 2. * Avoid large values for edit distance to improve query performance. * + * Each term also supports a presence modifier. By default a term's presence in document is optional, however + * this can be changed to either required or prohibited. For a term's presence to be required in a document the + * term should be prefixed with a '+', e.g. `+foo bar` is a search for documents that must contain 'foo' and + * optionally contain 'bar'. Conversely a leading '-' sets the terms presence to prohibited, i.e. it must not + * appear in a document, e.g. `-foo bar` is a search for documents that do not contain 'foo' but may contain 'bar'. 
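+ * Combining the two (a hypothetical query): `+foo -baz bar` returns
+ * only documents that contain 'foo' and lack 'baz', with 'bar' left
+ * optional and contributing only to scoring.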
+ * * To escape special characters the backslash character '\' can be used, this allows searches to include * characters that would normally be considered modifiers, e.g. `foo\~2` will search for a term "foo~2" instead * of attempting to apply a boost of 2 to the search term "foo". @@ -1716,13 +1908,16 @@ lunr.Index = function (attrs) { * hello^10 * @example term with an edit distance of 2 * hello~2 + * @example terms with presence modifiers + * -foo +bar baz */ /** * Performs a search against the index using lunr query syntax. * * Results will be returned sorted by their score, the most relevant results - * will be returned first. + * will be returned first. For details on how the score is calculated, please see + * the {@link https://lunrjs.com/guides/searching.html#scoring|guide}. * * For more programmatic querying use lunr.Index#query. * @@ -1773,7 +1968,18 @@ lunr.Index.prototype.query = function (fn) { var query = new lunr.Query(this.fields), matchingFields = Object.create(null), queryVectors = Object.create(null), - termFieldCache = Object.create(null) + termFieldCache = Object.create(null), + requiredMatches = Object.create(null), + prohibitedMatches = Object.create(null) + + /* + * To support field level boosts a query vector is created per + * field. An empty vector is eagerly created to support negated + * queries. + */ + for (var i = 0; i < this.fields.length; i++) { + queryVectors[this.fields[i]] = new lunr.Vector + } fn.call(query, query) @@ -1787,10 +1993,13 @@ lunr.Index.prototype.query = function (fn) { * for a single query term. */ var clause = query.clauses[i], - terms = null + terms = null, + clauseMatches = lunr.Set.complete if (clause.usePipeline) { - terms = this.pipeline.runString(clause.term) + terms = this.pipeline.runString(clause.term, { + fields: clause.fields + }) } else { terms = [clause.term] } @@ -1814,6 +2023,21 @@ lunr.Index.prototype.query = function (fn) { var termTokenSet = lunr.TokenSet.fromClause(clause), expandedTerms = this.tokenSet.intersect(termTokenSet).toArray() + /* + * If a term marked as required does not exist in the tokenSet it is + * impossible for the search to return any matches. We set all the field + * scoped required matches set to empty and stop examining any further + * clauses. + */ + if (expandedTerms.length === 0 && clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = lunr.Set.empty + } + + break + } + for (var j = 0; j < expandedTerms.length; j++) { /* * For each term get the posting and termIndex, this is required for @@ -1835,26 +2059,50 @@ lunr.Index.prototype.query = function (fn) { var field = clause.fields[k], fieldPosting = posting[field], matchingDocumentRefs = Object.keys(fieldPosting), - termField = expandedTerm + "/" + field + termField = expandedTerm + "/" + field, + matchingDocumentsSet = new lunr.Set(matchingDocumentRefs) /* - * To support field level boosts a query vector is created per - * field. This vector is populated using the termIndex found for - * the term and a unit value with the appropriate boost applied. + * if the presence of this term is required ensure that the matching + * documents are added to the set of required matches for this clause. * - * If the query vector for this field does not exist yet it needs - * to be created. 
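+ * e.g. for a hypothetical clause +foo over fields title and body,
+ * every document matching 'foo' in either field is unioned into
+ * clauseMatches, which is intersected into requiredMatches below.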
*/ - if (queryVectors[field] === undefined) { - queryVectors[field] = new lunr.Vector + if (clause.presence == lunr.Query.presence.REQUIRED) { + clauseMatches = clauseMatches.union(matchingDocumentsSet) + + if (requiredMatches[field] === undefined) { + requiredMatches[field] = lunr.Set.complete + } + } + + /* + * if the presence of this term is prohibited ensure that the matching + * documents are added to the set of prohibited matches for this field, + * creating that set if it does not yet exist. + */ + if (clause.presence == lunr.Query.presence.PROHIBITED) { + if (prohibitedMatches[field] === undefined) { + prohibitedMatches[field] = lunr.Set.empty + } + + prohibitedMatches[field] = prohibitedMatches[field].union(matchingDocumentsSet) + + /* + * Prohibited matches should not be part of the query vector used for + * similarity scoring and no metadata should be extracted so we continue + * to the next field + */ + continue } /* + * The query field vector is populated using the termIndex found for + * the term and a unit value with the appropriate boost applied. * Using upsert because there could already be an entry in the vector * for the term we are working with. In that case we just add the scores * together. */ - queryVectors[field].upsert(termIndex, 1 * clause.boost, function (a, b) { return a + b }) + queryVectors[field].upsert(termIndex, clause.boost, function (a, b) { return a + b }) /** * If we've already seen this term, field combo then we've already collected @@ -1888,12 +2136,65 @@ lunr.Index.prototype.query = function (fn) { } } } + + /** + * If the presence was required we need to update the requiredMatches field sets. + * We do this after all fields for the term have collected their matches because + * the clause terms presence is required in _any_ of the fields not _all_ of the + * fields. + */ + if (clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = requiredMatches[field].intersect(clauseMatches) + } + } + } + + /** + * Need to combine the field scoped required and prohibited + * matching documents into a global set of required and prohibited + * matches + */ + var allRequiredMatches = lunr.Set.complete, + allProhibitedMatches = lunr.Set.empty + + for (var i = 0; i < this.fields.length; i++) { + var field = this.fields[i] + + if (requiredMatches[field]) { + allRequiredMatches = allRequiredMatches.intersect(requiredMatches[field]) + } + + if (prohibitedMatches[field]) { + allProhibitedMatches = allProhibitedMatches.union(prohibitedMatches[field]) + } } var matchingFieldRefs = Object.keys(matchingFields), results = [], matches = Object.create(null) + /* + * If the query is negated (contains only prohibited terms) + * we need to get _all_ fieldRefs currently existing in the + * index. This is only done when we know that the query is + * entirely prohibited terms to avoid any cost of getting all + * fieldRefs unnecessarily. + * + * Additionally, blank MatchData must be created to correctly + * populate the results. 
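+ * e.g. a hypothetical idx.search("-foo") must return every document
+ * that lacks 'foo', which is only discoverable by walking all of
+ * the index's field vectors.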
+ */ + if (query.isNegated()) { + matchingFieldRefs = Object.keys(this.fieldVectors) + + for (var i = 0; i < matchingFieldRefs.length; i++) { + var matchingFieldRef = matchingFieldRefs[i] + var fieldRef = lunr.FieldRef.fromString(matchingFieldRef) + matchingFields[matchingFieldRef] = new lunr.MatchData + } + } + for (var i = 0; i < matchingFieldRefs.length; i++) { /* * Currently we have document fields that match the query, but we @@ -1904,8 +2205,17 @@ lunr.Index.prototype.query = function (fn) { * above, and combined into a final document score using addition. */ var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]), - docRef = fieldRef.docRef, - fieldVector = this.fieldVectors[fieldRef], + docRef = fieldRef.docRef + + if (!allRequiredMatches.contains(docRef)) { + continue + } + + if (allProhibitedMatches.contains(docRef)) { + continue + } + + var fieldVector = this.fieldVectors[fieldRef], score = queryVectors[fieldRef.fieldName].similarity(fieldVector), docMatch @@ -1970,7 +2280,7 @@ lunr.Index.load = function (serializedIndex) { var attrs = {}, fieldVectors = {}, serializedVectors = serializedIndex.fieldVectors, - invertedIndex = {}, + invertedIndex = Object.create(null), serializedInvertedIndex = serializedIndex.invertedIndex, tokenSetBuilder = new lunr.TokenSet.Builder, pipeline = lunr.Pipeline.load(serializedIndex.pipeline) @@ -2009,7 +2319,7 @@ lunr.Index.load = function (serializedIndex) { } /*! * lunr.Builder - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -2038,7 +2348,8 @@ lunr.Index.load = function (serializedIndex) { */ lunr.Builder = function () { this._ref = "id" - this._fields = [] + this._fields = Object.create(null) + this._documents = Object.create(null) this.invertedIndex = Object.create(null) this.fieldTermFrequencies = {} this.fieldLengths = {} @@ -2068,6 +2379,20 @@ lunr.Builder.prototype.ref = function (ref) { this._ref = ref } +/** + * A function that is used to extract a field from a document. + * + * Lunr expects a field to be at the top level of a document, if however the field + * is deeply nested within a document an extractor function can be used to extract + * the right field for indexing. + * + * @callback fieldExtractor + * @param {object} doc - The document being added to the index. + * @returns {?(string|object|object[])} obj - The object that will be indexed for this field. + * @example Extracting a nested field + * function (doc) { return doc.nested.field } + */ + /** * Adds a field to the list of document fields that will be indexed. Every document being * indexed should have this field. Null values for this field in indexed documents will @@ -2076,10 +2401,22 @@ lunr.Builder.prototype.ref = function (ref) { * All fields should be added before adding documents to the index. Adding fields after * a document has been indexed will have no effect on already indexed documents. * - * @param {string} field - The name of a field to index in all documents. + * Fields can be boosted at build time. This allows terms within that field to have more + * importance when ranking search results. Use a field boost to specify that matches within + * one field are more important than other fields. + * + * @param {string} fieldName - The name of a field to index in all documents. + * @param {object} attributes - Optional attributes associated with this field. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this field. 
+ * @param {fieldExtractor} [attributes.extractor] - Function to extract a field from a document. + * @throws {RangeError} fieldName cannot contain unsupported characters '/' */ -lunr.Builder.prototype.field = function (field) { - this._fields.push(field) +lunr.Builder.prototype.field = function (fieldName, attributes) { + if (/\//.test(fieldName)) { + throw new RangeError ("Field '" + fieldName + "' contains illegal character '/'") + } + + this._fields[fieldName] = attributes || {} } /** @@ -2121,17 +2458,27 @@ lunr.Builder.prototype.k1 = function (number) { * it should have all fields defined for indexing, though null or undefined values will not * cause errors. * + * Entire documents can be boosted at build time. Applying a boost to a document indicates that + * this document should rank higher in search results than other documents. + * * @param {object} doc - The document to add to the index. + * @param {object} attributes - Optional attributes associated with this document. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this document. */ -lunr.Builder.prototype.add = function (doc) { - var docRef = doc[this._ref] +lunr.Builder.prototype.add = function (doc, attributes) { + var docRef = doc[this._ref], + fields = Object.keys(this._fields) + this._documents[docRef] = attributes || {} this.documentCount += 1 - for (var i = 0; i < this._fields.length; i++) { - var fieldName = this._fields[i], - field = doc[fieldName], - tokens = this.tokenizer(field), + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i], + extractor = this._fields[fieldName].extractor, + field = extractor ? extractor(doc) : doc[fieldName], + tokens = this.tokenizer(field, { + fields: [fieldName] + }), terms = this.pipeline.run(tokens), fieldRef = new lunr.FieldRef (docRef, fieldName), fieldTerms = Object.create(null) @@ -2159,8 +2506,8 @@ lunr.Builder.prototype.add = function (doc) { posting["_index"] = this.termIndex this.termIndex += 1 - for (var k = 0; k < this._fields.length; k++) { - posting[this._fields[k]] = Object.create(null) + for (var k = 0; k < fields.length; k++) { + posting[fields[k]] = Object.create(null) } this.invertedIndex[term] = posting @@ -2211,9 +2558,11 @@ lunr.Builder.prototype.calculateAverageFieldLengths = function () { accumulator[field] += this.fieldLengths[fieldRef] } - for (var i = 0; i < this._fields.length; i++) { - var field = this._fields[i] - accumulator[field] = accumulator[field] / documentsWithField[field] + var fields = Object.keys(this._fields) + + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i] + accumulator[fieldName] = accumulator[fieldName] / documentsWithField[fieldName] } this.averageFieldLength = accumulator @@ -2232,13 +2581,17 @@ lunr.Builder.prototype.createFieldVectors = function () { for (var i = 0; i < fieldRefsLength; i++) { var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]), - field = fieldRef.fieldName, + fieldName = fieldRef.fieldName, fieldLength = this.fieldLengths[fieldRef], fieldVector = new lunr.Vector, termFrequencies = this.fieldTermFrequencies[fieldRef], terms = Object.keys(termFrequencies), termsLength = terms.length + + var fieldBoost = this._fields[fieldName].boost || 1, + docBoost = this._documents[fieldRef.docRef].boost || 1 + for (var j = 0; j < termsLength; j++) { var term = terms[j], tf = termFrequencies[term], @@ -2252,7 +2605,9 @@ lunr.Builder.prototype.createFieldVectors = function () { idf = termIdfCache[term] } - score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - 
this._b + this._b * (fieldLength / this.averageFieldLength[field])) + tf) + score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[fieldName])) + tf) + score *= fieldBoost + score *= docBoost scoreWithPrecision = Math.round(score * 1000) / 1000 // Converts 1.23456789 to 1.234. // Reducing the precision so that the vectors take up less @@ -2298,7 +2653,7 @@ lunr.Builder.prototype.build = function () { invertedIndex: this.invertedIndex, fieldVectors: this.fieldVectors, tokenSet: this.tokenSet, - fields: this._fields, + fields: Object.keys(this._fields), pipeline: this.searchPipeline }) } @@ -2336,7 +2691,7 @@ lunr.Builder.prototype.use = function (fn) { */ lunr.MatchData = function (term, field, metadata) { var clonedMetadata = Object.create(null), - metadataKeys = Object.keys(metadata) + metadataKeys = Object.keys(metadata || {}) // Cloning the metadata to prevent the original // being mutated during match data combination. @@ -2349,8 +2704,11 @@ lunr.MatchData = function (term, field, metadata) { } this.metadata = Object.create(null) - this.metadata[term] = Object.create(null) - this.metadata[term][field] = clonedMetadata + + if (term !== undefined) { + this.metadata[term] = Object.create(null) + this.metadata[term][field] = clonedMetadata + } } /** @@ -2465,11 +2823,42 @@ lunr.Query = function (allFields) { * wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING * }) */ + lunr.Query.wildcard = new String ("*") lunr.Query.wildcard.NONE = 0 lunr.Query.wildcard.LEADING = 1 lunr.Query.wildcard.TRAILING = 2 +/** + * Constants for indicating what kind of presence a term must have in matching documents. + * + * @constant + * @enum {number} + * @see lunr.Query~Clause + * @see lunr.Query#clause + * @see lunr.Query#term + * @example query term with required presence + * query.term('foo', { presence: lunr.Query.presence.REQUIRED }) + */ +lunr.Query.presence = { + /** + * Term's presence in a document is optional, this is the default value. + */ + OPTIONAL: 1, + + /** + * Term's presence in a document is required, documents that do not contain + * this term will not be returned. + */ + REQUIRED: 2, + + /** + * Term's presence in a document is prohibited, documents that do contain + * this term will not be returned. + */ + PROHIBITED: 3 +} + /** * A single clause in a {@link lunr.Query} contains a term and details on how to * match that term against a {@link lunr.Index}. @@ -2479,7 +2868,8 @@ lunr.Query.wildcard.TRAILING = 2 * @property {number} [boost=1] - Any boost that should be applied when matching this clause. * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be. * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline. - * @property {number} [wildcard=0] - Whether the term should have wildcards appended or prepended. + * @property {number} [wildcard=lunr.Query.wildcard.NONE] - Whether the term should have wildcards appended or prepended. + * @property {number} [presence=lunr.Query.presence.OPTIONAL] - The terms presence in any matching documents. */ /** @@ -2517,17 +2907,44 @@ lunr.Query.prototype.clause = function (clause) { clause.term = "" + clause.term + "*" } + if (!('presence' in clause)) { + clause.presence = lunr.Query.presence.OPTIONAL + } + this.clauses.push(clause) return this } +/** + * A negated query is one in which every clause has a presence of + * prohibited. 
These queries require some special processing to return + * the expected results. + * + * @returns boolean + */ +lunr.Query.prototype.isNegated = function () { + for (var i = 0; i < this.clauses.length; i++) { + if (this.clauses[i].presence != lunr.Query.presence.PROHIBITED) { + return false + } + } + + return true +} + /** * Adds a term to the current query, under the covers this will create a {@link lunr.Query~Clause} * to the list of clauses that make up this query. * - * @param {string} term - The term to add to the query. - * @param {Object} [options] - Any additional properties to add to the query clause. + * The term is used as is, i.e. no tokenization will be performed by this method. Instead conversion + * to a token or token-like string should be done before calling this method. + * + * The term will be converted to a string by calling `toString`. Multiple terms can be passed as an + * array, each term in the array will share the same options. + * + * @param {object|object[]} term - The term(s) to add to the query. + * @param {object} [options] - Any additional properties to add to the query clause. * @returns {lunr.Query} * @see lunr.Query#clause * @see lunr.Query~Clause @@ -2539,10 +2956,17 @@ lunr.Query.prototype.clause = function (clause) { * boost: 10, * wildcard: lunr.Query.wildcard.TRAILING * }) + * @example using lunr.tokenizer to convert a string to tokens before using them as terms + * query.term(lunr.tokenizer("foo bar")) */ lunr.Query.prototype.term = function (term, options) { + if (Array.isArray(term)) { + term.forEach(function (t) { this.term(t, lunr.utils.clone(options)) }, this) + return this + } + var clause = options || {} - clause.term = term + clause.term = term.toString() this.clause(clause) @@ -2654,6 +3078,7 @@ lunr.QueryLexer.FIELD = 'FIELD' lunr.QueryLexer.TERM = 'TERM' lunr.QueryLexer.EDIT_DISTANCE = 'EDIT_DISTANCE' lunr.QueryLexer.BOOST = 'BOOST' +lunr.QueryLexer.PRESENCE = 'PRESENCE' lunr.QueryLexer.lexField = function (lexer) { lexer.backup() @@ -2742,6 +3167,22 @@ lunr.QueryLexer.lexText = function (lexer) { return lunr.QueryLexer.lexBoost } + // "+" indicates term presence is required + // checking for length to ensure that only + // leading "+" are considered + if (char == "+" && lexer.width() === 1) { + lexer.emit(lunr.QueryLexer.PRESENCE) + return lunr.QueryLexer.lexText + } + + // "-" indicates term presence is prohibited + // checking for length to ensure that only + // leading "-" are considered + if (char == "-" && lexer.width() === 1) { + lexer.emit(lunr.QueryLexer.PRESENCE) + return lunr.QueryLexer.lexText + } + if (char.match(lunr.QueryLexer.termSeparator)) { return lunr.QueryLexer.lexTerm } @@ -2759,7 +3200,7 @@ lunr.QueryParser.prototype.parse = function () { this.lexer.run() this.lexemes = this.lexer.lexemes - var state = lunr.QueryParser.parseFieldOrTerm + var state = lunr.QueryParser.parseClause while (state) { state = state(this) @@ -2784,7 +3225,7 @@ lunr.QueryParser.prototype.nextClause = function () { this.currentClause = {} } -lunr.QueryParser.parseFieldOrTerm = function (parser) { +lunr.QueryParser.parseClause = function (parser) { var lexeme = parser.peekLexeme() if (lexeme == undefined) { @@ -2792,6 +3233,8 @@ lunr.QueryParser.parseFieldOrTerm = function (parser) { } switch (lexeme.type) { + case lunr.QueryLexer.PRESENCE: + return lunr.QueryParser.parsePresence case lunr.QueryLexer.FIELD: return lunr.QueryParser.parseField case lunr.QueryLexer.TERM: @@ -2807,6 +3250,43 @@ lunr.QueryParser.parseFieldOrTerm = function (parser) 
{ } } +lunr.QueryParser.parsePresence = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + switch (lexeme.str) { + case "-": + parser.currentClause.presence = lunr.Query.presence.PROHIBITED + break + case "+": + parser.currentClause.presence = lunr.Query.presence.REQUIRED + break + default: + var errorMessage = "unrecognised presence operator'" + lexeme.str + "'" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + var errorMessage = "expecting term or field, found nothing" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.FIELD: + return lunr.QueryParser.parseField + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expecting term or field, found '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + lunr.QueryParser.parseField = function (parser) { var lexeme = parser.consumeLexeme() @@ -2870,6 +3350,9 @@ lunr.QueryParser.parseTerm = function (parser) { return lunr.QueryParser.parseEditDistance case lunr.QueryLexer.BOOST: return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence default: var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) @@ -2910,6 +3393,9 @@ lunr.QueryParser.parseEditDistance = function (parser) { return lunr.QueryParser.parseEditDistance case lunr.QueryLexer.BOOST: return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence default: var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) @@ -2950,6 +3436,9 @@ lunr.QueryParser.parseBoost = function (parser) { return lunr.QueryParser.parseEditDistance case lunr.QueryLexer.BOOST: return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence default: var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) diff --git a/0.1/search/main.js b/0.1/search/main.js index 0a82ab5..0e1fc81 100644 --- a/0.1/search/main.js +++ b/0.1/search/main.js @@ -43,7 +43,7 @@ function displayResults (results) { function doSearch () { var query = document.getElementById('mkdocs-search-query').value; - if (query.length > 2) { + if (query.length > min_search_length) { if (!window.Worker) { displayResults(search(query)); } else { @@ -73,6 +73,8 @@ function onWorkerMessage (e) { } else if (e.data.results) { var results = e.data.results; displayResults(results); + } else if (e.data.config) { + min_search_length = e.data.config.min_search_length-1; } } diff --git a/0.1/search/search_index.json b/0.1/search/search_index.json index c8026ec..c5178aa 100644 --- a/0.1/search/search_index.json +++ b/0.1/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"MIPLearn MIPLearn is an extensible framework for Learning-Enhanced Mixed-Integer Optimization , an approach targeted at discrete optimization problems that need to be repeatedly solved with 
only minor changes to input data. The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see benchmark results and references for more details). Features MIPLearn proposes a flexible problem specification format, which allows users to describe their particular optimization problems to a Learning-Enhanced MIP solver, both from the MIP perspective and from the ML perspective, without making any assumptions on the problem being modeled, the mathematical formulation of the problem, or ML encoding. While the format is very flexible, some constraints are enforced to ensure that it is usable by an actual solver. MIPLearn provides a reference implementation of a Learning-Enhanced Solver , which can use the above problem specification format to automatically predict, based on previously solved instances, a number of hints to accelerate MIP performance. Currently, the reference solver is able to predict: (i) partial solutions which are likely to work well as MIP starts; (ii) an initial set of lazy constraints to enforce; (iii) variable branching priorities to accelerate the exploration of the branch-and-bound tree; (iv) the optimal objective value based on the solution to the LP relaxation. The usage of the solver is very straightforward. The most suitable ML models are automatically selected, trained, cross-validated and applied to the problem with no user intervention. MIPLearn provides a set of benchmark problems and random instance generators, covering applications from different domains, which can be used to quickly evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. MIPLearn is customizable and extensible . For MIP and ML researchers exploring new techniques to accelerate MIP performance based on historical data, each component of the reference solver can be individually replaced, extended or customized. Documentation Installation and typical usage Benchmark utilities Benchmark problems, challenges and results Customizing the solver License, authors, references and acknowledgments Source Code https://github.com/ANL-CEEESA/MIPLearn","title":"Home"},{"location":"#miplearn","text":"MIPLearn is an extensible framework for Learning-Enhanced Mixed-Integer Optimization , an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data. The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see benchmark results and references for more details).","title":"MIPLearn"},{"location":"#features","text":"MIPLearn proposes a flexible problem specification format, which allows users to describe their particular optimization problems to a Learning-Enhanced MIP solver, both from the MIP perspective and from the ML perspective, without making any assumptions on the problem being modeled, the mathematical formulation of the problem, or ML encoding. 
While the format is very flexible, some constraints are enforced to ensure that it is usable by an actual solver. MIPLearn provides a reference implementation of a Learning-Enhanced Solver , which can use the above problem specification format to automatically predict, based on previously solved instances, a number of hints to accelerate MIP performance. Currently, the reference solver is able to predict: (i) partial solutions which are likely to work well as MIP starts; (ii) an initial set of lazy constraints to enforce; (iii) variable branching priorities to accelerate the exploration of the branch-and-bound tree; (iv) the optimal objective value based on the solution to the LP relaxation. The usage of the solver is very straightforward. The most suitable ML models are automatically selected, trained, cross-validated and applied to the problem with no user intervention. MIPLearn provides a set of benchmark problems and random instance generators, covering applications from different domains, which can be used to quickly evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. MIPLearn is customizable and extensible . For MIP and ML researchers exploring new techniques to accelerate MIP performance based on historical data, each component of the reference solver can be individually replaced, extended or customized.","title":"Features"},{"location":"#documentation","text":"Installation and typical usage Benchmark utilities Benchmark problems, challenges and results Customizing the solver License, authors, references and acknowledgments","title":"Documentation"},{"location":"#source-code","text":"https://github.com/ANL-CEEESA/MIPLearn","title":"Source Code"},{"location":"about/","text":"About Authors Alinson S. Xavier, Argonne National Laboratory < axavier@anl.gov > Feng Qiu, Argonne National Laboratory < fqiu@anl.gov > Acknowledgments Based upon work supported by Laboratory Directed Research and Development (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357. References Learning to Solve Large-Scale Security-Constrained Unit Commitment Problems. Alinson S. Xavier, Feng Qiu, Shabbir Ahmed . INFORMS Journal on Computing (to appear). ArXiv:1902:01696 License MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization Copyright \u00a9 2020, UChicago Argonne, LLC. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"About"},{"location":"about/#about","text":"","title":"About"},{"location":"about/#authors","text":"Alinson S. Xavier, Argonne National Laboratory < axavier@anl.gov > Feng Qiu, Argonne National Laboratory < fqiu@anl.gov >","title":"Authors"},{"location":"about/#acknowledgments","text":"Based upon work supported by Laboratory Directed Research and Development (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357.","title":"Acknowledgments"},{"location":"about/#references","text":"Learning to Solve Large-Scale Security-Constrained Unit Commitment Problems. Alinson S. Xavier, Feng Qiu, Shabbir Ahmed . INFORMS Journal on Computing (to appear). ArXiv:1902:01696","title":"References"},{"location":"about/#license","text":"MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization Copyright \u00a9 2020, UChicago Argonne, LLC. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"License"},{"location":"benchmark/","text":"Benchmarks Utilities Using BenchmarkRunner MIPLearn provides the utility class BenchmarkRunner , which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage: from miplearn import BenchmarkRunner, LearningSolver # Create train and test instances train_instances = [...] test_instances = [...] # Training phase... training_solver = LearningSolver(...) training_solver.parallel_solve(train_instances, n_jobs=10) # Test phase... 
test_solvers = { \"Baseline\": LearningSolver(...), # each solver may have different parameters \"Strategy A\": LearningSolver(...), \"Strategy B\": LearningSolver(...), \"Strategy C\": LearningSolver(...), } benchmark = BenchmarkRunner(test_solvers) benchmark.fit(train_instances) benchmark.parallel_solve(test_instances, n_jobs=2) print(benchmark.raw_results()) The method fit trains the ML models for each individual solver. The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns: Solver, the name of the solver. Instance, the sequence number identifying the instance. Wallclock Time, the wallclock running time (in seconds) spent by the solver; Lower Bound, the best lower bound obtained by the solver; Upper Bound, the best upper bound obtained by the solver; Gap, the relative MIP integrality gap at the end of the optimization; Nodes, the number of explored branch-and-bound nodes. In addition to the above, there is also a \"Relative\" version of most columns, where the raw number is compared to the solver which provided the best performance. The Relative Wallclock Time for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance. For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2. Saving and loading benchmark results When iteratively exploring new formulations, encoding and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. BenchmarkRunner provides the methods save_results and load_results , which can be used to avoid this repetition, as the next example shows: # Benchmark baseline solvers and save results to a file. benchmark = BenchmarkRunner(baseline_solvers) benchmark.parallel_solve(test_instances) benchmark.save_results(\"baseline_results.csv\") # Benchmark remaining solvers, loading baseline results from file. benchmark = BenchmarkRunner(alternative_solvers) benchmark.load_results(\"baseline_results.csv\") benchmark.fit(training_instances) benchmark.parallel_solve(test_instances)","title":"Benchmark"},{"location":"benchmark/#benchmarks-utilities","text":"","title":"Benchmarks Utilities"},{"location":"benchmark/#using-benchmarkrunner","text":"MIPLearn provides the utility class BenchmarkRunner , which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage: from miplearn import BenchmarkRunner, LearningSolver # Create train and test instances train_instances = [...] test_instances = [...] # Training phase... training_solver = LearningSolver(...) training_solver.parallel_solve(train_instances, n_jobs=10) # Test phase... test_solvers = { \"Baseline\": LearningSolver(...), # each solver may have different parameters \"Strategy A\": LearningSolver(...), \"Strategy B\": LearningSolver(...), \"Strategy C\": LearningSolver(...), } benchmark = BenchmarkRunner(test_solvers) benchmark.fit(train_instances) benchmark.parallel_solve(test_instances, n_jobs=2) print(benchmark.raw_results()) The method fit trains the ML models for each individual solver. 
The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns: Solver, the name of the solver. Instance, the sequence number identifying the instance. Wallclock Time, the wallclock running time (in seconds) spent by the solver; Lower Bound, the best lower bound obtained by the solver; Upper Bound, the best upper bound obtained by the solver; Gap, the relative MIP integrality gap at the end of the optimization; Nodes, the number of explored branch-and-bound nodes. In addition to the above, there is also a \"Relative\" version of most columns, where the raw number is compared to the solver which provided the best performance. The Relative Wallclock Time for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance. For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2.\",\"title\":\"Using BenchmarkRunner\"},{\"location\":\"benchmark/#saving-and-loading-benchmark-results\",\"text\":\"When iteratively exploring new formulations, encoding and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. BenchmarkRunner provides the methods save_results and load_results , which can be used to avoid this repetition, as the next example shows: # Benchmark baseline solvers and save results to a file. benchmark = BenchmarkRunner(baseline_solvers) benchmark.parallel_solve(test_instances) benchmark.save_results(\"baseline_results.csv\") # Benchmark remaining solvers, loading baseline results from file. benchmark = BenchmarkRunner(alternative_solvers) benchmark.load_results(\"baseline_results.csv\") benchmark.fit(training_instances) benchmark.parallel_solve(test_instances)\",\"title\":\"Saving and loading benchmark results\"},{\"location\":\"customization/\",\"text\":\"Customization Customizing solver parameters Selecting the internal MIP solver By default, LearningSolver uses Gurobi as its internal MIP solver. Another supported solver is IBM ILOG CPLEX . To switch between solvers, use the solver constructor argument, as shown below. It is also possible to specify a time limit (in seconds) and a relative MIP gap tolerance. from miplearn import LearningSolver solver = LearningSolver(solver=\"cplex\", time_limit=300, gap_tolerance=1e-3) Customizing solver components LearningSolver is composed of a number of individual machine-learning components, each targeting a different part of the solution process. Each component can be individually enabled, disabled or customized. The following components are enabled by default: LazyConstraintComponent : Predicts which lazy constraint to initially enforce. ObjectiveValueComponent : Predicts the optimal value of the optimization problem, given the optimal solution to the LP relaxation. PrimalSolutionComponent : Predicts optimal values for binary decision variables. In heuristic mode, this component fixes the variables to their predicted values. In exact mode, the predicted values are provided to the solver as a (partial) MIP start.
The following components are also available, but not enabled by default: BranchPriorityComponent : Predicts good branch priorities for decision variables. Selecting components To create a LearningSolver with a specific set of components, the components constructor argument may be used, as the next example shows: # Create a solver without any components solver1 = LearningSolver(components=[]) # Create a solver with only two components solver2 = LearningSolver(components=[ LazyConstraintComponent(...), PrimalSolutionComponent(...), ]) It is also possible to add components to an existing solver using the solver.add method, as shown below. If the solver already holds another component of that type, the new component will replace the previous one. # Create solver with default components solver = LearningSolver() # Replace the default LazyConstraintComponent by one with custom parameters solver.add(LazyConstraintComponent(...)) Adjusting component aggressiveness The aggressiveness of classification components (such as PrimalSolutionComponent and LazyConstraintComponent ) can be adjusted through the threshold constructor argument. Internally, these components ask the ML models how confident they are in each prediction (through the predict_proba method in the sklearn API), and only take into account predictions which have probabilities above the threshold. Lowering a component's threshold increases its aggressiveness, while raising a component's threshold makes it more conservative. MIPLearn also includes MinPrecisionThreshold , a dynamic threshold which adjusts itself automatically during training to achieve a minimum desired precision (the fraction of positive predictions that are correct). The example below shows how to initialize a PrimalSolutionComponent which achieves 95% precision, possibly at the cost of a lower recall. To make the component more aggressive, this precision may be lowered. PrimalSolutionComponent(threshold=MinPrecisionThreshold(0.95)) Evaluating component performance MIPLearn allows solver components to be modified, trained and evaluated in isolation. In the following example, we build and fit PrimalSolutionComponent outside the solver, then evaluate its performance. from miplearn import PrimalSolutionComponent # User-provided set of previously-solved instances train_instances = [...] # Construct and fit component on a subset of training instances comp = PrimalSolutionComponent() comp.fit(train_instances[:100]) # Evaluate performance on an additional set of training instances ev = comp.evaluate(train_instances[100:150]) The method evaluate returns a dictionary with performance evaluation statistics for each training instance provided, and for each type of prediction the component makes.
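Each entry of this dictionary can also be inspected instance by instance before any aggregation. A minimal sketch, assuming (as the summary below suggests) that ev maps each prediction type, such as "Fix one", to per-instance statistics:

import pandas as pd

# One column per evaluated instance, one row per metric (see the summary below).
df = pd.DataFrame(ev["Fix one"])

# Precision achieved on each individual instance, before averaging.
print(df.loc["Precision"])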
To obtain a summary across all instances, pandas may be used, as below: import pandas as pd pd.DataFrame(ev[\"Fix one\"]).mean(axis=1) Predicted positive 3.120000 Predicted negative 196.880000 Condition positive 62.500000 Condition negative 137.500000 True positive 3.060000 True negative 137.440000 False positive 0.060000 False negative 59.440000 Accuracy 0.702500 F1 score 0.093050 Recall 0.048921 Precision 0.981667 Predicted positive (%) 1.560000 Predicted negative (%) 98.440000 Condition positive (%) 31.250000 Condition negative (%) 68.750000 True positive (%) 1.530000 True negative (%) 68.720000 False positive (%) 0.030000 False negative (%) 29.720000 dtype: float64 Regression components (such as ObjectiveValueComponent ) can also be trained and evaluated similarly, as the next example shows: from miplearn import ObjectiveValueComponent comp = ObjectiveValueComponent() comp.fit(train_instances[:100]) ev = comp.evaluate(train_instances[100:150]) import pandas as pd pd.DataFrame(ev).mean(axis=1) Mean squared error 7001.977827 Explained variance 0.519790 Max error 242.375804 Mean absolute error 65.843924 R2 0.517612 Median absolute error 65.843924 dtype: float64 Using customized ML classifiers and regressors By default, given a training set of instances, MIPLearn trains a fixed set of ML classifiers and regressors, then selects the best one based on cross-validation performance. Alternatively, the user may specify which ML model a component should use through the classifier or regressor constructor parameters. The provided classifiers and regressors must follow the sklearn API. In particular, classifiers must provide the methods fit , predict_proba and predict , while regressors must provide the methods fit and predict . Danger MIPLearn must be able to generate a copy of any custom ML classifiers and regressors through the standard copy.deepcopy method. This currently makes it incompatible with Keras and TensorFlow predictors. This is a known limitation, which will be addressed in a future version. The example below shows how to construct a PrimalSolutionComponent which internally uses sklearn's KNeighborsClassifier . Any other sklearn classifier or pipeline can be used. from miplearn import PrimalSolutionComponent from sklearn.neighbors import KNeighborsClassifier comp = PrimalSolutionComponent(classifier=KNeighborsClassifier(n_neighbors=5)) comp.fit(train_instances)\",\"title\":\"Customization\"},{\"location\":\"customization/#customization\",\"text\":\"\",\"title\":\"Customization\"},{\"location\":\"customization/#customizing-solver-parameters\",\"text\":\"\",\"title\":\"Customizing solver parameters\"},{\"location\":\"customization/#selecting-the-internal-mip-solver\",\"text\":\"By default, LearningSolver uses Gurobi as its internal MIP solver. Another supported solver is IBM ILOG CPLEX . To switch between solvers, use the solver constructor argument, as shown below. It is also possible to specify a time limit (in seconds) and a relative MIP gap tolerance. from miplearn import LearningSolver solver = LearningSolver(solver=\"cplex\", time_limit=300, gap_tolerance=1e-3)\",\"title\":\"Selecting the internal MIP solver\"},{\"location\":\"customization/#customizing-solver-components\",\"text\":\"LearningSolver is composed of a number of individual machine-learning components, each targeting a different part of the solution process. Each component can be individually enabled, disabled or customized. The following components are enabled by default: LazyConstraintComponent : Predicts which lazy constraint to initially enforce.
ObjectiveValueComponent : Predicts the optimal value of the optimization problem, given the optimal solution to the LP relaxation. PrimalSolutionComponent : Predicts optimal values for binary decision variables. In heuristic mode, this component fixes the variables to their predicted values. In exact mode, the predicted values are provided to the solver as a (partial) MIP start. The following components are also available, but not enabled by default: BranchPriorityComponent : Predicts good branch priorities for decision variables.\",\"title\":\"Customizing solver components\"},{\"location\":\"customization/#selecting-components\",\"text\":\"To create a LearningSolver with a specific set of components, the components constructor argument may be used, as the next example shows: # Create a solver without any components solver1 = LearningSolver(components=[]) # Create a solver with only two components solver2 = LearningSolver(components=[ LazyConstraintComponent(...), PrimalSolutionComponent(...), ]) It is also possible to add components to an existing solver using the solver.add method, as shown below. If the solver already holds another component of that type, the new component will replace the previous one. # Create solver with default components solver = LearningSolver() # Replace the default LazyConstraintComponent by one with custom parameters solver.add(LazyConstraintComponent(...))\",\"title\":\"Selecting components\"},{\"location\":\"customization/#adjusting-component-aggressiveness\",\"text\":\"The aggressiveness of classification components (such as PrimalSolutionComponent and LazyConstraintComponent ) can be adjusted through the threshold constructor argument. Internally, these components ask the ML models how confident they are in each prediction (through the predict_proba method in the sklearn API), and only take into account predictions which have probabilities above the threshold. Lowering a component's threshold increases its aggressiveness, while raising a component's threshold makes it more conservative. MIPLearn also includes MinPrecisionThreshold , a dynamic threshold which adjusts itself automatically during training to achieve a minimum desired precision (the fraction of positive predictions that are correct). The example below shows how to initialize a PrimalSolutionComponent which achieves 95% precision, possibly at the cost of a lower recall. To make the component more aggressive, this precision may be lowered. PrimalSolutionComponent(threshold=MinPrecisionThreshold(0.95))\",\"title\":\"Adjusting component aggressiveness\"},{\"location\":\"customization/#evaluating-component-performance\",\"text\":\"MIPLearn allows solver components to be modified, trained and evaluated in isolation. In the following example, we build and fit PrimalSolutionComponent outside the solver, then evaluate its performance. from miplearn import PrimalSolutionComponent # User-provided set of previously-solved instances train_instances = [...] # Construct and fit component on a subset of training instances comp = PrimalSolutionComponent() comp.fit(train_instances[:100]) # Evaluate performance on an additional set of training instances ev = comp.evaluate(train_instances[100:150]) The method evaluate returns a dictionary with performance evaluation statistics for each training instance provided, and for each type of prediction the component makes.
To obtain a summary across all instances, pandas may be used, as below: import pandas as pd pd.DataFrame(ev[\"Fix one\"]).mean(axis=1) Predicted positive 3.120000 Predicted negative 196.880000 Condition positive 62.500000 Condition negative 137.500000 True positive 3.060000 True negative 137.440000 False positive 0.060000 False negative 59.440000 Accuracy 0.702500 F1 score 0.093050 Recall 0.048921 Precision 0.981667 Predicted positive (%) 1.560000 Predicted negative (%) 98.440000 Condition positive (%) 31.250000 Condition negative (%) 68.750000 True positive (%) 1.530000 True negative (%) 68.720000 False positive (%) 0.030000 False negative (%) 29.720000 dtype: float64 Regression components (such as ObjectiveValueComponent ) can also be trained and evaluated similarly, as the next example shows: from miplearn import ObjectiveValueComponent comp = ObjectiveValueComponent() comp.fit(train_instances[:100]) ev = comp.evaluate(train_instances[100:150]) import pandas as pd pd.DataFrame(ev).mean(axis=1) Mean squared error 7001.977827 Explained variance 0.519790 Max error 242.375804 Mean absolute error 65.843924 R2 0.517612 Median absolute error 65.843924 dtype: float64\",\"title\":\"Evaluating component performance\"},{\"location\":\"customization/#using-customized-ml-classifiers-and-regressors\",\"text\":\"By default, given a training set of instances, MIPLearn trains a fixed set of ML classifiers and regressors, then selects the best one based on cross-validation performance. Alternatively, the user may specify which ML model a component should use through the classifier or regressor constructor parameters. The provided classifiers and regressors must follow the sklearn API. In particular, classifiers must provide the methods fit , predict_proba and predict , while regressors must provide the methods fit and predict . Danger MIPLearn must be able to generate a copy of any custom ML classifiers and regressors through the standard copy.deepcopy method. This currently makes it incompatible with Keras and TensorFlow predictors. This is a known limitation, which will be addressed in a future version. The example below shows how to construct a PrimalSolutionComponent which internally uses sklearn's KNeighborsClassifier . Any other sklearn classifier or pipeline can be used. from miplearn import PrimalSolutionComponent from sklearn.neighbors import KNeighborsClassifier comp = PrimalSolutionComponent(classifier=KNeighborsClassifier(n_neighbors=5)) comp.fit(train_instances)\",\"title\":\"Using customized ML classifiers and regressors\"},{\"location\":\"problems/\",\"text\":\"Benchmark Problems, Challenges and Results MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, that can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. In this page, we describe these problems, the included instance generators, and we present some benchmark results for LearningSolver with default parameters. Preliminaries Benchmark challenges When evaluating the performance of a conventional MIP solver, benchmark sets , such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques is typically measured as the average (or total) running time the solver takes to solve the entire benchmark set.
For Learning-Enhanced MIP solvers, it is also necessary to specify which instances the solver should be trained on (the training instances ) before solving the actual set of instances we are interested in (the test instances ). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger performance benefits. In MIPLearn, each optimization problem comes with a set of benchmark challenges , which specify how the training and test instances should be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from. Baseline results To illustrate the performance of LearningSolver , and to set a baseline for newly proposed techniques, we present in this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. For more detailed computational studies, see references . We compare three solvers: baseline: Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver) ml-exact: LearningSolver with default settings, using Gurobi 9.0 as internal MIP solver ml-heuristic: Same as above, but with mode=\"heuristic\" All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz). All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously. Maximum Weight Stable Set Problem Problem definition Given a simple undirected graph $G=(V,E)$ and weights $w \\in \\mathbb{R}^V$, the problem is to find a stable set $S \\subseteq V$ that maximizes $ \\sum_{v \\in S} w_v$. We recall that a subset $S \\subseteq V$ is a stable set if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems. Random instance generator The class MaxWeightStableSetGenerator can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter fix_graph=True is provided, one random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ is generated during the constructor, where $n$ and $p$ are sampled from user-provided probability distributions n and p . To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution w . When fix_graph=False , a new random graph is generated for each instance, while the remaining parameters are sampled in the same way. Challenge A Fixed random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ with $n=200$ and $p=5\\%$ Random vertex weights $w_v \\sim U(100, 150)$ 500 training instances, 50 test instances MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.), n=randint(low=200, high=201), p=uniform(loc=0.05, scale=0.0), fix_graph=True) Traveling Salesman Problem Problem definition Given a list of cities and the distance between each pair of cities, the problem asks for the shortest route starting at the first city, visiting each other city exactly once, then returning to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete problems. Random problem generator The class TravelingSalesmanGenerator can be used to generate random instances of this problem.
Initially, the generator creates $n$ cities $(x_1,y_1),\\ldots,(x_n,y_n) \\in \\mathbb{R}^2$, where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions n , x and y . For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to: d_{i,j} = \\gamma_{i,j} \\sqrt{(x_i-x_j)^2 + (y_i - y_j)^2} where $\\gamma_{i,j}$ is sampled from the distribution gamma . If fix_cities=True is provided, the list of cities is kept the same for all generated instances. The $\\gamma$ values, and therefore also the distances, are still different. By default, all distances $d_{i,j}$ are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. Challenge A Fixed list of 350 cities in the $[0, 1000]^2$ square $\\gamma_{i,j} \\sim U(0.95, 1.05)$ 500 training instances, 50 test instances TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), y=uniform(loc=0.0, scale=1000.0), n=randint(low=350, high=351), gamma=uniform(loc=0.95, scale=0.1), fix_cities=True, round=True, ) Multidimensional 0-1 Knapsack Problem Problem definition Given a set of $n$ items and $m$ types of resources (also called knapsacks ), the problem is to find a subset of items that maximizes profit without consuming more resources than are available. More precisely, the problem is: \\begin{align*} \\text{maximize} & \\sum_{j=1}^n p_j x_j \\\\ \\text{subject to} & \\sum_{j=1}^n w_{ij} x_j \\leq b_i & \\forall i=1,\\ldots,m \\\\ & x_j \\in \\{0,1\\} & \\forall j=1,\\ldots,n \\end{align*} Random instance generator The class MultiKnapsackGenerator can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions n and m . The weights $w_{ij}$ are sampled independently from the provided distribution w . The capacity of knapsack $i$ is set to b_i = \\alpha_i \\sum_{j=1}^n w_{ij} where $\\alpha_i$, the tightness ratio, is sampled from the provided probability distribution alpha . To make the instances more challenging, the costs of the items are linearly correlated with their average weights. More specifically, the price of each item $j$ is set to: p_j = \\sum_{i=1}^m \\frac{w_{ij}}{m} + K u_j, where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled from the provided probability distributions K and u . If fix_w=True is provided, then $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as u and K are not constants, the generated instances will still not be completely identical. If a probability distribution w_jitter is provided, then item weights will be set to $w_{ij} \\gamma_{ij}$ where $\\gamma_{ij}$ is sampled from w_jitter . When combined with fix_w=True , this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead. By default, all generated prices, weights and capacities are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. References Fr\u00e9ville, Arnaud, and G\u00e9rard Plateau. An efficient preprocessing procedure for the multidimensional 0\u20131 knapsack problem. Discrete Applied Mathematics 49.1-3 (1994): 189-212. Fr\u00e9ville, Arnaud.
The multidimensional 0\u20131 knapsack problem: An overview. European Journal of Operational Research 155.1 (2004): 1-21. Challenge A 250 variables, 10 constraints, fixed weights $w \\sim U(0, 1000), \\gamma \\sim U(0.95, 1.05)$ $K = 500, u \\sim U(0, 1), \\alpha = 0.25$ 500 training instances, 50 test instances MultiKnapsackGenerator(n=randint(low=250, high=251), m=randint(low=10, high=11), w=uniform(loc=0.0, scale=1000.0), K=uniform(loc=500.0, scale=0.0), u=uniform(loc=0.0, scale=1.0), alpha=uniform(loc=0.25, scale=0.0), fix_w=True, w_jitter=uniform(loc=0.95, scale=0.1), )\",\"title\":\"Problems\"},{\"location\":\"problems/#benchmark-problems-challenges-and-results\",\"text\":\"MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, that can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. In this page, we describe these problems, the included instance generators, and we present some benchmark results for LearningSolver with default parameters.\",\"title\":\"Benchmark Problems, Challenges and Results\"},{\"location\":\"problems/#preliminaries\",\"text\":\"\",\"title\":\"Preliminaries\"},{\"location\":\"problems/#benchmark-challenges\",\"text\":\"When evaluating the performance of a conventional MIP solver, benchmark sets , such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques is typically measured as the average (or total) running time the solver takes to solve the entire benchmark set. For Learning-Enhanced MIP solvers, it is also necessary to specify which instances the solver should be trained on (the training instances ) before solving the actual set of instances we are interested in (the test instances ). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger performance benefits. In MIPLearn, each optimization problem comes with a set of benchmark challenges , which specify how the training and test instances should be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from.\",\"title\":\"Benchmark challenges\"},{\"location\":\"problems/#baseline-results\",\"text\":\"To illustrate the performance of LearningSolver , and to set a baseline for newly proposed techniques, we present in this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. For more detailed computational studies, see references . We compare three solvers: baseline: Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver) ml-exact: LearningSolver with default settings, using Gurobi 9.0 as internal MIP solver ml-heuristic: Same as above, but with mode=\"heuristic\" All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz).
All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously.\",\"title\":\"Baseline results\"},{\"location\":\"problems/#maximum-weight-stable-set-problem\",\"text\":\"\",\"title\":\"Maximum Weight Stable Set Problem\"},{\"location\":\"problems/#problem-definition\",\"text\":\"Given a simple undirected graph $G=(V,E)$ and weights $w \\in \\mathbb{R}^V$, the problem is to find a stable set $S \\subseteq V$ that maximizes $ \\sum_{v \\in S} w_v$. We recall that a subset $S \\subseteq V$ is a stable set if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems.\",\"title\":\"Problem definition\"},{\"location\":\"problems/#random-instance-generator\",\"text\":\"The class MaxWeightStableSetGenerator can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter fix_graph=True is provided, one random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ is generated during the constructor, where $n$ and $p$ are sampled from user-provided probability distributions n and p . To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution w . When fix_graph=False , a new random graph is generated for each instance, while the remaining parameters are sampled in the same way.\",\"title\":\"Random instance generator\"},{\"location\":\"problems/#challenge-a\",\"text\":\"Fixed random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ with $n=200$ and $p=5\\%$ Random vertex weights $w_v \\sim U(100, 150)$ 500 training instances, 50 test instances MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.), n=randint(low=200, high=201), p=uniform(loc=0.05, scale=0.0), fix_graph=True)\",\"title\":\"Challenge A\"},{\"location\":\"problems/#traveling-salesman-problem\",\"text\":\"\",\"title\":\"Traveling Salesman Problem\"},{\"location\":\"problems/#problem-definition_1\",\"text\":\"Given a list of cities and the distance between each pair of cities, the problem asks for the shortest route starting at the first city, visiting each other city exactly once, then returning to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete problems.\",\"title\":\"Problem definition\"},{\"location\":\"problems/#random-problem-generator\",\"text\":\"The class TravelingSalesmanGenerator can be used to generate random instances of this problem. Initially, the generator creates $n$ cities $(x_1,y_1),\\ldots,(x_n,y_n) \\in \\mathbb{R}^2$, where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions n , x and y . For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to: d_{i,j} = \\gamma_{i,j} \\sqrt{(x_i-x_j)^2 + (y_i - y_j)^2} where $\\gamma_{i,j}$ is sampled from the distribution gamma . If fix_cities=True is provided, the list of cities is kept the same for all generated instances. The $\\gamma$ values, and therefore also the distances, are still different. By default, all distances $d_{i,j}$ are rounded to the nearest integer.
If round=False is provided, this rounding will be disabled.\",\"title\":\"Random problem generator\"},{\"location\":\"problems/#challenge-a_1\",\"text\":\"Fixed list of 350 cities in the $[0, 1000]^2$ square $\\gamma_{i,j} \\sim U(0.95, 1.05)$ 500 training instances, 50 test instances TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), y=uniform(loc=0.0, scale=1000.0), n=randint(low=350, high=351), gamma=uniform(loc=0.95, scale=0.1), fix_cities=True, round=True, )\",\"title\":\"Challenge A\"},{\"location\":\"problems/#multidimensional-0-1-knapsack-problem\",\"text\":\"\",\"title\":\"Multidimensional 0-1 Knapsack Problem\"},{\"location\":\"problems/#problem-definition_2\",\"text\":\"Given a set of $n$ items and $m$ types of resources (also called knapsacks ), the problem is to find a subset of items that maximizes profit without consuming more resources than are available. More precisely, the problem is: \\begin{align*} \\text{maximize} & \\sum_{j=1}^n p_j x_j \\\\ \\text{subject to} & \\sum_{j=1}^n w_{ij} x_j \\leq b_i & \\forall i=1,\\ldots,m \\\\ & x_j \\in \\{0,1\\} & \\forall j=1,\\ldots,n \\end{align*}\",\"title\":\"Problem definition\"},{\"location\":\"problems/#random-instance-generator_1\",\"text\":\"The class MultiKnapsackGenerator can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions n and m . The weights $w_{ij}$ are sampled independently from the provided distribution w . The capacity of knapsack $i$ is set to b_i = \\alpha_i \\sum_{j=1}^n w_{ij} where $\\alpha_i$, the tightness ratio, is sampled from the provided probability distribution alpha . To make the instances more challenging, the costs of the items are linearly correlated with their average weights. More specifically, the price of each item $j$ is set to: p_j = \\sum_{i=1}^m \\frac{w_{ij}}{m} + K u_j, where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled from the provided probability distributions K and u . If fix_w=True is provided, then $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as u and K are not constants, the generated instances will still not be completely identical. If a probability distribution w_jitter is provided, then item weights will be set to $w_{ij} \\gamma_{ij}$ where $\\gamma_{ij}$ is sampled from w_jitter . When combined with fix_w=True , this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead. By default, all generated prices, weights and capacities are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. References Fr\u00e9ville, Arnaud, and G\u00e9rard Plateau. An efficient preprocessing procedure for the multidimensional 0\u20131 knapsack problem. Discrete Applied Mathematics 49.1-3 (1994): 189-212. Fr\u00e9ville, Arnaud. The multidimensional 0\u20131 knapsack problem: An overview.
European Journal of Operational Research 155.1 (2004): 1-21.\",\"title\":\"Random instance generator\"},{\"location\":\"problems/#challenge-a_2\",\"text\":\"250 variables, 10 constraints, fixed weights $w \\sim U(0, 1000), \\gamma \\sim U(0.95, 1.05)$ $K = 500, u \\sim U(0, 1), \\alpha = 0.25$ 500 training instances, 50 test instances MultiKnapsackGenerator(n=randint(low=250, high=251), m=randint(low=10, high=11), w=uniform(loc=0.0, scale=1000.0), K=uniform(loc=500.0, scale=0.0), u=uniform(loc=0.0, scale=1.0), alpha=uniform(loc=0.25, scale=0.0), fix_w=True, w_jitter=uniform(loc=0.95, scale=0.1), )\",\"title\":\"Challenge A\"},{\"location\":\"usage/\",\"text\":\"Usage Installation MIPLearn is mainly written in Python, with some components written in Julia. For this reason, both Python 3.6+ and Julia 1.3+ are required. A mixed-integer solver is also required, and its Python bindings must be properly installed. Supported solvers are CPLEX and Gurobi. Optimization problems currently need to be specified in the Pyomo modeling language. A JuMP interface to the package is currently under development. To install MIPLearn, run the following commands: git clone https://github.com/ANL-CEEESA/MIPLearn.git cd MIPLearn make install After installation, the package miplearn should become available to Python. It can be imported as follows: import miplearn Note To install MIPLearn in another Python environment, switch to that environment before running make install . To install the package in development mode, run make develop instead. Using LearningSolver The main class provided by this package is LearningSolver , a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage: from miplearn import LearningSolver # List of user-provided instances training_instances = [...] test_instances = [...] # Create solver solver = LearningSolver() # Solve all training instances for instance in training_instances: solver.solve(instance) # Learn from training instances solver.fit(training_instances) # Solve all test instances for instance in test_instances: solver.solve(instance) In this example, we have two lists of user-provided instances: training_instances and test_instances . We start by solving all training instances. Since there is no historical information available at this point, the instances will be processed from scratch, with no ML acceleration. After solving each instance, the solver stores within each instance object the optimal solution, the optimal objective value, and other information that can be used to accelerate future solves. After all training instances are solved, we call solver.fit(training_instances) . This instructs the solver to train all its internal machine-learning models based on the solutions of the (solved) training instances. Subsequent calls to solver.solve(instance) will automatically use the trained Machine Learning models to accelerate the solution process. Describing problem instances Instances to be solved by LearningSolver must derive from the abstract class miplearn.Instance .
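A minimal sketch of such a subclass, for a toy knapsack problem, appears below; it implements the three abstract methods described next. The class name, data attributes and feature choices are illustrative assumptions only, not part of the library; src/python/miplearn/problems/knapsack.py remains the authoritative example.

import numpy as np
import pyomo.environ as pe
from miplearn import Instance

class KnapsackInstance(Instance):
    # Hypothetical data layout: one price and one weight per item.
    def __init__(self, prices, weights, capacity):
        self.prices = prices
        self.weights = weights
        self.capacity = capacity

    def to_model(self):
        # Concrete Pyomo model handed to the internal MIP solver.
        model = pe.ConcreteModel()
        items = range(len(self.prices))
        model.x = pe.Var(items, domain=pe.Binary)
        model.obj = pe.Objective(
            expr=sum(self.prices[i] * model.x[i] for i in items),
            sense=pe.maximize,
        )
        model.cap = pe.Constraint(
            expr=sum(self.weights[i] * model.x[i] for i in items)
                 <= self.capacity
        )
        return model

    def get_instance_features(self):
        # Fixed-length encoding of the whole instance.
        return np.array([
            np.mean(self.prices),
            np.mean(self.weights),
            len(self.prices),
            self.capacity,
        ])

    def get_variable_features(self, var_name, index):
        # Fixed-length encoding of one decision variable.
        return np.array([self.prices[index], self.weights[index]])

An instance built this way can be passed directly to solver.solve(instance).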
The following three abstract methods must be implemented: instance.to_model() , which returns a concrete Pyomo model corresponding to the instance; instance.get_instance_features() , which returns a 1-dimensional Numpy array of (numerical) features describing the entire instance; instance.get_variable_features(var_name, index) , which returns a 1-dimensional array of (numerical) features describing a particular decision variable. The first method is used by LearningSolver to construct a concrete Pyomo model, which will be provided to the internal MIP solver. The second and third methods provide an encoding of the instance, which can be used by the ML models to make predictions. In the knapsack problem, for example, an implementation may decide to provide as instance features the average weights, average prices, number of items and the size of the knapsack. The weight and the price of each individual item could be provided as variable features. See src/python/miplearn/problems/knapsack.py for a concrete example. An optional method which can be implemented is instance.get_variable_category(var_name, index) , which returns a category (a string, an integer or any hashable type) for each decision variable. If two variables have the same category, LearningSolver will use the same internal ML model to predict the values of both variables. By default, all variables belong to the \"default\" category, and therefore only one ML model is used for all variables. If the returned category is None , ML predictors will ignore the variable. It is not necessary to have a one-to-one correspondence between features and problem instances. One important (and deliberate) limitation of MIPLearn, however, is that get_instance_features() must always return arrays of the same length for all relevant instances of the problem. Similarly, get_variable_features(var_name, index) must also always return arrays of the same length for all variables in each category. It is up to the user to decide how to encode variable-length characteristics of the problem into fixed-length vectors. In graph problems, for example, graph embeddings can be used to reduce the (variable-length) lists of nodes and edges into a fixed-length structure that still preserves some properties of the graph. Different instance encodings may have a significant impact on performance. Obtaining heuristic solutions By default, LearningSolver uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver. In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts. For more significant performance benefits, LearningSolver can also be configured to place additional trust in the Machine Learning predictors, by using the mode=\"heuristic\" constructor argument. When operating in this mode, if an ML model is statistically shown (through stratified k-fold cross validation ) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see references and benchmark results ). Danger The heuristic mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances.
Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible. Saving and loading solver state After solving a large number of training instances, it may be desirable to save the current state of LearningSolver to disk, so that the solver can still use the acquired knowledge after the application restarts. This can be accomplished by using the standard pickle module, as the following example illustrates: from miplearn import LearningSolver import pickle # Solve training instances training_instances = [...] solver = LearningSolver() for instance in training_instances: solver.solve(instance) # Train machine-learning models solver.fit(training_instances) # Save trained solver to disk pickle.dump(solver, open(\"solver.pickle\", \"wb\")) # Application restarts... # Load trained solver from disk solver = pickle.load(open(\"solver.pickle\", \"rb\")) # Solve additional instances test_instances = [...] for instance in test_instances: solver.solve(instance) Solving training instances in parallel In many situations, training and test instances can be solved in parallel to accelerate the training process. LearningSolver provides the method parallel_solve(instances) to easily achieve this: from miplearn import LearningSolver training_instances = [...] solver = LearningSolver() solver.parallel_solve(training_instances, n_jobs=4) solver.fit(training_instances) # Test phase... test_instances = [...] solver.parallel_solve(test_instances) Current Limitations Only binary and continuous decision variables are currently supported.","title":"Usage"},{"location":"usage/#usage","text":"","title":"Usage"},{"location":"usage/#installation","text":"MIPLearn is mainly written in Python, with some components written in Julia. For this reason, both Python 3.6+ and Julia 1.3+ are required. A mixed-integer solver is also required, and its Python bindings must be properly installed. Supported solvers are CPLEX and Gurobi. Optimization problems currently need to be specified in the Pyomo modeling language. A JuMP interface to the package is currently under development. To install MIPLearn, run the following commands: git clone https://github.com/ANL-CEEESA/MIPLearn.git cd MIPLearn make install After installation, the package miplearn should become available to Python. It can be imported as follows: import miplearn Note To install MIPLearn in another Python environment, switch to that environment before running make install . To install the package in development mode, run make develop instead.","title":"Installation"},{"location":"usage/#using-learningsolver","text":"The main class provided by this package is LearningSolver , a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage: from miplearn import LearningSolver # List of user-provided instances training_instances = [...] test_instances = [...] # Create solver solver = LearningSolver() # Solve all training instances for instance in training_instances: solver.solve(instance) # Learn from training instances solver.fit(training_instances) # Solve all test instances for instance in test_instances: solver.solve(instance) In this example, we have two lists of user-provided instances: training_instances and test_instances . We start by solving all training instances. 
Since there is no historical information available at this point, the instances will be processed from scratch, with no ML acceleration. After solving each instance, the solver stores within each instance object the optimal solution, the optimal objective value, and other information that can be used to accelerate future solves. After all training instances are solved, we call solver.fit(training_instances) . This instructs the solver to train all its internal machine-learning models based on the solutions of the (solved) training instances. Subsequent calls to solver.solve(instance) will automatically use the trained Machine Learning models to accelerate the solution process.\",\"title\":\"Using LearningSolver\"},{\"location\":\"usage/#describing-problem-instances\",\"text\":\"Instances to be solved by LearningSolver must derive from the abstract class miplearn.Instance . The following three abstract methods must be implemented: instance.to_model() , which returns a concrete Pyomo model corresponding to the instance; instance.get_instance_features() , which returns a 1-dimensional Numpy array of (numerical) features describing the entire instance; instance.get_variable_features(var_name, index) , which returns a 1-dimensional array of (numerical) features describing a particular decision variable. The first method is used by LearningSolver to construct a concrete Pyomo model, which will be provided to the internal MIP solver. The second and third methods provide an encoding of the instance, which can be used by the ML models to make predictions. In the knapsack problem, for example, an implementation may decide to provide as instance features the average weights, average prices, number of items and the size of the knapsack. The weight and the price of each individual item could be provided as variable features. See src/python/miplearn/problems/knapsack.py for a concrete example. An optional method which can be implemented is instance.get_variable_category(var_name, index) , which returns a category (a string, an integer or any hashable type) for each decision variable. If two variables have the same category, LearningSolver will use the same internal ML model to predict the values of both variables. By default, all variables belong to the \"default\" category, and therefore only one ML model is used for all variables. If the returned category is None , ML predictors will ignore the variable. It is not necessary to have a one-to-one correspondence between features and problem instances. One important (and deliberate) limitation of MIPLearn, however, is that get_instance_features() must always return arrays of the same length for all relevant instances of the problem. Similarly, get_variable_features(var_name, index) must also always return arrays of the same length for all variables in each category. It is up to the user to decide how to encode variable-length characteristics of the problem into fixed-length vectors. In graph problems, for example, graph embeddings can be used to reduce the (variable-length) lists of nodes and edges into a fixed-length structure that still preserves some properties of the graph. Different instance encodings may have a significant impact on performance.\",\"title\":\"Describing problem instances\"},{\"location\":\"usage/#obtaining-heuristic-solutions\",\"text\":\"By default, LearningSolver uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver.
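Concretely, switching between the two behaviors happens at construction time. A minimal sketch (only the mode="heuristic" argument is documented here; the default requires no argument):

from miplearn import LearningSolver

# Default: ML predictions are used only as hints (e.g. MIP starts);
# all optimality guarantees of the underlying MIP solver are kept.
solver = LearningSolver()

# Heuristic mode: predictions may restrict the search space.
# Faster, but provides no optimality guarantees.
heuristic_solver = LearningSolver(mode="heuristic")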
In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts. For more significant performance benefits, LearningSolver can also be configured to place additional trust in the Machine Learning predictors, by using the mode=\"heuristic\" constructor argument. When operating in this mode, if a ML model is statistically shown (through stratified k-fold cross validation ) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see references and benchmark results ). Danger The heuristic mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible.","title":"Obtaining heuristic solutions"},{"location":"usage/#saving-and-loading-solver-state","text":"After solving a large number of training instances, it may be desirable to save the current state of LearningSolver to disk, so that the solver can still use the acquired knowledge after the application restarts. This can be accomplished by using the standard pickle module, as the following example illustrates: from miplearn import LearningSolver import pickle # Solve training instances training_instances = [...] solver = LearningSolver() for instance in training_instances: solver.solve(instance) # Train machine-learning models solver.fit(training_instances) # Save trained solver to disk pickle.dump(solver, open(\"solver.pickle\", \"wb\")) # Application restarts... # Load trained solver from disk solver = pickle.load(open(\"solver.pickle\", \"rb\")) # Solve additional instances test_instances = [...] for instance in test_instances: solver.solve(instance)","title":"Saving and loading solver state"},{"location":"usage/#solving-training-instances-in-parallel","text":"In many situations, training and test instances can be solved in parallel to accelerate the training process. LearningSolver provides the method parallel_solve(instances) to easily achieve this: from miplearn import LearningSolver training_instances = [...] solver = LearningSolver() solver.parallel_solve(training_instances, n_jobs=4) solver.fit(training_instances) # Test phase... test_instances = [...] solver.parallel_solve(test_instances)","title":"Solving training instances in parallel"},{"location":"usage/#current-limitations","text":"Only binary and continuous decision variables are currently supported.","title":"Current Limitations"}]} \ No newline at end of file +{"config":{"lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"MIPLearn MIPLearn is an extensible framework for Learning-Enhanced Mixed-Integer Optimization , an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data. The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. 
For particular classes of problems, this approach has been shown to provide significant performance benefits (see benchmark results and references for more details). Features MIPLearn proposes a flexible problem specification format, which allows users to describe their particular optimization problems to a Learning-Enhanced MIP solver, both from the MIP perspective and from the ML perspective, without making any assumptions on the problem being modeled, the mathematical formulation of the problem, or ML encoding. While the format is very flexible, some constraints are enforced to ensure that it is usable by an actual solver. MIPLearn provides a reference implementation of a Learning-Enhanced Solver , which can use the above problem specification format to automatically predict, based on previously solved instances, a number of hints to accelerate MIP performance. Currently, the reference solver is able to predict: (i) partial solutions which are likely to work well as MIP starts; (ii) an initial set of lazy constraints to enforce; (iii) variable branching priorities to accelerate the exploration of the branch-and-bound tree; (iv) the optimal objective value based on the solution to the LP relaxation. The usage of the solver is very straightforward. The most suitable ML models are automatically selected, trained, cross-validated and applied to the problem with no user intervention. MIPLearn provides a set of benchmark problems and random instance generators, covering applications from different domains, which can be used to quickly evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. MIPLearn is customizable and extensible . For MIP and ML researchers exploring new techniques to accelerate MIP performance based on historical data, each component of the reference solver can be individually replaced, extended or customized. Documentation Installation and typical usage Benchmark utilities Benchmark problems, challenges and results Customizing the solver License, authors, references and acknowledgments Source Code https://github.com/ANL-CEEESA/MIPLearn","title":"Home"},{"location":"#miplearn","text":"MIPLearn is an extensible framework for Learning-Enhanced Mixed-Integer Optimization , an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data. The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see benchmark results and references for more details).","title":"MIPLearn"},{"location":"#features","text":"MIPLearn proposes a flexible problem specification format, which allows users to describe their particular optimization problems to a Learning-Enhanced MIP solver, both from the MIP perspective and from the ML perspective, without making any assumptions on the problem being modeled, the mathematical formulation of the problem, or ML encoding. While the format is very flexible, some constraints are enforced to ensure that it is usable by an actual solver. MIPLearn provides a reference implementation of a Learning-Enhanced Solver , which can use the above problem specification format to automatically predict, based on previously solved instances, a number of hints to accelerate MIP performance. 
Currently, the reference solver is able to predict: (i) partial solutions which are likely to work well as MIP starts; (ii) an initial set of lazy constraints to enforce; (iii) variable branching priorities to accelerate the exploration of the branch-and-bound tree; (iv) the optimal objective value based on the solution to the LP relaxation. The usage of the solver is very straightforward. The most suitable ML models are automatically selected, trained, cross-validated and applied to the problem with no user intervention. MIPLearn provides a set of benchmark problems and random instance generators, covering applications from different domains, which can be used to quickly evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. MIPLearn is customizable and extensible . For MIP and ML researchers exploring new techniques to accelerate MIP performance based on historical data, each component of the reference solver can be individually replaced, extended or customized.","title":"Features"},{"location":"#documentation","text":"Installation and typical usage Benchmark utilities Benchmark problems, challenges and results Customizing the solver License, authors, references and acknowledgments","title":"Documentation"},{"location":"#source-code","text":"https://github.com/ANL-CEEESA/MIPLearn","title":"Source Code"},{"location":"about/","text":"About Authors Alinson S. Xavier, Argonne National Laboratory < axavier@anl.gov > Feng Qiu, Argonne National Laboratory < fqiu@anl.gov > Acknowledgments Based upon work supported by Laboratory Directed Research and Development (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357. References Learning to Solve Large-Scale Security-Constrained Unit Commitment Problems. Alinson S. Xavier, Feng Qiu, Shabbir Ahmed . INFORMS Journal on Computing (to appear). ArXiv:1902:01696 License MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization Copyright \u00a9 2020, UChicago Argonne, LLC. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"About"},{"location":"about/#about","text":"","title":"About"},{"location":"about/#authors","text":"Alinson S. Xavier, Argonne National Laboratory < axavier@anl.gov > Feng Qiu, Argonne National Laboratory < fqiu@anl.gov >","title":"Authors"},{"location":"about/#acknowledgments","text":"Based upon work supported by Laboratory Directed Research and Development (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357.","title":"Acknowledgments"},{"location":"about/#references","text":"Learning to Solve Large-Scale Security-Constrained Unit Commitment Problems. Alinson S. Xavier, Feng Qiu, Shabbir Ahmed . INFORMS Journal on Computing (to appear). ArXiv:1902:01696","title":"References"},{"location":"about/#license","text":"MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization Copyright \u00a9 2020, UChicago Argonne, LLC. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"License"},{"location":"benchmark/","text":"Benchmarks Utilities Using BenchmarkRunner MIPLearn provides the utility class BenchmarkRunner , which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage: from miplearn import BenchmarkRunner, LearningSolver # Create train and test instances train_instances = [...] test_instances = [...] # Training phase... training_solver = LearningSolver(...) training_solver.parallel_solve(train_instances, n_jobs=10) # Test phase... 
test_solvers = { \"Baseline\": LearningSolver(...), # each solver may have different parameters \"Strategy A\": LearningSolver(...), \"Strategy B\": LearningSolver(...), \"Strategy C\": LearningSolver(...), } benchmark = BenchmarkRunner(test_solvers) benchmark.fit(train_instances) benchmark.parallel_solve(test_instances, n_jobs=2) print(benchmark.raw_results()) The method fit trains the ML models for each individual solver. The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns: Solver, the name of the solver. Instance, the sequence number identifying the instance. Wallclock Time, the wallclock running time (in seconds) spent by the solver; Lower Bound, the best lower bound obtained by the solver; Upper Bound, the best upper bound obtained by the solver; Gap, the relative MIP integrality gap at the end of the optimization; Nodes, the number of explored branch-and-bound nodes. In addition to the above, there is also a \"Relative\" version of most columns, where the raw number is compared to the solver which provided the best performance. The Relative Wallclock Time for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance. For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2. Saving and loading benchmark results When iteratively exploring new formulations, encoding and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. BenchmarkRunner provides the methods save_results and load_results , which can be used to avoid this repetition, as the next example shows: # Benchmark baseline solvers and save results to a file. benchmark = BenchmarkRunner(baseline_solvers) benchmark.parallel_solve(test_instances) benchmark.save_results(\"baseline_results.csv\") # Benchmark remaining solvers, loading baseline results from file. benchmark = BenchmarkRunner(alternative_solvers) benchmark.load_results(\"baseline_results.csv\") benchmark.fit(training_instances) benchmark.parallel_solve(test_instances)","title":"Benchmark"},{"location":"benchmark/#benchmarks-utilities","text":"","title":"Benchmarks Utilities"},{"location":"benchmark/#using-benchmarkrunner","text":"MIPLearn provides the utility class BenchmarkRunner , which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage: from miplearn import BenchmarkRunner, LearningSolver # Create train and test instances train_instances = [...] test_instances = [...] # Training phase... training_solver = LearningSolver(...) training_solver.parallel_solve(train_instances, n_jobs=10) # Test phase... test_solvers = { \"Baseline\": LearningSolver(...), # each solver may have different parameters \"Strategy A\": LearningSolver(...), \"Strategy B\": LearningSolver(...), \"Strategy C\": LearningSolver(...), } benchmark = BenchmarkRunner(test_solvers) benchmark.fit(train_instances) benchmark.parallel_solve(test_instances, n_jobs=2) print(benchmark.raw_results()) The method fit trains the ML models for each individual solver. 
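Before moving on, note that the table returned by raw_results can be post-processed with standard pandas operations. The following is a minimal sketch, assuming that raw_results() returns the DataFrame described in the next paragraph, with one row per solver/instance pair and the column names listed there:

```python
import pandas as pd

# Assumes `benchmark` is the BenchmarkRunner from the snippet above,
# after fit() and parallel_solve() have completed.
results = benchmark.raw_results()

# Average wallclock time and final MIP gap per solver.
# Column names ("Solver", "Wallclock Time", "Gap") follow the documentation below.
summary = results.groupby("Solver")[["Wallclock Time", "Gap"]].mean()
print(summary)
```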
The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns: Solver, the name of the solver. Instance, the sequence number identifying the instance. Wallclock Time, the wallclock running time (in seconds) spent by the solver; Lower Bound, the best lower bound obtained by the solver; Upper Bound, the best upper bound obtained by the solver; Gap, the relative MIP integrality gap at the end of the optimization; Nodes, the number of explored branch-and-bound nodes. In addition to the above, there is also a \"Relative\" version of most columns, where the raw number is compared to the solver which provided the best performance. The Relative Wallclock Time, for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance. For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2.","title":"Using BenchmarkRunner"},{"location":"benchmark/#saving-and-loading-benchmark-results","text":"When iteratively exploring new formulations, encodings and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. BenchmarkRunner provides the methods save_results and load_results , which can be used to avoid this repetition, as the next example shows: # Benchmark baseline solvers and save results to a file. benchmark = BenchmarkRunner(baseline_solvers) benchmark.parallel_solve(test_instances) benchmark.save_results(\"baseline_results.csv\") # Benchmark remaining solvers, loading baseline results from file. benchmark = BenchmarkRunner(alternative_solvers) benchmark.load_results(\"baseline_results.csv\") benchmark.fit(training_instances) benchmark.parallel_solve(test_instances)","title":"Saving and loading benchmark results"},{"location":"customization/","text":"Customization Customizing solver parameters Selecting the internal MIP solver By default, LearningSolver uses Gurobi as its internal MIP solver. Another supported solver is IBM ILOG CPLEX . To switch between solvers, use the solver constructor argument, as shown below. It is also possible to specify a time limit (in seconds) and a relative MIP gap tolerance. from miplearn import LearningSolver solver = LearningSolver(solver=\"cplex\", time_limit=300, gap_tolerance=1e-3) Customizing solver components LearningSolver is composed of a number of individual machine-learning components, each targeting a different part of the solution process. Each component can be individually enabled, disabled or customized. The following components are enabled by default: LazyConstraintComponent : Predicts which lazy constraints to initially enforce.
The following components are also available, but not enabled by default: BranchPriorityComponent : Predicts good branch priorities for decision variables. Selecting components To create a LearningSolver with a specific set of components, the components constructor argument may be used, as the next example shows: # Create a solver without any components solver1 = LearningSolver(components=[]) # Create a solver with only two components solver2 = LearningSolver(components=[ LazyConstraintComponent(...), PrimalSolutionComponent(...), ]) It is also possible to add components to an existing solver using the solver.add method, as shown below. If the solver already holds another component of that type, the new component will replace the previous one. # Create solver with default components solver = LearningSolver() # Replace the default LazyConstraintComponent with one that has custom parameters solver.add(LazyConstraintComponent(...)) Adjusting component aggressiveness The aggressiveness of classification components (such as PrimalSolutionComponent and LazyConstraintComponent ) can be adjusted through the threshold constructor argument. Internally, these components ask the ML models how confident they are about each prediction (through the predict_proba method in the sklearn API), and only take into account predictions which have probabilities above the threshold. Lowering a component's threshold increases its aggressiveness, while raising a component's threshold makes it more conservative. MIPLearn also includes MinPrecisionThreshold , a dynamic threshold which adjusts itself automatically during training to achieve a minimum desired precision (the fraction of positive predictions that turn out to be correct). The example below shows how to initialize a PrimalSolutionComponent which achieves 95% precision, possibly at the cost of a lower recall. To make the component more aggressive, this precision may be lowered. PrimalSolutionComponent(threshold=MinPrecisionThreshold(0.95)) Evaluating component performance MIPLearn allows solver components to be modified, trained and evaluated in isolation. In the following example, we build and fit PrimalSolutionComponent outside the solver, then evaluate its performance. from miplearn import PrimalSolutionComponent # User-provided set of previously-solved instances train_instances = [...] # Construct and fit component on a subset of training instances comp = PrimalSolutionComponent() comp.fit(train_instances[:100]) # Evaluate performance on an additional set of training instances ev = comp.evaluate(train_instances[100:150]) The method evaluate returns a dictionary with performance evaluation statistics for each training instance provided, and for each type of prediction the component makes.
To obtain a summary across all instances, pandas may be used, as below: import pandas as pd pd.DataFrame(ev[\"Fix one\"]).mean(axis=1) Predicted positive 3.120000 Predicted negative 196.880000 Condition positive 62.500000 Condition negative 137.500000 True positive 3.060000 True negative 137.440000 False positive 0.060000 False negative 59.440000 Accuracy 0.702500 F1 score 0.093050 Recall 0.048921 Precision 0.981667 Predicted positive (%) 1.560000 Predicted negative (%) 98.440000 Condition positive (%) 31.250000 Condition negative (%) 68.750000 True positive (%) 1.530000 True negative (%) 68.720000 False positive (%) 0.030000 False negative (%) 29.720000 dtype: float64 Regression components (such as ObjectiveValueComponent ) can also be trained and evaluated similarly, as the next example shows: from miplearn import ObjectiveValueComponent comp = ObjectiveValueComponent() comp.fit(train_instances[:100]) ev = comp.evaluate(train_instances[100:150]) import pandas as pd pd.DataFrame(ev).mean(axis=1) Mean squared error 7001.977827 Explained variance 0.519790 Max error 242.375804 Mean absolute error 65.843924 R2 0.517612 Median absolute error 65.843924 dtype: float64 Using customized ML classifiers and regressors By default, given a training set of instances, MIPLearn trains a fixed set of ML classifiers and regressors, then selects the best one based on cross-validation performance. Alternatively, the user may specify which ML model a component should use through the classifier or regressor constructor parameters. The provided classifiers and regressors must follow the sklearn API. In particular, classifiers must provide the methods fit , predict_proba and predict , while regressors must provide the methods fit and predict . Danger MIPLearn must be able to generate a copy of any custom ML classifiers and regressors through the standard copy.deepcopy method. This currently makes it incompatible with Keras and TensorFlow predictors. This is a known limitation, which will be addressed in a future version. The example below shows how to construct a PrimalSolutionComponent which internally uses sklearn's KNeighborsClassifier . Any other sklearn classifier or pipeline can be used. from miplearn import PrimalSolutionComponent from sklearn.neighbors import KNeighborsClassifier comp = PrimalSolutionComponent(classifier=KNeighborsClassifier(n_neighbors=5)) comp.fit(train_instances)","title":"Customization"},{"location":"customization/#customization","text":"","title":"Customization"},{"location":"customization/#customizing-solver-parameters","text":"","title":"Customizing solver parameters"},{"location":"customization/#selecting-the-internal-mip-solver","text":"By default, LearningSolver uses Gurobi as its internal MIP solver. Another supported solver is IBM ILOG CPLEX . To switch between solvers, use the solver constructor argument, as shown below. It is also possible to specify a time limit (in seconds) and a relative MIP gap tolerance. from miplearn import LearningSolver solver = LearningSolver(solver=\"cplex\", time_limit=300, gap_tolerance=1e-3)","title":"Selecting the internal MIP solver"},{"location":"customization/#customizing-solver-components","text":"LearningSolver is composed of a number of individual machine-learning components, each targeting a different part of the solution process. Each component can be individually enabled, disabled or customized. The following components are enabled by default: LazyConstraintComponent : Predicts which lazy constraints to initially enforce.
ObjectiveValueComponent : Predicts the optimal value of the optimization problem, given the optimal solution to the LP relaxation. PrimalSolutionComponent : Predicts optimal values for binary decision variables. In heuristic mode, this component fixes the variables to their predicted values. In exact mode, the predicted values are provided to the solver as a (partial) MIP start. The following components are also available, but not enabled by default: BranchPriorityComponent : Predicts good branch priorities for decision variables.","title":"Customizing solver components"},{"location":"customization/#selecting-components","text":"To create a LearningSolver with a specific set of components, the components constructor argument may be used, as the next example shows: # Create a solver without any components solver1 = LearningSolver(components=[]) # Create a solver with only two components solver2 = LearningSolver(components=[ LazyConstraintComponent(...), PrimalSolutionComponent(...), ]) It is also possible to add components to an existing solver using the solver.add method, as shown below. If the solver already holds another component of that type, the new component will replace the previous one. # Create solver with default components solver = LearningSolver() # Replace the default LazyConstraintComponent with one that has custom parameters solver.add(LazyConstraintComponent(...))","title":"Selecting components"},{"location":"customization/#adjusting-component-aggressiveness","text":"The aggressiveness of classification components (such as PrimalSolutionComponent and LazyConstraintComponent ) can be adjusted through the threshold constructor argument. Internally, these components ask the ML models how confident they are about each prediction (through the predict_proba method in the sklearn API), and only take into account predictions which have probabilities above the threshold. Lowering a component's threshold increases its aggressiveness, while raising a component's threshold makes it more conservative. MIPLearn also includes MinPrecisionThreshold , a dynamic threshold which adjusts itself automatically during training to achieve a minimum desired precision (the fraction of positive predictions that turn out to be correct). The example below shows how to initialize a PrimalSolutionComponent which achieves 95% precision, possibly at the cost of a lower recall. To make the component more aggressive, this precision may be lowered. PrimalSolutionComponent(threshold=MinPrecisionThreshold(0.95))","title":"Adjusting component aggressiveness"},{"location":"customization/#evaluating-component-performance","text":"MIPLearn allows solver components to be modified, trained and evaluated in isolation. In the following example, we build and fit PrimalSolutionComponent outside the solver, then evaluate its performance. from miplearn import PrimalSolutionComponent # User-provided set of previously-solved instances train_instances = [...] # Construct and fit component on a subset of training instances comp = PrimalSolutionComponent() comp.fit(train_instances[:100]) # Evaluate performance on an additional set of training instances ev = comp.evaluate(train_instances[100:150]) The method evaluate returns a dictionary with performance evaluation statistics for each training instance provided, and for each type of prediction the component makes.
To obtain a summary across all instances, pandas may be used, as below: import pandas as pd pd.DataFrame(ev[\"Fix one\"]).mean(axis=1) Predicted positive 3.120000 Predicted negative 196.880000 Condition positive 62.500000 Condition negative 137.500000 True positive 3.060000 True negative 137.440000 False positive 0.060000 False negative 59.440000 Accuracy 0.702500 F1 score 0.093050 Recall 0.048921 Precision 0.981667 Predicted positive (%) 1.560000 Predicted negative (%) 98.440000 Condition positive (%) 31.250000 Condition negative (%) 68.750000 True positive (%) 1.530000 True negative (%) 68.720000 False positive (%) 0.030000 False negative (%) 29.720000 dtype: float64 Regression components (such as ObjectiveValueComponent ) can also be trained and evaluated similarly, as the next example shows: from miplearn import ObjectiveValueComponent comp = ObjectiveValueComponent() comp.fit(train_instances[:100]) ev = comp.evaluate(train_instances[100:150]) import pandas as pd pd.DataFrame(ev).mean(axis=1) Mean squared error 7001.977827 Explained variance 0.519790 Max error 242.375804 Mean absolute error 65.843924 R2 0.517612 Median absolute error 65.843924 dtype: float64","title":"Evaluating component performance"},{"location":"customization/#using-customized-ml-classifiers-and-regressors","text":"By default, given a training set of instances, MIPLearn trains a fixed set of ML classifiers and regressors, then selects the best one based on cross-validation performance. Alternatively, the user may specify which ML model a component should use through the classifier or regressor constructor parameters. The provided classifiers and regressors must follow the sklearn API. In particular, classifiers must provide the methods fit , predict_proba and predict , while regressors must provide the methods fit and predict . Danger MIPLearn must be able to generate a copy of any custom ML classifiers and regressors through the standard copy.deepcopy method. This currently makes it incompatible with Keras and TensorFlow predictors. This is a known limitation, which will be addressed in a future version. The example below shows how to construct a PrimalSolutionComponent which internally uses sklearn's KNeighborsClassifier . Any other sklearn classifier or pipeline can be used. from miplearn import PrimalSolutionComponent from sklearn.neighbors import KNeighborsClassifier comp = PrimalSolutionComponent(classifier=KNeighborsClassifier(n_neighbors=5)) comp.fit(train_instances)","title":"Using customized ML classifiers and regressors"},{"location":"problems/","text":"Benchmark Problems, Challenges and Results MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, that can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. On this page, we describe these problems, the included instance generators, and we present some benchmark results for LearningSolver with default parameters. Preliminaries Benchmark challenges When evaluating the performance of a conventional MIP solver, benchmark sets , such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques is typically measured as the average (or total) running time the solver takes to solve the entire benchmark set.
For Learning-Enhanced MIP solvers, it is also necessary to specify which instances the solver should be trained on (the training instances ) before solving the actual set of instances we are interested in (the test instances ). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger performance benefits. In MIPLearn, each optimization problem comes with a set of benchmark challenges , which specify how the training and test instances should be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from. Baseline results To illustrate the performance of LearningSolver , and to set a baseline for newly proposed techniques, we present on this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. For more detailed computational studies, see references . We compare three solvers: baseline: Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver) ml-exact: LearningSolver with default settings, using Gurobi 9.0 as internal MIP solver ml-heuristic: Same as above, but with mode=\"heuristic\" All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz). All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously. Maximum Weight Stable Set Problem Problem definition Given a simple undirected graph $G=(V,E)$ and weights $w \in \mathbb{R}^V$, the problem is to find a stable set $S \subseteq V$ that maximizes $\sum_{v \in S} w_v$. We recall that a subset $S \subseteq V$ is a stable set if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems. Random instance generator The class MaxWeightStableSetGenerator can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter fix_graph=True is provided, one random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ is generated in the constructor, where $n$ and $p$ are sampled from user-provided probability distributions n and p . To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution w . When fix_graph=False , a new random graph is generated for each instance, while the remaining parameters are sampled in the same way. Challenge A Fixed random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ with $n=200$ and $p=5\%$ Random vertex weights $w_v \sim U(100, 150)$ 500 training instances, 50 test instances MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.), n=randint(low=200, high=201), p=uniform(loc=0.05, scale=0.0), fix_graph=True) Traveling Salesman Problem Problem definition Given a list of cities and the distance between each pair of cities, the problem asks for the shortest route starting at the first city, visiting each other city exactly once, then returning to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete problems. Random problem generator The class TravelingSalesmanGenerator can be used to generate random instances of this problem, as the sketch below illustrates.
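As a usage illustration, this sketch constructs the generator with the Challenge A distributions and draws training and test sets from it. The import path and the generate method are assumptions based on the surrounding examples; the actual API may differ slightly:

```python
from scipy.stats import randint, uniform
from miplearn import TravelingSalesmanGenerator  # assumed import path

gen = TravelingSalesmanGenerator(
    x=uniform(loc=0.0, scale=1000.0),    # city coordinates sampled in [0, 1000]
    y=uniform(loc=0.0, scale=1000.0),
    n=randint(low=350, high=351),        # exactly 350 cities
    gamma=uniform(loc=0.95, scale=0.1),  # distance perturbation factor
    fix_cities=True,
    round=True,
)

# Assumed method name: draw independent random instances from the generator.
training_instances = gen.generate(500)
test_instances = gen.generate(50)
```

The sampling procedure itself is described next.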
Initially, the generator creates $n$ cities $(x_1,y_1),\ldots,(x_n,y_n) \in \mathbb{R}^2$, where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions n , x and y . For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to: d_{i,j} = \gamma_{i,j} \sqrt{(x_i-x_j)^2 + (y_i - y_j)^2} where $\gamma_{i,j}$ is sampled from the distribution gamma . If fix_cities=True is provided, the list of cities is kept the same for all generated instances. The $\gamma$ values, and therefore also the distances, are still different. By default, all distances $d_{i,j}$ are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. Challenge A Fixed list of 350 cities in the $[0, 1000]^2$ square $\gamma_{i,j} \sim U(0.95, 1.05)$ 500 training instances, 50 test instances TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), y=uniform(loc=0.0, scale=1000.0), n=randint(low=350, high=351), gamma=uniform(loc=0.95, scale=0.1), fix_cities=True, round=True, ) Multidimensional 0-1 Knapsack Problem Problem definition Given a set of $n$ items and $m$ types of resources (also called knapsacks ), the problem is to find a subset of items that maximizes profit without consuming more resources than are available. More precisely, the problem is: \begin{align*} \text{maximize} & \sum_{j=1}^n p_j x_j \\ \text{subject to} & \sum_{j=1}^n w_{ij} x_j \leq b_i & \forall i=1,\ldots,m \\ & x_j \in \{0,1\} & \forall j=1,\ldots,n \end{align*} Random instance generator The class MultiKnapsackGenerator can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions n and m . The weights $w_{ij}$ are sampled independently from the provided distribution w . The capacity of knapsack $i$ is set to b_i = \alpha_i \sum_{j=1}^n w_{ij} where $\alpha_i$, the tightness ratio, is sampled from the provided probability distribution alpha . To make the instances more challenging, the costs of the items are linearly correlated with their average weights. More specifically, the price of each item $j$ is set to: p_j = \sum_{i=1}^m \frac{w_{ij}}{m} + K u_j, where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled from the provided probability distributions K and u . If fix_w=True is provided, then $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as u and K are not constants, the generated instances will still not be completely identical. If a probability distribution w_jitter is provided, then item weights will be set to $w_{ij} \gamma_{ij}$ where $\gamma_{ij}$ is sampled from w_jitter . When combined with fix_w=True , this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead. By default, all generated prices, weights and capacities are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. References Freville, Arnaud, and G\u00e9rard Plateau. An efficient preprocessing procedure for the multidimensional 0\u20131 knapsack problem. Discrete Applied Mathematics 49.1-3 (1994): 189-212. Fr\u00e9ville, Arnaud.
The multidimensional 0\u20131 knapsack problem: An overview. European Journal of Operational Research 155.1 (2004): 1-21. Challenge A 250 variables, 10 constraints, fixed weights $w \sim U(0, 1000), \gamma \sim U(0.95, 1.05)$ $K = 500, u \sim U(0, 1), \alpha = 0.25$ 500 training instances, 50 test instances MultiKnapsackGenerator(n=randint(low=250, high=251), m=randint(low=10, high=11), w=uniform(loc=0.0, scale=1000.0), K=uniform(loc=500.0, scale=0.0), u=uniform(loc=0.0, scale=1.0), alpha=uniform(loc=0.25, scale=0.0), fix_w=True, w_jitter=uniform(loc=0.95, scale=0.1), )","title":"Problems"},{"location":"problems/#benchmark-problems-challenges-and-results","text":"MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, that can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. On this page, we describe these problems, the included instance generators, and we present some benchmark results for LearningSolver with default parameters.","title":"Benchmark Problems, Challenges and Results"},{"location":"problems/#preliminaries","text":"","title":"Preliminaries"},{"location":"problems/#benchmark-challenges","text":"When evaluating the performance of a conventional MIP solver, benchmark sets , such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques is typically measured as the average (or total) running time the solver takes to solve the entire benchmark set. For Learning-Enhanced MIP solvers, it is also necessary to specify which instances the solver should be trained on (the training instances ) before solving the actual set of instances we are interested in (the test instances ). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger performance benefits. In MIPLearn, each optimization problem comes with a set of benchmark challenges , which specify how the training and test instances should be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from.","title":"Benchmark challenges"},{"location":"problems/#baseline-results","text":"To illustrate the performance of LearningSolver , and to set a baseline for newly proposed techniques, we present on this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. For more detailed computational studies, see references . We compare three solvers: baseline: Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver) ml-exact: LearningSolver with default settings, using Gurobi 9.0 as internal MIP solver ml-heuristic: Same as above, but with mode=\"heuristic\" All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz).
All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously.","title":"Baseline results"},{"location":"problems/#maximum-weight-stable-set-problem","text":"","title":"Maximum Weight Stable Set Problem"},{"location":"problems/#problem-definition","text":"Given a simple undirected graph $G=(V,E)$ and weights $w \in \mathbb{R}^V$, the problem is to find a stable set $S \subseteq V$ that maximizes $\sum_{v \in S} w_v$. We recall that a subset $S \subseteq V$ is a stable set if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems.","title":"Problem definition"},{"location":"problems/#random-instance-generator","text":"The class MaxWeightStableSetGenerator can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter fix_graph=True is provided, one random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ is generated in the constructor, where $n$ and $p$ are sampled from user-provided probability distributions n and p . To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution w . When fix_graph=False , a new random graph is generated for each instance, while the remaining parameters are sampled in the same way.","title":"Random instance generator"},{"location":"problems/#challenge-a","text":"Fixed random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ with $n=200$ and $p=5\%$ Random vertex weights $w_v \sim U(100, 150)$ 500 training instances, 50 test instances MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.), n=randint(low=200, high=201), p=uniform(loc=0.05, scale=0.0), fix_graph=True)","title":"Challenge A"},{"location":"problems/#traveling-salesman-problem","text":"","title":"Traveling Salesman Problem"},{"location":"problems/#problem-definition_1","text":"Given a list of cities and the distance between each pair of cities, the problem asks for the shortest route starting at the first city, visiting each other city exactly once, then returning to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete problems.","title":"Problem definition"},{"location":"problems/#random-problem-generator","text":"The class TravelingSalesmanGenerator can be used to generate random instances of this problem. Initially, the generator creates $n$ cities $(x_1,y_1),\ldots,(x_n,y_n) \in \mathbb{R}^2$, where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions n , x and y . For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to: d_{i,j} = \gamma_{i,j} \sqrt{(x_i-x_j)^2 + (y_i - y_j)^2} where $\gamma_{i,j}$ is sampled from the distribution gamma . If fix_cities=True is provided, the list of cities is kept the same for all generated instances. The $\gamma$ values, and therefore also the distances, are still different. By default, all distances $d_{i,j}$ are rounded to the nearest integer.
If round=False is provided, this rounding will be disabled.","title":"Random problem generator"},{"location":"problems/#challenge-a_1","text":"Fixed list of 350 cities in the $[0, 1000]^2$ square $\gamma_{i,j} \sim U(0.95, 1.05)$ 500 training instances, 50 test instances TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), y=uniform(loc=0.0, scale=1000.0), n=randint(low=350, high=351), gamma=uniform(loc=0.95, scale=0.1), fix_cities=True, round=True, )","title":"Challenge A"},{"location":"problems/#multidimensional-0-1-knapsack-problem","text":"","title":"Multidimensional 0-1 Knapsack Problem"},{"location":"problems/#problem-definition_2","text":"Given a set of $n$ items and $m$ types of resources (also called knapsacks ), the problem is to find a subset of items that maximizes profit without consuming more resources than are available. More precisely, the problem is: \begin{align*} \text{maximize} & \sum_{j=1}^n p_j x_j \\ \text{subject to} & \sum_{j=1}^n w_{ij} x_j \leq b_i & \forall i=1,\ldots,m \\ & x_j \in \{0,1\} & \forall j=1,\ldots,n \end{align*}","title":"Problem definition"},{"location":"problems/#random-instance-generator_1","text":"The class MultiKnapsackGenerator can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions n and m . The weights $w_{ij}$ are sampled independently from the provided distribution w . The capacity of knapsack $i$ is set to b_i = \alpha_i \sum_{j=1}^n w_{ij} where $\alpha_i$, the tightness ratio, is sampled from the provided probability distribution alpha . To make the instances more challenging, the costs of the items are linearly correlated with their average weights. More specifically, the price of each item $j$ is set to: p_j = \sum_{i=1}^m \frac{w_{ij}}{m} + K u_j, where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled from the provided probability distributions K and u . If fix_w=True is provided, then $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as u and K are not constants, the generated instances will still not be completely identical. If a probability distribution w_jitter is provided, then item weights will be set to $w_{ij} \gamma_{ij}$ where $\gamma_{ij}$ is sampled from w_jitter . When combined with fix_w=True , this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead. By default, all generated prices, weights and capacities are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. References Freville, Arnaud, and G\u00e9rard Plateau. An efficient preprocessing procedure for the multidimensional 0\u20131 knapsack problem. Discrete Applied Mathematics 49.1-3 (1994): 189-212. Fr\u00e9ville, Arnaud. The multidimensional 0\u20131 knapsack problem: An overview.
European Journal of Operational Research 155.1 (2004): 1-21.","title":"Random instance generator"},{"location":"problems/#challenge-a_2","text":"250 variables, 10 constraints, fixed weights $w \sim U(0, 1000), \gamma \sim U(0.95, 1.05)$ $K = 500, u \sim U(0, 1), \alpha = 0.25$ 500 training instances, 50 test instances MultiKnapsackGenerator(n=randint(low=250, high=251), m=randint(low=10, high=11), w=uniform(loc=0.0, scale=1000.0), K=uniform(loc=500.0, scale=0.0), u=uniform(loc=0.0, scale=1.0), alpha=uniform(loc=0.25, scale=0.0), fix_w=True, w_jitter=uniform(loc=0.95, scale=0.1), )","title":"Challenge A"},{"location":"usage/","text":"Usage Installation In these docs, we describe the Python/Pyomo version of the package, although a Julia/JuMP version is also available. A mixed-integer solver is also required and its Python bindings must be properly installed. Supported solvers are currently CPLEX and Gurobi. To install MIPLearn, run: pip3 install miplearn After installation, the package miplearn should become available to Python. It can be imported as follows: import miplearn Using LearningSolver The main class provided by this package is LearningSolver , a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage: from miplearn import LearningSolver # List of user-provided instances training_instances = [...] test_instances = [...] # Create solver solver = LearningSolver() # Solve all training instances for instance in training_instances: solver.solve(instance) # Learn from training instances solver.fit(training_instances) # Solve all test instances for instance in test_instances: solver.solve(instance) In this example, we have two lists of user-provided instances: training_instances and test_instances . We start by solving all training instances. Since there is no historical information available at this point, the instances will be processed from scratch, with no ML acceleration. After solving each instance, the solver stores within each instance object the optimal solution, the optimal objective value, and other information that can be used to accelerate future solves. After all training instances are solved, we call solver.fit(training_instances) . This instructs the solver to train all its internal machine-learning models based on the solutions of the (solved) training instances. Subsequent calls to solver.solve(instance) will automatically use the trained Machine Learning models to accelerate the solution process. Describing problem instances Instances to be solved by LearningSolver must derive from the abstract class miplearn.Instance . The following three abstract methods must be implemented: instance.to_model() , which returns a concrete Pyomo model corresponding to the instance; instance.get_instance_features() , which returns a 1-dimensional Numpy array of (numerical) features describing the entire instance; instance.get_variable_features(var_name, index) , which returns a 1-dimensional array of (numerical) features describing a particular decision variable. The first method is used by LearningSolver to construct a concrete Pyomo model, which will be provided to the internal MIP solver. The second and third methods provide an encoding of the instance, which can be used by the ML models to make predictions.
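To make this interface concrete, here is a minimal sketch of a custom instance class for a single 0-1 knapsack, using the feature encoding suggested in the next paragraph. The method names follow the abstract interface just described; the import path and the bodies of the methods are illustrative assumptions, not the package's reference implementation:

```python
import numpy as np
import pyomo.environ as pe
from miplearn import Instance  # assumed import path for miplearn.Instance


class KnapsackInstance(Instance):
    # Illustrative single-knapsack instance: item weights, item prices, capacity.

    def __init__(self, weights, prices, capacity):
        self.weights = weights
        self.prices = prices
        self.capacity = capacity

    def to_model(self):
        # Concrete Pyomo model, handed by LearningSolver to the internal MIP solver.
        model = pe.ConcreteModel()
        items = range(len(self.weights))
        model.x = pe.Var(items, domain=pe.Binary)
        model.obj = pe.Objective(
            expr=sum(self.prices[i] * model.x[i] for i in items),
            sense=pe.maximize,
        )
        model.cap = pe.Constraint(
            expr=sum(self.weights[i] * model.x[i] for i in items) <= self.capacity,
        )
        return model

    def get_instance_features(self):
        # Fixed-length encoding of the entire instance.
        return np.array([
            np.mean(self.weights),
            np.mean(self.prices),
            len(self.weights),
            self.capacity,
        ])

    def get_variable_features(self, var_name, index):
        # Fixed-length encoding of one decision variable.
        return np.array([self.weights[index], self.prices[index]])
```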
In the knapsack problem, for example, an implementation may decide to provide as instance features the average weights, average prices, number of items and the size of the knapsack. The weight and the price of each individual item could be provided as variable features. See src/python/miplearn/problems/knapsack.py for a concrete example. An optional method which can be implemented is instance.get_variable_category(var_name, index) , which returns a category (a string, an integer or any hashable type) for each decision variable. If two variables have the same category, LearningSolver will use the same internal ML model to predict the values of both variables. By default, all variables belong to the \"default\" category, and therefore only one ML model is used for all variables. If the returned category is None , ML predictors will ignore the variable. It is not necessary to have a one-to-one correspondence between features and problem instances. One important (and deliberate) limitation of MIPLearn, however, is that get_instance_features() must always return arrays of the same length for all relevant instances of the problem. Similarly, get_variable_features(var_name, index) must also always return arrays of the same length for all variables in each category. It is up to the user to decide how to encode variable-length characteristics of the problem into fixed-length vectors. In graph problems, for example, graph embeddings can be used to reduce the (variable-length) lists of nodes and edges into a fixed-length structure that still preserves some properties of the graph. Different instance encodings may have a significant impact on performance. Obtaining heuristic solutions By default, LearningSolver uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver. In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts. For more significant performance benefits, LearningSolver can also be configured to place additional trust in the Machine Learning predictors, by using the mode=\"heuristic\" constructor argument. When operating in this mode, if an ML model is statistically shown (through stratified k-fold cross validation ) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see references and benchmark results ). Danger The heuristic mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible. Saving and loading solver state After solving a large number of training instances, it may be desirable to save the current state of LearningSolver to disk, so that the solver can still use the acquired knowledge after the application restarts. This can be accomplished by using the standard pickle module, as the following example illustrates: from miplearn import LearningSolver import pickle # Solve training instances training_instances = [...]
solver = LearningSolver() for instance in training_instances: solver.solve(instance) # Train machine-learning models solver.fit(training_instances) # Save trained solver to disk pickle.dump(solver, open(\"solver.pickle\", \"wb\")) # Application restarts... # Load trained solver from disk solver = pickle.load(open(\"solver.pickle\", \"rb\")) # Solve additional instances test_instances = [...] for instance in test_instances: solver.solve(instance) Solving training instances in parallel In many situations, training and test instances can be solved in parallel to accelerate the training process. LearningSolver provides the method parallel_solve(instances) to easily achieve this: from miplearn import LearningSolver training_instances = [...] solver = LearningSolver() solver.parallel_solve(training_instances, n_jobs=4) solver.fit(training_instances) # Test phase... test_instances = [...] solver.parallel_solve(test_instances) Current Limitations Only binary and continuous decision variables are currently supported.","title":"Usage"},{"location":"usage/#usage","text":"","title":"Usage"},{"location":"usage/#installation","text":"In these docs, we describe the Python/Pyomo version of the package, although a Julia/JuMP version is also available. A mixed-integer solver is also required and its Python bindings must be properly installed. Supported solvers are currently CPLEX and Gurobi. To install MIPLearn, run: pip3 install miplearn After installation, the package miplearn should become available to Python. It can be imported as follows: import miplearn","title":"Installation"},{"location":"usage/#using-learningsolver","text":"The main class provided by this package is LearningSolver , a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage: from miplearn import LearningSolver # List of user-provided instances training_instances = [...] test_instances = [...] # Create solver solver = LearningSolver() # Solve all training instances for instance in training_instances: solver.solve(instance) # Learn from training instances solver.fit(training_instances) # Solve all test instances for instance in test_instances: solver.solve(instance) In this example, we have two lists of user-provided instances: training_instances and test_instances . We start by solving all training instances. Since there is no historical information available at this point, the instances will be processed from scratch, with no ML acceleration. After solving each instance, the solver stores within each instance object the optimal solution, the optimal objective value, and other information that can be used to accelerate future solves. After all training instances are solved, we call solver.fit(training_instances) . This instructs the solver to train all its internal machine-learning models based on the solutions of the (solved) trained instances. Subsequent calls to solver.solve(instance) will automatically use the trained Machine Learning models to accelerate the solution process.","title":"Using LearningSolver"},{"location":"usage/#describing-problem-instances","text":"Instances to be solved by LearningSolver must derive from the abstract class miplearn.Instance . 
The following three abstract methods must be implemented: instance.to_model() , which returns a concrete Pyomo model corresponding to the instance; instance.get_instance_features() , which returns a 1-dimensional Numpy array of (numerical) features describing the entire instance; instance.get_variable_features(var_name, index) , which returns a 1-dimensional array of (numerical) features describing a particular decision variable. The first method is used by LearningSolver to construct a concrete Pyomo model, which will be provided to the internal MIP solver. The second and third methods provide an encoding of the instance, which can be used by the ML models to make predictions. In the knapsack problem, for example, an implementation may decide to provide as instance features the average weights, average prices, number of items and the size of the knapsack. The weight and the price of each individual item could be provided as variable features. See src/python/miplearn/problems/knapsack.py for a concrete example. An optional method which can be implemented is instance.get_variable_category(var_name, index) , which returns a category (a string, an integer or any hashable type) for each decision variable. If two variables have the same category, LearningSolver will use the same internal ML model to predict the values of both variables. By default, all variables belong to the \"default\" category, and therefore only one ML model is used for all variables. If the returned category is None , ML predictors will ignore the variable. It is not necessary to have a one-to-one correspondence between features and problem instances. One important (and deliberate) limitation of MIPLearn, however, is that get_instance_features() must always return arrays of same length for all relevant instances of the problem. Similarly, get_variable_features(var_name, index) must also always return arrays of same length for all variables in each category. It is up to the user to decide how to encode variable-length characteristics of the problem into fixed-length vectors. In graph problems, for example, graph embeddings can be used to reduce the (variable-length) lists of nodes and edges into a fixed-length structure that still preserves some properties of the graph. Different instance encodings may have significant impact on performance.","title":"Describing problem instances"},{"location":"usage/#obtaining-heuristic-solutions","text":"By default, LearningSolver uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver. In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts. For more significant performance benefits, LearningSolver can also be configured to place additional trust in the Machine Learning predictors, by using the mode=\"heuristic\" constructor argument. When operating in this mode, if a ML model is statistically shown (through stratified k-fold cross validation ) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see references and benchmark results ). 
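Switching to this mode is a single constructor argument, as documented above. A minimal sketch, reusing the training_instances and test_instances lists from the earlier usage example:

```python
from miplearn import LearningSolver

# mode="heuristic" trades the usual optimality guarantees for speed;
# see the warning below before relying on it.
solver = LearningSolver(mode="heuristic")
solver.fit(training_instances)  # train on a large, representative, already-solved set first
for instance in test_instances:
    solver.solve(instance)
```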
Danger The heuristic mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible.","title":"Obtaining heuristic solutions"},{"location":"usage/#saving-and-loading-solver-state","text":"After solving a large number of training instances, it may be desirable to save the current state of LearningSolver to disk, so that the solver can still use the acquired knowledge after the application restarts. This can be accomplished by using the standard pickle module, as the following example illustrates: from miplearn import LearningSolver import pickle # Solve training instances training_instances = [...] solver = LearningSolver() for instance in training_instances: solver.solve(instance) # Train machine-learning models solver.fit(training_instances) # Save trained solver to disk pickle.dump(solver, open(\"solver.pickle\", \"wb\")) # Application restarts... # Load trained solver from disk solver = pickle.load(open(\"solver.pickle\", \"rb\")) # Solve additional instances test_instances = [...] for instance in test_instances: solver.solve(instance)","title":"Saving and loading solver state"},{"location":"usage/#solving-training-instances-in-parallel","text":"In many situations, training and test instances can be solved in parallel to accelerate the training process. LearningSolver provides the method parallel_solve(instances) to easily achieve this: from miplearn import LearningSolver training_instances = [...] solver = LearningSolver() solver.parallel_solve(training_instances, n_jobs=4) solver.fit(training_instances) # Test phase... test_instances = [...] 
solver.parallel_solve(test_instances)","title":"Solving training instances in parallel"},{"location":"usage/#current-limitations","text":"Only binary and continuous decision variables are currently supported.","title":"Current Limitations"}]} \ No newline at end of file diff --git a/0.1/search/worker.js b/0.1/search/worker.js index a3ccc07..9cce2f7 100644 --- a/0.1/search/worker.js +++ b/0.1/search/worker.js @@ -58,6 +58,7 @@ function onScriptsLoaded () { if (data.config && data.config.separator && data.config.separator.length) { lunr.tokenizer.separator = new RegExp(data.config.separator); } + if (data.index) { index = lunr.Index.load(data.index); data.docs.forEach(function (doc) { @@ -84,6 +85,7 @@ function onScriptsLoaded () { console.log('Lunr index built, search ready'); } allowSearch = true; + postMessage({config: data.config}); postMessage({allowSearch: allowSearch}); } diff --git a/0.1/sitemap.xml b/0.1/sitemap.xml index dbf3ef5..170d005 100644 --- a/0.1/sitemap.xml +++ b/0.1/sitemap.xml @@ -1,31 +1,25 @@ - - + None 2020-08-29 daily - - + None 2020-08-29 daily - - + None 2020-08-29 daily - - + None 2020-08-29 daily - - + None 2020-08-29 daily - - + None 2020-08-29 daily diff --git a/0.1/sitemap.xml.gz b/0.1/sitemap.xml.gz index b7b4f3f8c6fd969ff30825b2036dd2b64ba5132f..644de22cc9a74d71690c3f1d1d09acb937a74199 100644 GIT binary patch literal 197 zcmV;$06PC4iwFp>;!0lv|8r?{Wo=<_E_iKh0PT{o4#FT1hW9=NVJ~2()=;`RI_U!t ziY*C+%AwV_7n+*5_y%P;?z{i;EyugpWYJRxFy6KJKo~~aDc>76wD@{Cl{M}}!5_hb zjwpp4w0MkhK2o|Y3jq`LBxfK#3>4Iu8lW4bj5Ju}T~VO?xb;e$z0;iL-xo@VlkY{v zDps<(teQ05*cMTvbc4PHeHXnn_RZ-JSu>nw&FNly}~zzLzVw9*4) zLWInO1hY}}_69-6)}C$l@83WBn&r)7u;{J>81HJlBMc+$ly8meYJ5H&@)9>9!1JKo^j8s@;DAjymQHWcw#L+v=>8w7cggE$C z6s%x5EAygC>(yH$s+X?Qm!PkrmBv0f{c#ow{bDxY2K)>7Zt2Ix3+bg+Z4(0k04ktg A4gdfE diff --git a/0.1/usage/index.html b/0.1/usage/index.html index b9d40c1..f86204d 100644 --- a/0.1/usage/index.html +++ b/0.1/usage/index.html @@ -8,7 +8,7 @@ - + Usage - MIPLearn @@ -24,6 +24,10 @@ + + + + @@ -118,7 +122,7 @@
  • - Edit on GitHub + Edit on GitHub
  • @@ -151,15 +155,9 @@

    Usage

    Installation

    -

    MIPLearn is mainly written in Python, with some components written in Julia. For this -reason, both Python 3.6+ and Julia 1.3+ are required. A mixed-integer solver is also required, and -its Python bindings must be properly installed. Supported solvers are CPLEX and -Gurobi. Optimization problems currently need to be specified in the Pyomo modeling language. -A JuMP interface to the package is currently under development.

    -

    To install MIPLearn, run the following commands:

    -
    git clone https://github.com/ANL-CEEESA/MIPLearn.git
    -cd MIPLearn
    -make install
    +

    In these docs, we describe the Python/Pyomo version of the package, although a Julia/JuMP version is also available. A mixed-integer solver is also required and its Python bindings must be properly installed. Supported solvers are currently CPLEX and Gurobi.

    +

    To install MIPLearn, run:

    +
    pip3 install miplearn
     

    After installation, the package miplearn should become available to Python. It can be imported @@ -167,10 +165,6 @@ as follows:

    import miplearn
     
    -
    -

    Note

    -

    To install MIPLearn in another Python environment, switch to that environment before running make install. To install the package in development mode, run make develop instead.

    -

    Using LearningSolver

    The main class provided by this package is LearningSolver, a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage:

    from miplearn import LearningSolver
    @@ -276,7 +270,13 @@ solver.parallel_solve(test_instances)
         
         
         
    +
    +    
    +    
    +        
    +    
         
    +
         
         
         
    diff --git a/dev/404.html b/dev/404.html
    index 7dbe612..8f5179d 100644
    --- a/dev/404.html
    +++ b/dev/404.html
    @@ -8,7 +8,7 @@
         
         
         
    -    
    +    
     
         
         MIPLearn
    @@ -24,6 +24,10 @@
         
     
         
    +        
    +        
    +        
    +    
         
     
         
    @@ -143,7 +147,13 @@
         
         
         
    +
         
    +    
    +        
    +    
    +    
    +
         
         
         
    diff --git a/dev/about/index.html b/dev/about/index.html
    index c71ae86..9347b63 100644
    --- a/dev/about/index.html
    +++ b/dev/about/index.html
    @@ -8,7 +8,7 @@
         
         
         
    -    
    +    
     
         
         About - MIPLearn
    @@ -24,6 +24,10 @@
         
     
         
    +        
    +        
    +        
    +    
         
     
         
    @@ -118,7 +122,7 @@
                             
                         
                         
  • - Edit on GitHub + Edit on GitHub
  • @@ -201,7 +205,13 @@ POSSIBILITY OF SUCH DAMAGE. + + + + + + diff --git a/dev/benchmark/index.html b/dev/benchmark/index.html index 2f672ad..e7b5fa6 100644 --- a/dev/benchmark/index.html +++ b/dev/benchmark/index.html @@ -8,7 +8,7 @@ - + Benchmark - MIPLearn @@ -24,6 +24,10 @@ + + + + @@ -118,7 +122,7 @@
  • - Edit on GitHub + Edit on GitHub
  • @@ -207,7 +211,13 @@ benchmark.parallel_solve(test_instances) + + + + + + diff --git a/dev/customization/index.html b/dev/customization/index.html index b43e1c4..6769cff 100644 --- a/dev/customization/index.html +++ b/dev/customization/index.html @@ -8,7 +8,7 @@ - + Customization - MIPLearn @@ -24,6 +24,10 @@ + + + + @@ -118,7 +122,7 @@
  • - Edit on GitHub + Edit on GitHub
  • @@ -301,7 +305,13 @@ comp.fit(train_instances) + + + + + + diff --git a/dev/index.html b/dev/index.html index 9f29cff..f5173ba 100644 --- a/dev/index.html +++ b/dev/index.html @@ -8,7 +8,7 @@ - + Home - MIPLearn @@ -24,6 +24,10 @@ + + + + @@ -118,7 +122,7 @@
  • - Edit on GitHub + Edit on GitHub
  • @@ -189,7 +193,13 @@ + + + + + + @@ -268,6 +278,6 @@ diff --git a/dev/problems/index.html b/dev/problems/index.html index 9ccf991..70c25eb 100644 --- a/dev/problems/index.html +++ b/dev/problems/index.html @@ -8,7 +8,7 @@ - + Problems - MIPLearn @@ -24,6 +24,10 @@ + + + + @@ -118,7 +122,7 @@
  • - Edit on GitHub + Edit on GitHub
  • @@ -301,7 +305,13 @@ from the provided probability distributions K and u. + + + + + + diff --git a/dev/search/lunr.js b/dev/search/lunr.js index c218cc8..c353765 100644 --- a/dev/search/lunr.js +++ b/dev/search/lunr.js @@ -1,6 +1,6 @@ /** - * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.1.6 - * Copyright (C) 2018 Oliver Nightingale + * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.8 + * Copyright (C) 2019 Oliver Nightingale * @license MIT */ @@ -54,14 +54,15 @@ var lunr = function (config) { return builder.build() } -lunr.version = "2.1.6" +lunr.version = "2.3.8" /*! * lunr.utils - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** * A namespace containing utils for the rest of the lunr library + * @namespace lunr.utils */ lunr.utils = {} @@ -69,7 +70,8 @@ lunr.utils = {} * Print a warning message to the console. * * @param {String} message The message to be printed. - * @memberOf Utils + * @memberOf lunr.utils + * @function */ lunr.utils.warn = (function (global) { /* eslint-disable no-console */ @@ -90,7 +92,7 @@ lunr.utils.warn = (function (global) { * * @param {Any} obj The object to convert to a string. * @return {String} string representation of the passed object. - * @memberOf Utils + * @memberOf lunr.utils */ lunr.utils.asString = function (obj) { if (obj === void 0 || obj === null) { @@ -99,6 +101,52 @@ lunr.utils.asString = function (obj) { return obj.toString() } } + +/** + * Clones an object. + * + * Will create a copy of an existing object such that any mutations + * on the copy cannot affect the original. + * + * Only shallow objects are supported, passing a nested object to this + * function will cause a TypeError. + * + * Objects with primitives, and arrays of primitives are supported. + * + * @param {Object} obj The object to clone. + * @return {Object} a clone of the passed object. + * @throws {TypeError} when a nested object is passed. + * @memberOf Utils + */ +lunr.utils.clone = function (obj) { + if (obj === null || obj === undefined) { + return obj + } + + var clone = Object.create(null), + keys = Object.keys(obj) + + for (var i = 0; i < keys.length; i++) { + var key = keys[i], + val = obj[key] + + if (Array.isArray(val)) { + clone[key] = val.slice() + continue + } + + if (typeof val === 'string' || + typeof val === 'number' || + typeof val === 'boolean') { + clone[key] = val + continue + } + + throw new TypeError("clone is not deep and does not support nested objects") + } + + return clone +} lunr.FieldRef = function (docRef, fieldName, stringValue) { this.docRef = docRef this.fieldName = fieldName @@ -127,6 +175,139 @@ lunr.FieldRef.prototype.toString = function () { return this._stringValue } +/*! + * lunr.Set + * Copyright (C) 2019 Oliver Nightingale + */ + +/** + * A lunr set. + * + * @constructor + */ +lunr.Set = function (elements) { + this.elements = Object.create(null) + + if (elements) { + this.length = elements.length + + for (var i = 0; i < this.length; i++) { + this.elements[elements[i]] = true + } + } else { + this.length = 0 + } +} + +/** + * A complete set that contains all elements. + * + * @static + * @readonly + * @type {lunr.Set} + */ +lunr.Set.complete = { + intersect: function (other) { + return other + }, + + union: function (other) { + return other + }, + + contains: function () { + return true + } +} + +/** + * An empty set that contains no elements. 
+ * + * @static + * @readonly + * @type {lunr.Set} + */ +lunr.Set.empty = { + intersect: function () { + return this + }, + + union: function (other) { + return other + }, + + contains: function () { + return false + } +} + +/** + * Returns true if this set contains the specified object. + * + * @param {object} object - Object whose presence in this set is to be tested. + * @returns {boolean} - True if this set contains the specified object. + */ +lunr.Set.prototype.contains = function (object) { + return !!this.elements[object] +} + +/** + * Returns a new set containing only the elements that are present in both + * this set and the specified set. + * + * @param {lunr.Set} other - set to intersect with this set. + * @returns {lunr.Set} a new set that is the intersection of this and the specified set. + */ + +lunr.Set.prototype.intersect = function (other) { + var a, b, elements, intersection = [] + + if (other === lunr.Set.complete) { + return this + } + + if (other === lunr.Set.empty) { + return other + } + + if (this.length < other.length) { + a = this + b = other + } else { + a = other + b = this + } + + elements = Object.keys(a.elements) + + for (var i = 0; i < elements.length; i++) { + var element = elements[i] + if (element in b.elements) { + intersection.push(element) + } + } + + return new lunr.Set (intersection) +} + +/** + * Returns a new set combining the elements of this and the specified set. + * + * @param {lunr.Set} other - set to union with this set. + * @return {lunr.Set} a new set that is the union of this and the specified set. + */ + +lunr.Set.prototype.union = function (other) { + if (other === lunr.Set.complete) { + return lunr.Set.complete + } + + if (other === lunr.Set.empty) { + return this + } + + return new lunr.Set(Object.keys(this.elements).concat(Object.keys(other.elements))) +} /** * A function to calculate the inverse document frequency for * a posting. This is shared between the builder and the index @@ -208,7 +389,7 @@ lunr.Token.prototype.clone = function (fn) { } /*! * lunr.tokenizer - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -220,22 +401,30 @@ lunr.Token.prototype.clone = function (fn) { * then will split this string on the character in `lunr.tokenizer.separator`. * Arrays will have their elements converted to strings and wrapped in a lunr.Token. * + * Optional metadata can be passed to the tokenizer, this metadata will be cloned and + * added as metadata to every token that is created from the object to be tokenized. 
+ * * @static * @param {?(string|object|object[])} obj - The object to convert into tokens + * @param {?object} metadata - Optional metadata to associate with every token * @returns {lunr.Token[]} + * @see {@link lunr.Pipeline} */ -lunr.tokenizer = function (obj) { +lunr.tokenizer = function (obj, metadata) { if (obj == null || obj == undefined) { return [] } if (Array.isArray(obj)) { return obj.map(function (t) { - return new lunr.Token(lunr.utils.asString(t).toLowerCase()) + return new lunr.Token( + lunr.utils.asString(t).toLowerCase(), + lunr.utils.clone(metadata) + ) }) } - var str = obj.toString().trim().toLowerCase(), + var str = obj.toString().toLowerCase(), len = str.length, tokens = [] @@ -246,11 +435,15 @@ lunr.tokenizer = function (obj) { if ((char.match(lunr.tokenizer.separator) || sliceEnd == len)) { if (sliceLength > 0) { + var tokenMetadata = lunr.utils.clone(metadata) || {} + tokenMetadata["position"] = [sliceStart, sliceLength] + tokenMetadata["index"] = tokens.length + tokens.push( - new lunr.Token (str.slice(sliceStart, sliceEnd), { - position: [sliceStart, sliceLength], - index: tokens.length - }) + new lunr.Token ( + str.slice(sliceStart, sliceEnd), + tokenMetadata + ) ) } @@ -272,7 +465,7 @@ lunr.tokenizer = function (obj) { lunr.tokenizer.separator = /[\s\-]+/ /*! * lunr.Pipeline - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -316,8 +509,8 @@ lunr.Pipeline.registeredFunctions = Object.create(null) * or mutate (or add) metadata for a given token. * * A pipeline function can indicate that the passed token should be discarded by returning - * null. This token will not be passed to any downstream pipeline functions and will not be - * added to the index. + * null, undefined or an empty string. This token will not be passed to any downstream pipeline + * functions and will not be added to the index. * * Multiple tokens can be returned by returning an array of tokens. Each token will be passed * to any downstream pipeline functions and all will returned tokens will be added to the index. @@ -480,9 +673,9 @@ lunr.Pipeline.prototype.run = function (tokens) { for (var j = 0; j < tokens.length; j++) { var result = fn(tokens[j], j, tokens) - if (result === void 0 || result === '') continue + if (result === null || result === void 0 || result === '') continue - if (result instanceof Array) { + if (Array.isArray(result)) { for (var k = 0; k < result.length; k++) { memo.push(result[k]) } @@ -503,10 +696,12 @@ lunr.Pipeline.prototype.run = function (tokens) { * token and mapping the resulting tokens back to strings. * * @param {string} str - The string to pass through the pipeline. + * @param {?object} metadata - Optional metadata to associate with the token + * passed to the pipeline. * @returns {string[]} */ -lunr.Pipeline.prototype.runString = function (str) { - var token = new lunr.Token (str) +lunr.Pipeline.prototype.runString = function (str, metadata) { + var token = new lunr.Token (str, metadata) return this.run([token]).map(function (t) { return t.toString() @@ -537,7 +732,7 @@ lunr.Pipeline.prototype.toJSON = function () { } /*! * lunr.Vector - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -698,15 +893,14 @@ lunr.Vector.prototype.dot = function (otherVector) { } /** - * Calculates the cosine similarity between this vector and another - * vector. + * Calculates the similarity between this vector and another vector. 
* * @param {lunr.Vector} otherVector - The other vector to calculate the * similarity with. * @returns {Number} */ lunr.Vector.prototype.similarity = function (otherVector) { - return this.dot(otherVector) / (this.magnitude() * otherVector.magnitude()) + return this.dot(otherVector) / this.magnitude() || 0 } /** @@ -735,7 +929,7 @@ lunr.Vector.prototype.toJSON = function () { /* eslint-disable */ /*! * lunr.stemmer - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt */ @@ -748,6 +942,7 @@ lunr.Vector.prototype.toJSON = function () { * @param {lunr.Token} token - The string to stem * @returns {lunr.Token} * @see {@link lunr.Pipeline} + * @function */ lunr.stemmer = (function(){ var step2list = { @@ -956,7 +1151,7 @@ lunr.stemmer = (function(){ lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer') /*! * lunr.stopWordFilter - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -966,6 +1161,7 @@ lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer') * The built in lunr.stopWordFilter is built using this generator and can be used * to generate custom stopWordFilters for applications or non English languages. * + * @function * @param {Array} token The token to pass through the filter * @returns {lunr.PipelineFunction} * @see lunr.Pipeline @@ -989,6 +1185,7 @@ lunr.generateStopWordFilter = function (stopWords) { * This is intended to be used in the Pipeline. If the token does not pass the * filter then undefined will be returned. * + * @function * @implements {lunr.PipelineFunction} * @params {lunr.Token} token - A token to check for being a stop word. * @returns {lunr.Token} @@ -1119,7 +1316,7 @@ lunr.stopWordFilter = lunr.generateStopWordFilter([ lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter') /*! * lunr.trimmer - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -1146,7 +1343,7 @@ lunr.trimmer = function (token) { lunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer') /*! 
* lunr.TokenSet - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -1263,50 +1460,58 @@ lunr.TokenSet.fromFuzzyString = function (str, editDistance) { if (frame.str.length == 1) { noEditNode.final = true - } else { - stack.push({ - node: noEditNode, - editsRemaining: frame.editsRemaining, - str: frame.str.slice(1) - }) } + + stack.push({ + node: noEditNode, + editsRemaining: frame.editsRemaining, + str: frame.str.slice(1) + }) + } + + if (frame.editsRemaining == 0) { + continue + } + + // insertion + if ("*" in frame.node.edges) { + var insertionNode = frame.node.edges["*"] + } else { + var insertionNode = new lunr.TokenSet + frame.node.edges["*"] = insertionNode } + if (frame.str.length == 0) { + insertionNode.final = true + } + + stack.push({ + node: insertionNode, + editsRemaining: frame.editsRemaining - 1, + str: frame.str + }) + // deletion // can only do a deletion if we have enough edits remaining // and if there are characters left to delete in the string - if (frame.editsRemaining > 0 && frame.str.length > 1) { - var char = frame.str.charAt(1), - deletionNode - - if (char in frame.node.edges) { - deletionNode = frame.node.edges[char] - } else { - deletionNode = new lunr.TokenSet - frame.node.edges[char] = deletionNode - } - - if (frame.str.length <= 2) { - deletionNode.final = true - } else { - stack.push({ - node: deletionNode, - editsRemaining: frame.editsRemaining - 1, - str: frame.str.slice(2) - }) - } + if (frame.str.length > 1) { + stack.push({ + node: frame.node, + editsRemaining: frame.editsRemaining - 1, + str: frame.str.slice(1) + }) } // deletion // just removing the last character from the str - if (frame.editsRemaining > 0 && frame.str.length == 1) { + if (frame.str.length == 1) { frame.node.final = true } // substitution // can only do a substitution if we have enough edits remaining // and if there are characters left to substitute - if (frame.editsRemaining > 0 && frame.str.length >= 1) { + if (frame.str.length >= 1) { if ("*" in frame.node.edges) { var substitutionNode = frame.node.edges["*"] } else { @@ -1316,40 +1521,19 @@ lunr.TokenSet.fromFuzzyString = function (str, editDistance) { if (frame.str.length == 1) { substitutionNode.final = true - } else { - stack.push({ - node: substitutionNode, - editsRemaining: frame.editsRemaining - 1, - str: frame.str.slice(1) - }) } - } - // insertion - // can only do insertion if there are edits remaining - if (frame.editsRemaining > 0) { - if ("*" in frame.node.edges) { - var insertionNode = frame.node.edges["*"] - } else { - var insertionNode = new lunr.TokenSet - frame.node.edges["*"] = insertionNode - } - - if (frame.str.length == 0) { - insertionNode.final = true - } else { - stack.push({ - node: insertionNode, - editsRemaining: frame.editsRemaining - 1, - str: frame.str - }) - } + stack.push({ + node: substitutionNode, + editsRemaining: frame.editsRemaining - 1, + str: frame.str.slice(1) + }) } // transposition // can only do a transposition if there are edits remaining // and there are enough characters to transpose - if (frame.editsRemaining > 0 && frame.str.length > 1) { + if (frame.str.length > 1) { var charA = frame.str.charAt(0), charB = frame.str.charAt(1), transposeNode @@ -1363,13 +1547,13 @@ lunr.TokenSet.fromFuzzyString = function (str, editDistance) { if (frame.str.length == 1) { transposeNode.final = true - } else { - stack.push({ - node: transposeNode, - editsRemaining: frame.editsRemaining - 1, - str: charA + frame.str.slice(2) - }) } + + stack.push({ + node: 
transposeNode, + editsRemaining: frame.editsRemaining - 1, + str: charA + frame.str.slice(2) + }) } } @@ -1388,14 +1572,13 @@ lunr.TokenSet.fromFuzzyString = function (str, editDistance) { */ lunr.TokenSet.fromString = function (str) { var node = new lunr.TokenSet, - root = node, - wildcardFound = false + root = node /* * Iterates through all characters within the passed string * appending a node for each character. * - * As soon as a wildcard character is found then a self + * When a wildcard character is found then a self * referencing edge is introduced to continually match * any number of any characters. */ @@ -1404,7 +1587,6 @@ lunr.TokenSet.fromString = function (str) { final = (i == len - 1) if (char == "*") { - wildcardFound = true node.edges[char] = node node.final = final @@ -1414,11 +1596,6 @@ lunr.TokenSet.fromString = function (str) { node.edges[char] = next node = next - - // TODO: is this needed anymore? - if (wildcardFound) { - node.edges["*"] = root - } } } @@ -1429,6 +1606,10 @@ lunr.TokenSet.fromString = function (str) { * Converts this TokenSet into an array of strings * contained within the TokenSet. * + * This is not intended to be used on a TokenSet that + * contains wildcards, in these cases the results are + * undefined and are likely to cause an infinite loop. + * * @returns {string[]} */ lunr.TokenSet.prototype.toArray = function () { @@ -1445,6 +1626,11 @@ lunr.TokenSet.prototype.toArray = function () { len = edges.length if (frame.node.final) { + /* In Safari, at this point the prefix is sometimes corrupted, see: + * https://github.com/olivernn/lunr.js/issues/279 Calling any + * String.prototype method forces Safari to "cast" this string to what + * it's supposed to be, fixing the bug. */ + frame.prefix.charAt(0) words.push(frame.prefix) } @@ -1641,7 +1827,7 @@ lunr.TokenSet.Builder.prototype.minimize = function (downTo) { } /*! * lunr.Index - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -1655,7 +1841,7 @@ lunr.TokenSet.Builder.prototype.minimize = function (downTo) { * @constructor * @param {Object} attrs - The attributes of the built search index. * @param {Object} attrs.invertedIndex - An index of term/field to document reference. - * @param {Object} attrs.documentVectors - Document vectors keyed by document reference. + * @param {Object} attrs.fieldVectors - Field vectors * @param {lunr.TokenSet} attrs.tokenSet - An set of all corpus tokens. * @param {string[]} attrs.fields - The names of indexed document fields. * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms. @@ -1701,6 +1887,12 @@ lunr.Index = function (attrs) { * to provide fuzzy matching, e.g. 'hello~2' will match documents with hello with an edit distance of 2. * Avoid large values for edit distance to improve query performance. * + * Each term also supports a presence modifier. By default a term's presence in document is optional, however + * this can be changed to either required or prohibited. For a term's presence to be required in a document the + * term should be prefixed with a '+', e.g. `+foo bar` is a search for documents that must contain 'foo' and + * optionally contain 'bar'. Conversely a leading '-' sets the terms presence to prohibited, i.e. it must not + * appear in a document, e.g. `-foo bar` is a search for documents that do not contain 'foo' but may contain 'bar'. 
+ * * To escape special characters the backslash character '\' can be used, this allows searches to include * characters that would normally be considered modifiers, e.g. `foo\~2` will search for a term "foo~2" instead * of attempting to apply a boost of 2 to the search term "foo". @@ -1716,13 +1908,16 @@ lunr.Index = function (attrs) { * hello^10 * @example term with an edit distance of 2 * hello~2 + * @example terms with presence modifiers + * -foo +bar baz */ /** * Performs a search against the index using lunr query syntax. * * Results will be returned sorted by their score, the most relevant results - * will be returned first. + * will be returned first. For details on how the score is calculated, please see + * the {@link https://lunrjs.com/guides/searching.html#scoring|guide}. * * For more programmatic querying use lunr.Index#query. * @@ -1773,7 +1968,18 @@ lunr.Index.prototype.query = function (fn) { var query = new lunr.Query(this.fields), matchingFields = Object.create(null), queryVectors = Object.create(null), - termFieldCache = Object.create(null) + termFieldCache = Object.create(null), + requiredMatches = Object.create(null), + prohibitedMatches = Object.create(null) + + /* + * To support field level boosts a query vector is created per + * field. An empty vector is eagerly created to support negated + * queries. + */ + for (var i = 0; i < this.fields.length; i++) { + queryVectors[this.fields[i]] = new lunr.Vector + } fn.call(query, query) @@ -1787,10 +1993,13 @@ lunr.Index.prototype.query = function (fn) { * for a single query term. */ var clause = query.clauses[i], - terms = null + terms = null, + clauseMatches = lunr.Set.complete if (clause.usePipeline) { - terms = this.pipeline.runString(clause.term) + terms = this.pipeline.runString(clause.term, { + fields: clause.fields + }) } else { terms = [clause.term] } @@ -1814,6 +2023,21 @@ lunr.Index.prototype.query = function (fn) { var termTokenSet = lunr.TokenSet.fromClause(clause), expandedTerms = this.tokenSet.intersect(termTokenSet).toArray() + /* + * If a term marked as required does not exist in the tokenSet it is + * impossible for the search to return any matches. We set all the field + * scoped required matches set to empty and stop examining any further + * clauses. + */ + if (expandedTerms.length === 0 && clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = lunr.Set.empty + } + + break + } + for (var j = 0; j < expandedTerms.length; j++) { /* * For each term get the posting and termIndex, this is required for @@ -1835,26 +2059,50 @@ lunr.Index.prototype.query = function (fn) { var field = clause.fields[k], fieldPosting = posting[field], matchingDocumentRefs = Object.keys(fieldPosting), - termField = expandedTerm + "/" + field + termField = expandedTerm + "/" + field, + matchingDocumentsSet = new lunr.Set(matchingDocumentRefs) /* - * To support field level boosts a query vector is created per - * field. This vector is populated using the termIndex found for - * the term and a unit value with the appropriate boost applied. + * if the presence of this term is required ensure that the matching + * documents are added to the set of required matches for this clause. * - * If the query vector for this field does not exist yet it needs - * to be created. 
*/ - if (queryVectors[field] === undefined) { - queryVectors[field] = new lunr.Vector + if (clause.presence == lunr.Query.presence.REQUIRED) { + clauseMatches = clauseMatches.union(matchingDocumentsSet) + + if (requiredMatches[field] === undefined) { + requiredMatches[field] = lunr.Set.complete + } + } + + /* + * if the presence of this term is prohibited ensure that the matching + * documents are added to the set of prohibited matches for this field, + * creating that set if it does not yet exist. + */ + if (clause.presence == lunr.Query.presence.PROHIBITED) { + if (prohibitedMatches[field] === undefined) { + prohibitedMatches[field] = lunr.Set.empty + } + + prohibitedMatches[field] = prohibitedMatches[field].union(matchingDocumentsSet) + + /* + * Prohibited matches should not be part of the query vector used for + * similarity scoring and no metadata should be extracted so we continue + * to the next field + */ + continue } /* + * The query field vector is populated using the termIndex found for + * the term and a unit value with the appropriate boost applied. * Using upsert because there could already be an entry in the vector * for the term we are working with. In that case we just add the scores * together. */ - queryVectors[field].upsert(termIndex, 1 * clause.boost, function (a, b) { return a + b }) + queryVectors[field].upsert(termIndex, clause.boost, function (a, b) { return a + b }) /** * If we've already seen this term, field combo then we've already collected @@ -1888,12 +2136,65 @@ lunr.Index.prototype.query = function (fn) { } } } + + /** + * If the presence was required we need to update the requiredMatches field sets. + * We do this after all fields for the term have collected their matches because + * the clause terms presence is required in _any_ of the fields not _all_ of the + * fields. + */ + if (clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = requiredMatches[field].intersect(clauseMatches) + } + } + } + + /** + * Need to combine the field scoped required and prohibited + * matching documents into a global set of required and prohibited + * matches + */ + var allRequiredMatches = lunr.Set.complete, + allProhibitedMatches = lunr.Set.empty + + for (var i = 0; i < this.fields.length; i++) { + var field = this.fields[i] + + if (requiredMatches[field]) { + allRequiredMatches = allRequiredMatches.intersect(requiredMatches[field]) + } + + if (prohibitedMatches[field]) { + allProhibitedMatches = allProhibitedMatches.union(prohibitedMatches[field]) + } } var matchingFieldRefs = Object.keys(matchingFields), results = [], matches = Object.create(null) + /* + * If the query is negated (contains only prohibited terms) + * we need to get _all_ fieldRefs currently existing in the + * index. This is only done when we know that the query is + * entirely prohibited terms to avoid any cost of getting all + * fieldRefs unnecessarily. + * + * Additionally, blank MatchData must be created to correctly + * populate the results. 
+ */ + if (query.isNegated()) { + matchingFieldRefs = Object.keys(this.fieldVectors) + + for (var i = 0; i < matchingFieldRefs.length; i++) { + var matchingFieldRef = matchingFieldRefs[i] + var fieldRef = lunr.FieldRef.fromString(matchingFieldRef) + matchingFields[matchingFieldRef] = new lunr.MatchData + } + } + for (var i = 0; i < matchingFieldRefs.length; i++) { /* * Currently we have document fields that match the query, but we @@ -1904,8 +2205,17 @@ lunr.Index.prototype.query = function (fn) { * above, and combined into a final document score using addition. */ var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]), - docRef = fieldRef.docRef, - fieldVector = this.fieldVectors[fieldRef], + docRef = fieldRef.docRef + + if (!allRequiredMatches.contains(docRef)) { + continue + } + + if (allProhibitedMatches.contains(docRef)) { + continue + } + + var fieldVector = this.fieldVectors[fieldRef], score = queryVectors[fieldRef.fieldName].similarity(fieldVector), docMatch @@ -1970,7 +2280,7 @@ lunr.Index.load = function (serializedIndex) { var attrs = {}, fieldVectors = {}, serializedVectors = serializedIndex.fieldVectors, - invertedIndex = {}, + invertedIndex = Object.create(null), serializedInvertedIndex = serializedIndex.invertedIndex, tokenSetBuilder = new lunr.TokenSet.Builder, pipeline = lunr.Pipeline.load(serializedIndex.pipeline) @@ -2009,7 +2319,7 @@ lunr.Index.load = function (serializedIndex) { } /*! * lunr.Builder - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -2038,7 +2348,8 @@ lunr.Index.load = function (serializedIndex) { */ lunr.Builder = function () { this._ref = "id" - this._fields = [] + this._fields = Object.create(null) + this._documents = Object.create(null) this.invertedIndex = Object.create(null) this.fieldTermFrequencies = {} this.fieldLengths = {} @@ -2068,6 +2379,20 @@ lunr.Builder.prototype.ref = function (ref) { this._ref = ref } +/** + * A function that is used to extract a field from a document. + * + * Lunr expects a field to be at the top level of a document, if however the field + * is deeply nested within a document an extractor function can be used to extract + * the right field for indexing. + * + * @callback fieldExtractor + * @param {object} doc - The document being added to the index. + * @returns {?(string|object|object[])} obj - The object that will be indexed for this field. + * @example Extracting a nested field + * function (doc) { return doc.nested.field } + */ + /** * Adds a field to the list of document fields that will be indexed. Every document being * indexed should have this field. Null values for this field in indexed documents will @@ -2076,10 +2401,22 @@ lunr.Builder.prototype.ref = function (ref) { * All fields should be added before adding documents to the index. Adding fields after * a document has been indexed will have no effect on already indexed documents. * - * @param {string} field - The name of a field to index in all documents. + * Fields can be boosted at build time. This allows terms within that field to have more + * importance when ranking search results. Use a field boost to specify that matches within + * one field are more important than other fields. + * + * @param {string} fieldName - The name of a field to index in all documents. + * @param {object} attributes - Optional attributes associated with this field. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this field. 
+ * @param {fieldExtractor} [attributes.extractor] - Function to extract a field from a document. + * @throws {RangeError} fieldName cannot contain unsupported characters '/' */ -lunr.Builder.prototype.field = function (field) { - this._fields.push(field) +lunr.Builder.prototype.field = function (fieldName, attributes) { + if (/\//.test(fieldName)) { + throw new RangeError ("Field '" + fieldName + "' contains illegal character '/'") + } + + this._fields[fieldName] = attributes || {} } /** @@ -2121,17 +2458,27 @@ lunr.Builder.prototype.k1 = function (number) { * it should have all fields defined for indexing, though null or undefined values will not * cause errors. * + * Entire documents can be boosted at build time. Applying a boost to a document indicates that + * this document should rank higher in search results than other documents. + * * @param {object} doc - The document to add to the index. + * @param {object} attributes - Optional attributes associated with this document. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this document. */ -lunr.Builder.prototype.add = function (doc) { - var docRef = doc[this._ref] +lunr.Builder.prototype.add = function (doc, attributes) { + var docRef = doc[this._ref], + fields = Object.keys(this._fields) + this._documents[docRef] = attributes || {} this.documentCount += 1 - for (var i = 0; i < this._fields.length; i++) { - var fieldName = this._fields[i], - field = doc[fieldName], - tokens = this.tokenizer(field), + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i], + extractor = this._fields[fieldName].extractor, + field = extractor ? extractor(doc) : doc[fieldName], + tokens = this.tokenizer(field, { + fields: [fieldName] + }), terms = this.pipeline.run(tokens), fieldRef = new lunr.FieldRef (docRef, fieldName), fieldTerms = Object.create(null) @@ -2159,8 +2506,8 @@ lunr.Builder.prototype.add = function (doc) { posting["_index"] = this.termIndex this.termIndex += 1 - for (var k = 0; k < this._fields.length; k++) { - posting[this._fields[k]] = Object.create(null) + for (var k = 0; k < fields.length; k++) { + posting[fields[k]] = Object.create(null) } this.invertedIndex[term] = posting @@ -2211,9 +2558,11 @@ lunr.Builder.prototype.calculateAverageFieldLengths = function () { accumulator[field] += this.fieldLengths[fieldRef] } - for (var i = 0; i < this._fields.length; i++) { - var field = this._fields[i] - accumulator[field] = accumulator[field] / documentsWithField[field] + var fields = Object.keys(this._fields) + + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i] + accumulator[fieldName] = accumulator[fieldName] / documentsWithField[fieldName] } this.averageFieldLength = accumulator @@ -2232,13 +2581,17 @@ lunr.Builder.prototype.createFieldVectors = function () { for (var i = 0; i < fieldRefsLength; i++) { var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]), - field = fieldRef.fieldName, + fieldName = fieldRef.fieldName, fieldLength = this.fieldLengths[fieldRef], fieldVector = new lunr.Vector, termFrequencies = this.fieldTermFrequencies[fieldRef], terms = Object.keys(termFrequencies), termsLength = terms.length + + var fieldBoost = this._fields[fieldName].boost || 1, + docBoost = this._documents[fieldRef.docRef].boost || 1 + for (var j = 0; j < termsLength; j++) { var term = terms[j], tf = termFrequencies[term], @@ -2252,7 +2605,9 @@ lunr.Builder.prototype.createFieldVectors = function () { idf = termIdfCache[term] } - score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - 
this._b + this._b * (fieldLength / this.averageFieldLength[field])) + tf) + score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[fieldName])) + tf) + score *= fieldBoost + score *= docBoost scoreWithPrecision = Math.round(score * 1000) / 1000 // Converts 1.23456789 to 1.234. // Reducing the precision so that the vectors take up less @@ -2298,7 +2653,7 @@ lunr.Builder.prototype.build = function () { invertedIndex: this.invertedIndex, fieldVectors: this.fieldVectors, tokenSet: this.tokenSet, - fields: this._fields, + fields: Object.keys(this._fields), pipeline: this.searchPipeline }) } @@ -2336,7 +2691,7 @@ lunr.Builder.prototype.use = function (fn) { */ lunr.MatchData = function (term, field, metadata) { var clonedMetadata = Object.create(null), - metadataKeys = Object.keys(metadata) + metadataKeys = Object.keys(metadata || {}) // Cloning the metadata to prevent the original // being mutated during match data combination. @@ -2349,8 +2704,11 @@ lunr.MatchData = function (term, field, metadata) { } this.metadata = Object.create(null) - this.metadata[term] = Object.create(null) - this.metadata[term][field] = clonedMetadata + + if (term !== undefined) { + this.metadata[term] = Object.create(null) + this.metadata[term][field] = clonedMetadata + } } /** @@ -2465,11 +2823,42 @@ lunr.Query = function (allFields) { * wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING * }) */ + lunr.Query.wildcard = new String ("*") lunr.Query.wildcard.NONE = 0 lunr.Query.wildcard.LEADING = 1 lunr.Query.wildcard.TRAILING = 2 +/** + * Constants for indicating what kind of presence a term must have in matching documents. + * + * @constant + * @enum {number} + * @see lunr.Query~Clause + * @see lunr.Query#clause + * @see lunr.Query#term + * @example query term with required presence + * query.term('foo', { presence: lunr.Query.presence.REQUIRED }) + */ +lunr.Query.presence = { + /** + * Term's presence in a document is optional, this is the default value. + */ + OPTIONAL: 1, + + /** + * Term's presence in a document is required, documents that do not contain + * this term will not be returned. + */ + REQUIRED: 2, + + /** + * Term's presence in a document is prohibited, documents that do contain + * this term will not be returned. + */ + PROHIBITED: 3 +} + /** * A single clause in a {@link lunr.Query} contains a term and details on how to * match that term against a {@link lunr.Index}. @@ -2479,7 +2868,8 @@ lunr.Query.wildcard.TRAILING = 2 * @property {number} [boost=1] - Any boost that should be applied when matching this clause. * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be. * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline. - * @property {number} [wildcard=0] - Whether the term should have wildcards appended or prepended. + * @property {number} [wildcard=lunr.Query.wildcard.NONE] - Whether the term should have wildcards appended or prepended. + * @property {number} [presence=lunr.Query.presence.OPTIONAL] - The terms presence in any matching documents. */ /** @@ -2517,17 +2907,44 @@ lunr.Query.prototype.clause = function (clause) { clause.term = "" + clause.term + "*" } + if (!('presence' in clause)) { + clause.presence = lunr.Query.presence.OPTIONAL + } + this.clauses.push(clause) return this } +/** + * A negated query is one in which every clause has a presence of + * prohibited. 
These queries require some special processing to return + * the expected results. + * + * @returns boolean + */ +lunr.Query.prototype.isNegated = function () { + for (var i = 0; i < this.clauses.length; i++) { + if (this.clauses[i].presence != lunr.Query.presence.PROHIBITED) { + return false + } + } + + return true +} + /** * Adds a term to the current query, under the covers this will create a {@link lunr.Query~Clause} * to the list of clauses that make up this query. * - * @param {string} term - The term to add to the query. - * @param {Object} [options] - Any additional properties to add to the query clause. + * The term is used as is, i.e. no tokenization will be performed by this method. Instead conversion + * to a token or token-like string should be done before calling this method. + * + * The term will be converted to a string by calling `toString`. Multiple terms can be passed as an + * array, each term in the array will share the same options. + * + * @param {object|object[]} term - The term(s) to add to the query. + * @param {object} [options] - Any additional properties to add to the query clause. * @returns {lunr.Query} * @see lunr.Query#clause * @see lunr.Query~Clause @@ -2539,10 +2956,17 @@ lunr.Query.prototype.clause = function (clause) { * boost: 10, * wildcard: lunr.Query.wildcard.TRAILING * }) + * @example using lunr.tokenizer to convert a string to tokens before using them as terms + * query.term(lunr.tokenizer("foo bar")) */ lunr.Query.prototype.term = function (term, options) { + if (Array.isArray(term)) { + term.forEach(function (t) { this.term(t, lunr.utils.clone(options)) }, this) + return this + } + var clause = options || {} - clause.term = term + clause.term = term.toString() this.clause(clause) @@ -2654,6 +3078,7 @@ lunr.QueryLexer.FIELD = 'FIELD' lunr.QueryLexer.TERM = 'TERM' lunr.QueryLexer.EDIT_DISTANCE = 'EDIT_DISTANCE' lunr.QueryLexer.BOOST = 'BOOST' +lunr.QueryLexer.PRESENCE = 'PRESENCE' lunr.QueryLexer.lexField = function (lexer) { lexer.backup() @@ -2742,6 +3167,22 @@ lunr.QueryLexer.lexText = function (lexer) { return lunr.QueryLexer.lexBoost } + // "+" indicates term presence is required + // checking for length to ensure that only + // leading "+" are considered + if (char == "+" && lexer.width() === 1) { + lexer.emit(lunr.QueryLexer.PRESENCE) + return lunr.QueryLexer.lexText + } + + // "-" indicates term presence is prohibited + // checking for length to ensure that only + // leading "-" are considered + if (char == "-" && lexer.width() === 1) { + lexer.emit(lunr.QueryLexer.PRESENCE) + return lunr.QueryLexer.lexText + } + if (char.match(lunr.QueryLexer.termSeparator)) { return lunr.QueryLexer.lexTerm } @@ -2759,7 +3200,7 @@ lunr.QueryParser.prototype.parse = function () { this.lexer.run() this.lexemes = this.lexer.lexemes - var state = lunr.QueryParser.parseFieldOrTerm + var state = lunr.QueryParser.parseClause while (state) { state = state(this) @@ -2784,7 +3225,7 @@ lunr.QueryParser.prototype.nextClause = function () { this.currentClause = {} } -lunr.QueryParser.parseFieldOrTerm = function (parser) { +lunr.QueryParser.parseClause = function (parser) { var lexeme = parser.peekLexeme() if (lexeme == undefined) { @@ -2792,6 +3233,8 @@ lunr.QueryParser.parseFieldOrTerm = function (parser) { } switch (lexeme.type) { + case lunr.QueryLexer.PRESENCE: + return lunr.QueryParser.parsePresence case lunr.QueryLexer.FIELD: return lunr.QueryParser.parseField case lunr.QueryLexer.TERM: @@ -2807,6 +3250,43 @@ lunr.QueryParser.parseFieldOrTerm = function (parser) 
{ } } +lunr.QueryParser.parsePresence = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + switch (lexeme.str) { + case "-": + parser.currentClause.presence = lunr.Query.presence.PROHIBITED + break + case "+": + parser.currentClause.presence = lunr.Query.presence.REQUIRED + break + default: + var errorMessage = "unrecognised presence operator'" + lexeme.str + "'" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + var errorMessage = "expecting term or field, found nothing" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.FIELD: + return lunr.QueryParser.parseField + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expecting term or field, found '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + lunr.QueryParser.parseField = function (parser) { var lexeme = parser.consumeLexeme() @@ -2870,6 +3350,9 @@ lunr.QueryParser.parseTerm = function (parser) { return lunr.QueryParser.parseEditDistance case lunr.QueryLexer.BOOST: return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence default: var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) @@ -2910,6 +3393,9 @@ lunr.QueryParser.parseEditDistance = function (parser) { return lunr.QueryParser.parseEditDistance case lunr.QueryLexer.BOOST: return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence default: var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) @@ -2950,6 +3436,9 @@ lunr.QueryParser.parseBoost = function (parser) { return lunr.QueryParser.parseEditDistance case lunr.QueryLexer.BOOST: return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence default: var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) diff --git a/dev/search/main.js b/dev/search/main.js index 0a82ab5..0e1fc81 100644 --- a/dev/search/main.js +++ b/dev/search/main.js @@ -43,7 +43,7 @@ function displayResults (results) { function doSearch () { var query = document.getElementById('mkdocs-search-query').value; - if (query.length > 2) { + if (query.length > min_search_length) { if (!window.Worker) { displayResults(search(query)); } else { @@ -73,6 +73,8 @@ function onWorkerMessage (e) { } else if (e.data.results) { var results = e.data.results; displayResults(results); + } else if (e.data.config) { + min_search_length = e.data.config.min_search_length-1; } } diff --git a/dev/search/search_index.json b/dev/search/search_index.json index 3e99e68..c5178aa 100644 --- a/dev/search/search_index.json +++ b/dev/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"MIPLearn MIPLearn is an extensible framework for Learning-Enhanced Mixed-Integer Optimization , an approach targeted at discrete optimization problems that need to be repeatedly solved with 
only minor changes to input data. The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see benchmark results and references for more details). Features MIPLearn proposes a flexible problem specification format, which allows users to describe their particular optimization problems to a Learning-Enhanced MIP solver, both from the MIP perspective and from the ML perspective, without making any assumptions on the problem being modeled, the mathematical formulation of the problem, or ML encoding. While the format is very flexible, some constraints are enforced to ensure that it is usable by an actual solver. MIPLearn provides a reference implementation of a Learning-Enhanced Solver , which can use the above problem specification format to automatically predict, based on previously solved instances, a number of hints to accelerate MIP performance. Currently, the reference solver is able to predict: (i) partial solutions which are likely to work well as MIP starts; (ii) an initial set of lazy constraints to enforce; (iii) variable branching priorities to accelerate the exploration of the branch-and-bound tree; (iv) the optimal objective value based on the solution to the LP relaxation. The usage of the solver is very straightforward. The most suitable ML models are automatically selected, trained, cross-validated and applied to the problem with no user intervention. MIPLearn provides a set of benchmark problems and random instance generators, covering applications from different domains, which can be used to quickly evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. MIPLearn is customizable and extensible . For MIP and ML researchers exploring new techniques to accelerate MIP performance based on historical data, each component of the reference solver can be individually replaced, extended or customized. Documentation Installation and typical usage Benchmark utilities Benchmark problems, challenges and results Customizing the solver License, authors, references and acknowledgments Source Code https://github.com/ANL-CEEESA/MIPLearn","title":"Home"},{"location":"#miplearn","text":"MIPLearn is an extensible framework for Learning-Enhanced Mixed-Integer Optimization , an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data. The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see benchmark results and references for more details).","title":"MIPLearn"},{"location":"#features","text":"MIPLearn proposes a flexible problem specification format, which allows users to describe their particular optimization problems to a Learning-Enhanced MIP solver, both from the MIP perspective and from the ML perspective, without making any assumptions on the problem being modeled, the mathematical formulation of the problem, or ML encoding. 
While the format is very flexible, some constraints are enforced to ensure that it is usable by an actual solver. MIPLearn provides a reference implementation of a Learning-Enhanced Solver , which can use the above problem specification format to automatically predict, based on previously solved instances, a number of hints to accelerate MIP performance. Currently, the reference solver is able to predict: (i) partial solutions which are likely to work well as MIP starts; (ii) an initial set of lazy constraints to enforce; (iii) variable branching priorities to accelerate the exploration of the branch-and-bound tree; (iv) the optimal objective value based on the solution to the LP relaxation. The usage of the solver is very straightforward. The most suitable ML models are automatically selected, trained, cross-validated and applied to the problem with no user intervention. MIPLearn provides a set of benchmark problems and random instance generators, covering applications from different domains, which can be used to quickly evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. MIPLearn is customizable and extensible . For MIP and ML researchers exploring new techniques to accelerate MIP performance based on historical data, each component of the reference solver can be individually replaced, extended or customized.","title":"Features"},{"location":"#documentation","text":"Installation and typical usage Benchmark utilities Benchmark problems, challenges and results Customizing the solver License, authors, references and acknowledgments","title":"Documentation"},{"location":"#source-code","text":"https://github.com/ANL-CEEESA/MIPLearn","title":"Source Code"},{"location":"about/","text":"About Authors Alinson S. Xavier, Argonne National Laboratory < axavier@anl.gov > Feng Qiu, Argonne National Laboratory < fqiu@anl.gov > Acknowledgments Based upon work supported by Laboratory Directed Research and Development (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357. References Learning to Solve Large-Scale Security-Constrained Unit Commitment Problems. Alinson S. Xavier, Feng Qiu, Shabbir Ahmed . INFORMS Journal on Computing (to appear). ArXiv:1902:01696 License MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization Copyright \u00a9 2020, UChicago Argonne, LLC. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"About"},{"location":"about/#about","text":"","title":"About"},{"location":"about/#authors","text":"Alinson S. Xavier, Argonne National Laboratory < axavier@anl.gov > Feng Qiu, Argonne National Laboratory < fqiu@anl.gov >","title":"Authors"},{"location":"about/#acknowledgments","text":"Based upon work supported by Laboratory Directed Research and Development (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357.","title":"Acknowledgments"},{"location":"about/#references","text":"Learning to Solve Large-Scale Security-Constrained Unit Commitment Problems. Alinson S. Xavier, Feng Qiu, Shabbir Ahmed . INFORMS Journal on Computing (to appear). ArXiv:1902:01696","title":"References"},{"location":"about/#license","text":"MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization Copyright \u00a9 2020, UChicago Argonne, LLC. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"License"},{"location":"benchmark/","text":"Benchmarks Utilities Using BenchmarkRunner MIPLearn provides the utility class BenchmarkRunner , which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage: from miplearn import BenchmarkRunner, LearningSolver # Create train and test instances train_instances = [...] test_instances = [...] # Training phase... training_solver = LearningSolver(...) training_solver.parallel_solve(train_instances, n_jobs=10) # Test phase... 
test_solvers = { \"Baseline\": LearningSolver(...), # each solver may have different parameters \"Strategy A\": LearningSolver(...), \"Strategy B\": LearningSolver(...), \"Strategy C\": LearningSolver(...), } benchmark = BenchmarkRunner(test_solvers) benchmark.fit(train_instances) benchmark.parallel_solve(test_instances, n_jobs=2) print(benchmark.raw_results()) The method fit trains the ML models for each individual solver. The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns: Solver, the name of the solver. Instance, the sequence number identifying the instance. Wallclock Time, the wallclock running time (in seconds) spent by the solver; Lower Bound, the best lower bound obtained by the solver; Upper Bound, the best upper bound obtained by the solver; Gap, the relative MIP integrality gap at the end of the optimization; Nodes, the number of explored branch-and-bound nodes. In addition to the above, there is also a \"Relative\" version of most columns, where the raw number is compared to the solver which provided the best performance. The Relative Wallclock Time for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance. For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2. Saving and loading benchmark results When iteratively exploring new formulations, encoding and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. BenchmarkRunner provides the methods save_results and load_results , which can be used to avoid this repetition, as the next example shows: # Benchmark baseline solvers and save results to a file. benchmark = BenchmarkRunner(baseline_solvers) benchmark.parallel_solve(test_instances) benchmark.save_results(\"baseline_results.csv\") # Benchmark remaining solvers, loading baseline results from file. benchmark = BenchmarkRunner(alternative_solvers) benchmark.load_results(\"baseline_results.csv\") benchmark.fit(training_instances) benchmark.parallel_solve(test_instances)","title":"Benchmark"},{"location":"benchmark/#benchmarks-utilities","text":"","title":"Benchmarks Utilities"},{"location":"benchmark/#using-benchmarkrunner","text":"MIPLearn provides the utility class BenchmarkRunner , which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage: from miplearn import BenchmarkRunner, LearningSolver # Create train and test instances train_instances = [...] test_instances = [...] # Training phase... training_solver = LearningSolver(...) training_solver.parallel_solve(train_instances, n_jobs=10) # Test phase... test_solvers = { \"Baseline\": LearningSolver(...), # each solver may have different parameters \"Strategy A\": LearningSolver(...), \"Strategy B\": LearningSolver(...), \"Strategy C\": LearningSolver(...), } benchmark = BenchmarkRunner(test_solvers) benchmark.fit(train_instances) benchmark.parallel_solve(test_instances, n_jobs=2) print(benchmark.raw_results()) The method fit trains the ML models for each individual solver. 
The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns: Solver, the name of the solver. Instance, the sequence number identifying the instance. Wallclock Time, the wallclock running time (in seconds) spent by the solver; Lower Bound, the best lower bound obtained by the solver; Upper Bound, the best upper bound obtained by the solver; Gap, the relative MIP integrality gap at the end of the optimization; Nodes, the number of explored branch-and-bound nodes. In addition to the above, there is also a \"Relative\" version of most columns, where the raw number is compared to the solver which provided the best performance. The Relative Wallclock Time, for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance. For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2.","title":"Using BenchmarkRunner"},{"location":"benchmark/#saving-and-loading-benchmark-results","text":"When iteratively exploring new formulations, encodings and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. BenchmarkRunner provides the methods save_results and load_results , which can be used to avoid this repetition, as the next example shows: # Benchmark baseline solvers and save results to a file. benchmark = BenchmarkRunner(baseline_solvers) benchmark.parallel_solve(test_instances) benchmark.save_results(\"baseline_results.csv\") # Benchmark remaining solvers, loading baseline results from file. benchmark = BenchmarkRunner(alternative_solvers) benchmark.load_results(\"baseline_results.csv\") benchmark.fit(training_instances) benchmark.parallel_solve(test_instances)","title":"Saving and loading benchmark results"},{"location":"customization/","text":"Customization Customizing solver parameters Selecting the internal MIP solver By default, LearningSolver uses Gurobi as its internal MIP solver. Another supported solver is IBM ILOG CPLEX . To switch between solvers, use the solver constructor argument, as shown below. It is also possible to specify a time limit (in seconds) and a relative MIP gap tolerance. from miplearn import LearningSolver solver = LearningSolver(solver=\"cplex\", time_limit=300, gap_tolerance=1e-3) Customizing solver components LearningSolver is composed of a number of individual machine-learning components, each targeting a different part of the solution process. Each component can be individually enabled, disabled or customized. The following components are enabled by default: LazyConstraintComponent : Predicts which lazy constraints to initially enforce. ObjectiveValueComponent : Predicts the optimal value of the optimization problem, given the optimal solution to the LP relaxation. PrimalSolutionComponent : Predicts optimal values for binary decision variables. In heuristic mode, this component fixes the variables to their predicted values. In exact mode, the predicted values are provided to the solver as a (partial) MIP start.
The following components are also available, but not enabled by default: BranchPriorityComponent : Predicts good branch priorities for decision variables. Selecting components To create a LearningSolver with a specific set of components, the components constructor argument may be used, as the next example shows: # Create a solver without any components solver1 = LearningSolver(components=[]) # Create a solver with only two components solver2 = LearningSolver(components=[ LazyConstraintComponent(...), PrimalSolutionComponent(...), ]) It is also possible to add components to an existing solver using the solver.add method, as shown below. If the solver already holds another component of that type, the new component will replace the previous one. # Create solver with default components solver = LearningSolver() # Replace the default LazyConstraintComponent by one with custom parameters solver.add(LazyConstraintComponent(...)) Adjusting component aggressiveness The aggressiveness of classification components (such as PrimalSolutionComponent and LazyConstraintComponent ) can be adjusted through the threshold constructor argument. Internally, these components ask the ML models how confident they are about each prediction (through the predict_proba method in the sklearn API), and only take into account predictions which have probabilities above the threshold. Lowering a component's threshold increases its aggressiveness, while raising a component's threshold makes it more conservative. MIPLearn also includes MinPrecisionThreshold , a dynamic threshold which adjusts itself automatically during training to achieve a minimum desired precision (the fraction of positive predictions which are correct). The example below shows how to initialize a PrimalSolutionComponent which achieves 95% precision, possibly at the cost of a lower recall. To make the component more aggressive, this precision may be lowered. PrimalSolutionComponent(threshold=MinPrecisionThreshold(0.95)) Evaluating component performance MIPLearn allows solver components to be modified, trained and evaluated in isolation. In the following example, we build and fit PrimalSolutionComponent outside the solver, then evaluate its performance. from miplearn import PrimalSolutionComponent # User-provided set of previously-solved instances train_instances = [...] # Construct and fit component on a subset of training instances comp = PrimalSolutionComponent() comp.fit(train_instances[:100]) # Evaluate performance on an additional set of training instances ev = comp.evaluate(train_instances[100:150]) The method evaluate returns a dictionary with performance evaluation statistics for each training instance provided, and for each type of prediction the component makes.
To obtain a summary across all instances, pandas may be used, as below: import pandas as pd pd.DataFrame(ev[\"Fix one\"]).mean(axis=1) Predicted positive 3.120000 Predicted negative 196.880000 Condition positive 62.500000 Condition negative 137.500000 True positive 3.060000 True negative 137.440000 False positive 0.060000 False negative 59.440000 Accuracy 0.702500 F1 score 0.093050 Recall 0.048921 Precision 0.981667 Predicted positive (%) 1.560000 Predicted negative (%) 98.440000 Condition positive (%) 31.250000 Condition negative (%) 68.750000 True positive (%) 1.530000 True negative (%) 68.720000 False positive (%) 0.030000 False negative (%) 29.720000 dtype: float64 Regression components (such as ObjectiveValueComponent ) can also be trained and evaluated similarly, as the next example shows: from miplearn import ObjectiveValueComponent comp = ObjectiveValueComponent() comp.fit(train_instances[:100]) ev = comp.evaluate(train_instances[100:150]) import pandas as pd pd.DataFrame(ev).mean(axis=1) Mean squared error 7001.977827 Explained variance 0.519790 Max error 242.375804 Mean absolute error 65.843924 R2 0.517612 Median absolute error 65.843924 dtype: float64 Using customized ML classifiers and regressors By default, given a training set of instances, MIPLearn trains a fixed set of ML classifiers and regressors, then selects the best one based on cross-validation performance. Alternatively, the user may specify which ML model a component should use through the classifier or regressor constructor parameters. The provided classifiers and regressors must follow the sklearn API. In particular, classifiers must provide the methods fit , predict_proba and predict , while regressors must provide the methods fit and predict . Danger MIPLearn must be able to generate a copy of any custom ML classifiers and regressors through the standard copy.deepcopy method. This currently makes it incompatible with Keras and TensorFlow predictors. This is a known limitation, which will be addressed in a future version. The example below shows how to construct a PrimalSolutionComponent which internally uses sklearn's KNeighborsClassifier . Any other sklearn classifier or pipeline can be used. from miplearn import PrimalSolutionComponent from sklearn.neighbors import KNeighborsClassifier comp = PrimalSolutionComponent(classifier=KNeighborsClassifier(n_neighbors=5)) comp.fit(train_instances)","title":"Customization"},{"location":"customization/#customization","text":"","title":"Customization"},{"location":"customization/#customizing-solver-parameters","text":"","title":"Customizing solver parameters"},{"location":"customization/#selecting-the-internal-mip-solver","text":"By default, LearningSolver uses Gurobi as its internal MIP solver. Another supported solver is IBM ILOG CPLEX . To switch between solvers, use the solver constructor argument, as shown below. It is also possible to specify a time limit (in seconds) and a relative MIP gap tolerance. from miplearn import LearningSolver solver = LearningSolver(solver=\"cplex\", time_limit=300, gap_tolerance=1e-3)","title":"Selecting the internal MIP solver"},{"location":"customization/#customizing-solver-components","text":"LearningSolver is composed of a number of individual machine-learning components, each targeting a different part of the solution process. Each component can be individually enabled, disabled or customized. The following components are enabled by default: LazyConstraintComponent : Predicts which lazy constraints to initially enforce.
ObjectiveValueComponent : Predicts the optimal value of the optimization problem, given the optimal solution to the LP relaxation. PrimalSolutionComponent : Predicts optimal values for binary decision variables. In heuristic mode, this component fixes the variables to their predicted values. In exact mode, the predicted values are provided to the solver as a (partial) MIP start. The following components are also available, but not enabled by default: BranchPriorityComponent : Predicts good branch priorities for decision variables.","title":"Customizing solver components"},{"location":"customization/#selecting-components","text":"To create a LearningSolver with a specific set of components, the components constructor argument may be used, as the next example shows: # Create a solver without any components solver1 = LearningSolver(components=[]) # Create a solver with only two components solver2 = LearningSolver(components=[ LazyConstraintComponent(...), PrimalSolutionComponent(...), ]) It is also possible to add components to an existing solver using the solver.add method, as shown below. If the solver already holds another component of that type, the new component will replace the previous one. # Create solver with default components solver = LearningSolver() # Replace the default LazyConstraintComponent by one with custom parameters solver.add(LazyConstraintComponent(...))","title":"Selecting components"},{"location":"customization/#adjusting-component-aggressiveness","text":"The aggressiveness of classification components (such as PrimalSolutionComponent and LazyConstraintComponent ) can be adjusted through the threshold constructor argument. Internally, these components ask the ML models how confident they are about each prediction (through the predict_proba method in the sklearn API), and only take into account predictions which have probabilities above the threshold. Lowering a component's threshold increases its aggressiveness, while raising a component's threshold makes it more conservative. MIPLearn also includes MinPrecisionThreshold , a dynamic threshold which adjusts itself automatically during training to achieve a minimum desired precision (the fraction of positive predictions which are correct). The example below shows how to initialize a PrimalSolutionComponent which achieves 95% precision, possibly at the cost of a lower recall. To make the component more aggressive, this precision may be lowered. PrimalSolutionComponent(threshold=MinPrecisionThreshold(0.95))","title":"Adjusting component aggressiveness"},{"location":"customization/#evaluating-component-performance","text":"MIPLearn allows solver components to be modified, trained and evaluated in isolation. In the following example, we build and fit PrimalSolutionComponent outside the solver, then evaluate its performance. from miplearn import PrimalSolutionComponent # User-provided set of previously-solved instances train_instances = [...] # Construct and fit component on a subset of training instances comp = PrimalSolutionComponent() comp.fit(train_instances[:100]) # Evaluate performance on an additional set of training instances ev = comp.evaluate(train_instances[100:150]) The method evaluate returns a dictionary with performance evaluation statistics for each training instance provided, and for each type of prediction the component makes.
To obtain a summary across all instances, pandas may be used, as below: import pandas as pd pd.DataFrame(ev[\"Fix one\"]).mean(axis=1) Predicted positive 3.120000 Predicted negative 196.880000 Condition positive 62.500000 Condition negative 137.500000 True positive 3.060000 True negative 137.440000 False positive 0.060000 False negative 59.440000 Accuracy 0.702500 F1 score 0.093050 Recall 0.048921 Precision 0.981667 Predicted positive (%) 1.560000 Predicted negative (%) 98.440000 Condition positive (%) 31.250000 Condition negative (%) 68.750000 True positive (%) 1.530000 True negative (%) 68.720000 False positive (%) 0.030000 False negative (%) 29.720000 dtype: float64 Regression components (such as ObjectiveValueComponent ) can also be trained and evaluated similarly, as the next example shows: from miplearn import ObjectiveValueComponent comp = ObjectiveValueComponent() comp.fit(train_instances[:100]) ev = comp.evaluate(train_instances[100:150]) import pandas as pd pd.DataFrame(ev).mean(axis=1) Mean squared error 7001.977827 Explained variance 0.519790 Max error 242.375804 Mean absolute error 65.843924 R2 0.517612 Median absolute error 65.843924 dtype: float64","title":"Evaluating component performance"},{"location":"customization/#using-customized-ml-classifiers-and-regressors","text":"By default, given a training set of instances, MIPLearn trains a fixed set of ML classifiers and regressors, then selects the best one based on cross-validation performance. Alternatively, the user may specify which ML model a component should use through the classifier or regressor constructor parameters. The provided classifiers and regressors must follow the sklearn API. In particular, classifiers must provide the methods fit , predict_proba and predict , while regressors must provide the methods fit and predict . Danger MIPLearn must be able to generate a copy of any custom ML classifiers and regressors through the standard copy.deepcopy method. This currently makes it incompatible with Keras and TensorFlow predictors. This is a known limitation, which will be addressed in a future version. The example below shows how to construct a PrimalSolutionComponent which internally uses sklearn's KNeighborsClassifier . Any other sklearn classifier or pipeline can be used. from miplearn import PrimalSolutionComponent from sklearn.neighbors import KNeighborsClassifier comp = PrimalSolutionComponent(classifier=KNeighborsClassifier(n_neighbors=5)) comp.fit(train_instances)","title":"Using customized ML classifiers and regressors"},{"location":"problems/","text":"Benchmark Problems, Challenges and Results MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, that can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. On this page, we describe these problems, the included instance generators, and we present some benchmark results for LearningSolver with default parameters. Preliminaries Benchmark challenges When evaluating the performance of a conventional MIP solver, benchmark sets , such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques is typically measured as the average (or total) running time the solver takes to solve the entire benchmark set.
For Learning-Enhanced MIP solvers, it is also necessary to specify which instances the solver should be trained on (the training instances ) before solving the actual set of instances we are interested in (the test instances ). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger performance benefits. In MIPLearn, each optimization problem comes with a set of benchmark challenges , which specify how the training and test instances should be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from. Baseline results To illustrate the performance of LearningSolver , and to set a baseline for newly proposed techniques, we present on this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. For more detailed computational studies, see references . We compare three solvers: baseline: Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver) ml-exact: LearningSolver with default settings, using Gurobi 9.0 as internal MIP solver ml-heuristic: Same as above, but with mode=\"heuristic\" All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz). All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously at a time. Maximum Weight Stable Set Problem Problem definition Given a simple undirected graph $G=(V,E)$ and weights $w \\in \\mathbb{R}^V$, the problem is to find a stable set $S \\subseteq V$ that maximizes $ \\sum_{v \\in S} w_v$. We recall that a subset $S \\subseteq V$ is a stable set if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems. Random instance generator The class MaxWeightStableSetGenerator can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter fix_graph=True is provided, one random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ is generated during the constructor, where $n$ and $p$ are sampled from user-provided probability distributions n and p . To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution w . When fix_graph=False , a new random graph is generated for each instance, while the remaining parameters are sampled in the same way. Challenge A Fixed random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ with $n=200$ and $p=5\\%$ Random vertex weights $w_v \\sim U(100, 150)$ 500 training instances, 50 test instances MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.), n=randint(low=200, high=201), p=uniform(loc=0.05, scale=0.0), fix_graph=True) Traveling Salesman Problem Problem definition Given a list of cities and the distance between each pair of cities, the problem asks for the shortest route starting at the first city, visiting each other city exactly once, then returning to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete problems. Random problem generator The class TravelingSalesmanGenerator can be used to generate random instances of this problem.
Initially, the generator creates $n$ cities $(x_1,y_1),\\ldots,(x_n,y_n) \\in \\mathbb{R}^2$, where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions n , x and y . For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to: d_{i,j} = \\gamma_{i,j} \\sqrt{(x_i-x_j)^2 + (y_i - y_j)^2} where $\\gamma_{i,j}$ is sampled from the distribution gamma . If fix_cities=True is provided, the list of cities is kept the same for all generated instances. The $\\gamma$ values, and therefore also the distances, are still different. By default, all distances $d_{i,j}$ are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. Challenge A Fixed list of 350 cities in the $[0, 1000]^2$ square $\\gamma_{i,j} \\sim U(0.95, 1.05)$ 500 training instances, 50 test instances TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), y=uniform(loc=0.0, scale=1000.0), n=randint(low=350, high=351), gamma=uniform(loc=0.95, scale=0.1), fix_cities=True, round=True, ) Multidimensional 0-1 Knapsack Problem Problem definition Given a set of $n$ items and $m$ types of resources (also called knapsacks ), the problem is to find a subset of items that maximizes profit without consuming more resources than are available. More precisely, the problem is: \\begin{align*} \\text{maximize} & \\sum_{j=1}^n p_j x_j \\\\ \\text{subject to} & \\sum_{j=1}^n w_{ij} x_j \\leq b_i & \\forall i=1,\\ldots,m \\\\ & x_j \\in \\{0,1\\} & \\forall j=1,\\ldots,n \\end{align*} Random instance generator The class MultiKnapsackGenerator can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions n and m . The weights $w_{ij}$ are sampled independently from the provided distribution w . The capacity of knapsack $i$ is set to b_i = \\alpha_i \\sum_{j=1}^n w_{ij} where $\\alpha_i$, the tightness ratio, is sampled from the provided probability distribution alpha . To make the instances more challenging, the costs of the items are linearly correlated with their average weights. More specifically, the price of each item $j$ is set to: p_j = \\sum_{i=1}^m \\frac{w_{ij}}{m} + K u_j, where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled from the provided probability distributions K and u . If fix_w=True is provided, then $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as u and K are not constants, the generated instances will still not be completely identical. If a probability distribution w_jitter is provided, then item weights will be set to $w_{ij} \\gamma_{ij}$ where $\\gamma_{ij}$ is sampled from w_jitter . When combined with fix_w=True , this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead. By default, all generated prices, weights and capacities are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. References Freville, Arnaud, and G\u00e9rard Plateau. An efficient preprocessing procedure for the multidimensional 0\u20131 knapsack problem. Discrete Applied Mathematics 49.1-3 (1994): 189-212. Fr\u00e9ville, Arnaud.
The multidimensional 0\u20131 knapsack problem: An overview. European Journal of Operational Research 155.1 (2004): 1-21. Challenge A 250 variables, 10 constraints, fixed weights $w \\sim U(0, 1000), \\gamma \\sim U(0.95, 1.05)$ $K = 500, u \\sim U(0, 1), \\alpha = 0.25$ 500 training instances, 50 test instances MultiKnapsackGenerator(n=randint(low=250, high=251), m=randint(low=10, high=11), w=uniform(loc=0.0, scale=1000.0), K=uniform(loc=500.0, scale=0.0), u=uniform(loc=0.0, scale=1.0), alpha=uniform(loc=0.25, scale=0.0), fix_w=True, w_jitter=uniform(loc=0.95, scale=0.1), )","title":"Problems"},{"location":"problems/#benchmark-problems-challenges-and-results","text":"MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, that can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. On this page, we describe these problems, the included instance generators, and we present some benchmark results for LearningSolver with default parameters.","title":"Benchmark Problems, Challenges and Results"},{"location":"problems/#preliminaries","text":"","title":"Preliminaries"},{"location":"problems/#benchmark-challenges","text":"When evaluating the performance of a conventional MIP solver, benchmark sets , such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques is typically measured as the average (or total) running time the solver takes to solve the entire benchmark set. For Learning-Enhanced MIP solvers, it is also necessary to specify which instances the solver should be trained on (the training instances ) before solving the actual set of instances we are interested in (the test instances ). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger performance benefits. In MIPLearn, each optimization problem comes with a set of benchmark challenges , which specify how the training and test instances should be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from.","title":"Benchmark challenges"},{"location":"problems/#baseline-results","text":"To illustrate the performance of LearningSolver , and to set a baseline for newly proposed techniques, we present on this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. For more detailed computational studies, see references . We compare three solvers: baseline: Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver) ml-exact: LearningSolver with default settings, using Gurobi 9.0 as internal MIP solver ml-heuristic: Same as above, but with mode=\"heuristic\" All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz).
All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously at a time.","title":"Baseline results"},{"location":"problems/#maximum-weight-stable-set-problem","text":"","title":"Maximum Weight Stable Set Problem"},{"location":"problems/#problem-definition","text":"Given a simple undirected graph $G=(V,E)$ and weights $w \\in \\mathbb{R}^V$, the problem is to find a stable set $S \\subseteq V$ that maximizes $ \\sum_{v \\in S} w_v$. We recall that a subset $S \\subseteq V$ is a stable set if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems.","title":"Problem definition"},{"location":"problems/#random-instance-generator","text":"The class MaxWeightStableSetGenerator can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter fix_graph=True is provided, one random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ is generated during the constructor, where $n$ and $p$ are sampled from user-provided probability distributions n and p . To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution w . When fix_graph=False , a new random graph is generated for each instance, while the remaining parameters are sampled in the same way.","title":"Random instance generator"},{"location":"problems/#challenge-a","text":"Fixed random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ with $n=200$ and $p=5\\%$ Random vertex weights $w_v \\sim U(100, 150)$ 500 training instances, 50 test instances MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.), n=randint(low=200, high=201), p=uniform(loc=0.05, scale=0.0), fix_graph=True)","title":"Challenge A"},{"location":"problems/#traveling-salesman-problem","text":"","title":"Traveling Salesman Problem"},{"location":"problems/#problem-definition_1","text":"Given a list of cities and the distance between each pair of cities, the problem asks for the shortest route starting at the first city, visiting each other city exactly once, then returning to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete problems.","title":"Problem definition"},{"location":"problems/#random-problem-generator","text":"The class TravelingSalesmanGenerator can be used to generate random instances of this problem. Initially, the generator creates $n$ cities $(x_1,y_1),\\ldots,(x_n,y_n) \\in \\mathbb{R}^2$, where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions n , x and y . For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to: d_{i,j} = \\gamma_{i,j} \\sqrt{(x_i-x_j)^2 + (y_i - y_j)^2} where $\\gamma_{i,j}$ is sampled from the distribution gamma . If fix_cities=True is provided, the list of cities is kept the same for all generated instances. The $\\gamma$ values, and therefore also the distances, are still different. By default, all distances $d_{i,j}$ are rounded to the nearest integer.
If round=False is provided, this rounding will be disabled.","title":"Random problem generator"},{"location":"problems/#challenge-a_1","text":"Fixed list of 350 cities in the $[0, 1000]^2$ square $\\gamma_{i,j} \\sim U(0.95, 1.05)$ 500 training instances, 50 test instances TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), y=uniform(loc=0.0, scale=1000.0), n=randint(low=350, high=351), gamma=uniform(loc=0.95, scale=0.1), fix_cities=True, round=True, )","title":"Challenge A"},{"location":"problems/#multidimensional-0-1-knapsack-problem","text":"","title":"Multidimensional 0-1 Knapsack Problem"},{"location":"problems/#problem-definition_2","text":"Given a set of $n$ items and $m$ types of resources (also called knapsacks ), the problem is to find a subset of items that maximizes profit without consuming more resources than are available. More precisely, the problem is: \\begin{align*} \\text{maximize} & \\sum_{j=1}^n p_j x_j \\\\ \\text{subject to} & \\sum_{j=1}^n w_{ij} x_j \\leq b_i & \\forall i=1,\\ldots,m \\\\ & x_j \\in \\{0,1\\} & \\forall j=1,\\ldots,n \\end{align*}","title":"Problem definition"},{"location":"problems/#random-instance-generator_1","text":"The class MultiKnapsackGenerator can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions n and m . The weights $w_{ij}$ are sampled independently from the provided distribution w . The capacity of knapsack $i$ is set to b_i = \\alpha_i \\sum_{j=1}^n w_{ij} where $\\alpha_i$, the tightness ratio, is sampled from the provided probability distribution alpha . To make the instances more challenging, the costs of the items are linearly correlated with their average weights. More specifically, the price of each item $j$ is set to: p_j = \\sum_{i=1}^m \\frac{w_{ij}}{m} + K u_j, where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled from the provided probability distributions K and u . If fix_w=True is provided, then $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as u and K are not constants, the generated instances will still not be completely identical. If a probability distribution w_jitter is provided, then item weights will be set to $w_{ij} \\gamma_{ij}$ where $\\gamma_{ij}$ is sampled from w_jitter . When combined with fix_w=True , this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead. By default, all generated prices, weights and capacities are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. References Freville, Arnaud, and G\u00e9rard Plateau. An efficient preprocessing procedure for the multidimensional 0\u20131 knapsack problem. Discrete Applied Mathematics 49.1-3 (1994): 189-212. Fr\u00e9ville, Arnaud. The multidimensional 0\u20131 knapsack problem: An overview.
European Journal of Operational Research 155.1 (2004): 1-21.","title":"Random instance generator"},{"location":"problems/#challenge-a_2","text":"250 variables, 10 constraints, fixed weights $w \\sim U(0, 1000), \\gamma \\sim U(0.95, 1.05)$ $K = 500, u \\sim U(0, 1), \\alpha = 0.25$ 500 training instances, 50 test instances MultiKnapsackGenerator(n=randint(low=250, high=251), m=randint(low=10, high=11), w=uniform(loc=0.0, scale=1000.0), K=uniform(loc=500.0, scale=0.0), u=uniform(loc=0.0, scale=1.0), alpha=uniform(loc=0.25, scale=0.0), fix_w=True, w_jitter=uniform(loc=0.95, scale=0.1), )","title":"Challenge A"},{"location":"usage/","text":"Usage Installation In these docs, we describe the Python/Pyomo version of the package, although a Julia/JuMP version is also available. A mixed-integer solver is also required and its Python bindings must be properly installed. Supported solvers are currently CPLEX and Gurobi. To install MIPLearn, run: pip3 install miplearn After installation, the package miplearn should become available to Python. It can be imported as follows: import miplearn Using LearningSolver The main class provided by this package is LearningSolver , a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage: from miplearn import LearningSolver # List of user-provided instances training_instances = [...] test_instances = [...] # Create solver solver = LearningSolver() # Solve all training instances for instance in training_instances: solver.solve(instance) # Learn from training instances solver.fit(training_instances) # Solve all test instances for instance in test_instances: solver.solve(instance) In this example, we have two lists of user-provided instances: training_instances and test_instances . We start by solving all training instances. Since there is no historical information available at this point, the instances will be processed from scratch, with no ML acceleration. After solving each instance, the solver stores within each instance object the optimal solution, the optimal objective value, and other information that can be used to accelerate future solves. After all training instances are solved, we call solver.fit(training_instances) . This instructs the solver to train all its internal machine-learning models based on the solutions of the (solved) training instances. Subsequent calls to solver.solve(instance) will automatically use the trained Machine Learning models to accelerate the solution process. Describing problem instances Instances to be solved by LearningSolver must derive from the abstract class miplearn.Instance . The following three abstract methods must be implemented: instance.to_model() , which returns a concrete Pyomo model corresponding to the instance; instance.get_instance_features() , which returns a 1-dimensional Numpy array of (numerical) features describing the entire instance; instance.get_variable_features(var_name, index) , which returns a 1-dimensional array of (numerical) features describing a particular decision variable. The first method is used by LearningSolver to construct a concrete Pyomo model, which will be provided to the internal MIP solver. The second and third methods provide an encoding of the instance, which can be used by the ML models to make predictions.
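For illustration, the sketch below implements these three methods for a simple single-knapsack problem, which is discussed further below. This is a minimal, hypothetical example rather than the implementation shipped with the package: the class name and its attributes (weights, prices, capacity) are invented for this sketch, and it assumes Instance can be imported directly from the top-level miplearn package.

```python
import numpy as np
import pyomo.environ as pe
from miplearn import Instance  # assumption: Instance is exported at the top level

class SimpleKnapsackInstance(Instance):
    """Hypothetical single-knapsack instance, for illustration only."""

    def __init__(self, weights, prices, capacity):
        self.weights = weights    # list of item weights
        self.prices = prices      # list of item prices
        self.capacity = capacity  # knapsack capacity

    def to_model(self):
        # Concrete Pyomo model handed to the internal MIP solver.
        model = pe.ConcreteModel()
        items = range(len(self.weights))
        model.x = pe.Var(items, domain=pe.Binary)
        model.obj = pe.Objective(
            expr=sum(self.prices[i] * model.x[i] for i in items),
            sense=pe.maximize,
        )
        model.capacity_constraint = pe.Constraint(
            expr=sum(self.weights[i] * model.x[i] for i in items) <= self.capacity
        )
        return model

    def get_instance_features(self):
        # Fixed-length vector describing the entire instance.
        return np.array([
            np.mean(self.weights),
            np.mean(self.prices),
            len(self.weights),
            self.capacity,
        ])

    def get_variable_features(self, var_name, index):
        # Fixed-length vector describing one decision variable.
        return np.array([self.weights[index], self.prices[index]])
```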
In the knapsack problem, for example, an implementation may decide to provide as instance features the average weights, average prices, number of items and the size of the knapsack. The weight and the price of each individual item could be provided as variable features. See src/python/miplearn/problems/knapsack.py for a concrete example. An optional method which can be implemented is instance.get_variable_category(var_name, index) , which returns a category (a string, an integer or any hashable type) for each decision variable. If two variables have the same category, LearningSolver will use the same internal ML model to predict the values of both variables. By default, all variables belong to the \"default\" category, and therefore only one ML model is used for all variables. If the returned category is None , ML predictors will ignore the variable. It is not necessary to have a one-to-one correspondence between features and problem instances. One important (and deliberate) limitation of MIPLearn, however, is that get_instance_features() must always return arrays of the same length for all relevant instances of the problem. Similarly, get_variable_features(var_name, index) must also always return arrays of the same length for all variables in each category. It is up to the user to decide how to encode variable-length characteristics of the problem into fixed-length vectors. In graph problems, for example, graph embeddings can be used to reduce the (variable-length) lists of nodes and edges into a fixed-length structure that still preserves some properties of the graph. Different instance encodings may have a significant impact on performance. Obtaining heuristic solutions By default, LearningSolver uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver. In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts. For more significant performance benefits, LearningSolver can also be configured to place additional trust in the Machine Learning predictors, by using the mode=\"heuristic\" constructor argument. When operating in this mode, if an ML model is statistically shown (through stratified k-fold cross validation ) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see references and benchmark results ). Danger The heuristic mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible. Saving and loading solver state After solving a large number of training instances, it may be desirable to save the current state of LearningSolver to disk, so that the solver can still use the acquired knowledge after the application restarts. This can be accomplished by using the standard pickle module, as the following example illustrates: from miplearn import LearningSolver import pickle # Solve training instances training_instances = [...]
solver = LearningSolver() for instance in training_instances: solver.solve(instance) # Train machine-learning models solver.fit(training_instances) # Save trained solver to disk pickle.dump(solver, open(\"solver.pickle\", \"wb\")) # Application restarts... # Load trained solver from disk solver = pickle.load(open(\"solver.pickle\", \"rb\")) # Solve additional instances test_instances = [...] for instance in test_instances: solver.solve(instance) Solving training instances in parallel In many situations, training and test instances can be solved in parallel to accelerate the training process. LearningSolver provides the method parallel_solve(instances) to easily achieve this: from miplearn import LearningSolver training_instances = [...] solver = LearningSolver() solver.parallel_solve(training_instances, n_jobs=4) solver.fit(training_instances) # Test phase... test_instances = [...] solver.parallel_solve(test_instances) Current Limitations Only binary and continuous decision variables are currently supported.","title":"Usage"},{"location":"usage/#usage","text":"","title":"Usage"},{"location":"usage/#installation","text":"In these docs, we describe the Python/Pyomo version of the package, although a Julia/JuMP version is also available. A mixed-integer solver is also required and its Python bindings must be properly installed. Supported solvers are currently CPLEX and Gurobi. To install MIPLearn, run: pip3 install miplearn After installation, the package miplearn should become available to Python. It can be imported as follows: import miplearn","title":"Installation"},{"location":"usage/#using-learningsolver","text":"The main class provided by this package is LearningSolver , a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage: from miplearn import LearningSolver # List of user-provided instances training_instances = [...] test_instances = [...] # Create solver solver = LearningSolver() # Solve all training instances for instance in training_instances: solver.solve(instance) # Learn from training instances solver.fit(training_instances) # Solve all test instances for instance in test_instances: solver.solve(instance) In this example, we have two lists of user-provided instances: training_instances and test_instances . We start by solving all training instances. Since there is no historical information available at this point, the instances will be processed from scratch, with no ML acceleration. After solving each instance, the solver stores within each instance object the optimal solution, the optimal objective value, and other information that can be used to accelerate future solves. After all training instances are solved, we call solver.fit(training_instances) . This instructs the solver to train all its internal machine-learning models based on the solutions of the (solved) training instances. Subsequent calls to solver.solve(instance) will automatically use the trained Machine Learning models to accelerate the solution process.","title":"Using LearningSolver"},{"location":"usage/#describing-problem-instances","text":"Instances to be solved by LearningSolver must derive from the abstract class miplearn.Instance .
The following three abstract methods must be implemented: instance.to_model() , which returns a concrete Pyomo model corresponding to the instance; instance.get_instance_features() , which returns a 1-dimensional Numpy array of (numerical) features describing the entire instance; instance.get_variable_features(var_name, index) , which returns a 1-dimensional array of (numerical) features describing a particular decision variable. The first method is used by LearningSolver to construct a concrete Pyomo model, which will be provided to the internal MIP solver. The second and third methods provide an encoding of the instance, which can be used by the ML models to make predictions. In the knapsack problem, for example, an implementation may decide to provide as instance features the average weights, average prices, number of items and the size of the knapsack. The weight and the price of each individual item could be provided as variable features. See src/python/miplearn/problems/knapsack.py for a concrete example. An optional method which can be implemented is instance.get_variable_category(var_name, index) , which returns a category (a string, an integer or any hashable type) for each decision variable. If two variables have the same category, LearningSolver will use the same internal ML model to predict the values of both variables. By default, all variables belong to the \"default\" category, and therefore only one ML model is used for all variables. If the returned category is None , ML predictors will ignore the variable. It is not necessary to have a one-to-one correspondence between features and problem instances. One important (and deliberate) limitation of MIPLearn, however, is that get_instance_features() must always return arrays of the same length for all relevant instances of the problem. Similarly, get_variable_features(var_name, index) must also always return arrays of the same length for all variables in each category. It is up to the user to decide how to encode variable-length characteristics of the problem into fixed-length vectors. In graph problems, for example, graph embeddings can be used to reduce the (variable-length) lists of nodes and edges into a fixed-length structure that still preserves some properties of the graph. Different instance encodings may have a significant impact on performance.","title":"Describing problem instances"},{"location":"usage/#obtaining-heuristic-solutions","text":"By default, LearningSolver uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver. In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts. For more significant performance benefits, LearningSolver can also be configured to place additional trust in the Machine Learning predictors, by using the mode=\"heuristic\" constructor argument. When operating in this mode, if an ML model is statistically shown (through stratified k-fold cross validation ) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see references and benchmark results ).
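As a quick reference, the snippet below sketches how this mode is enabled. It is illustrative only: train_instances and new_instance are placeholder names, and the training instances are assumed to have been solved beforehand, as described in the usage section.

```python
from miplearn import LearningSolver

# Illustrative sketch: enable heuristic mode via the mode="heuristic"
# constructor argument described above. `train_instances` must contain
# previously solved instances; `new_instance` is a placeholder.
solver = LearningSolver(mode="heuristic")
solver.fit(train_instances)
solver.solve(new_instance)  # may fix variables based on ML predictions
```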
Danger The heuristic mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible.","title":"Obtaining heuristic solutions"},{"location":"usage/#saving-and-loading-solver-state","text":"After solving a large number of training instances, it may be desirable to save the current state of LearningSolver to disk, so that the solver can still use the acquired knowledge after the application restarts. This can be accomplished by using the standard pickle module, as the following example illustrates: from miplearn import LearningSolver import pickle # Solve training instances training_instances = [...] solver = LearningSolver() for instance in training_instances: solver.solve(instance) # Train machine-learning models solver.fit(training_instances) # Save trained solver to disk pickle.dump(solver, open(\"solver.pickle\", \"wb\")) # Application restarts... # Load trained solver from disk solver = pickle.load(open(\"solver.pickle\", \"rb\")) # Solve additional instances test_instances = [...] for instance in test_instances: solver.solve(instance)","title":"Saving and loading solver state"},{"location":"usage/#solving-training-instances-in-parallel","text":"In many situations, training and test instances can be solved in parallel to accelerate the training process. LearningSolver provides the method parallel_solve(instances) to easily achieve this: from miplearn import LearningSolver training_instances = [...] solver = LearningSolver() solver.parallel_solve(training_instances, n_jobs=4) solver.fit(training_instances) # Test phase... test_instances = [...] solver.parallel_solve(test_instances)","title":"Solving training instances in parallel"},{"location":"usage/#current-limitations","text":"Only binary and continuous decision variables are currently supported.","title":"Current Limitations"}]} \ No newline at end of file +{"config":{"lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"MIPLearn MIPLearn is an extensible framework for Learning-Enhanced Mixed-Integer Optimization , an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data. The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see benchmark results and references for more details). Features MIPLearn proposes a flexible problem specification format, which allows users to describe their particular optimization problems to a Learning-Enhanced MIP solver, both from the MIP perspective and from the ML perspective, without making any assumptions on the problem being modeled, the mathematical formulation of the problem, or ML encoding. While the format is very flexible, some constraints are enforced to ensure that it is usable by an actual solver. 
MIPLearn provides a reference implementation of a Learning-Enhanced Solver , which can use the above problem specification format to automatically predict, based on previously solved instances, a number of hints to accelerate MIP performance. Currently, the reference solver is able to predict: (i) partial solutions which are likely to work well as MIP starts; (ii) an initial set of lazy constraints to enforce; (iii) variable branching priorities to accelerate the exploration of the branch-and-bound tree; (iv) the optimal objective value based on the solution to the LP relaxation. The usage of the solver is very straightforward. The most suitable ML models are automatically selected, trained, cross-validated and applied to the problem with no user intervention. MIPLearn provides a set of benchmark problems and random instance generators, covering applications from different domains, which can be used to quickly evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. MIPLearn is customizable and extensible . For MIP and ML researchers exploring new techniques to accelerate MIP performance based on historical data, each component of the reference solver can be individually replaced, extended or customized. Documentation Installation and typical usage Benchmark utilities Benchmark problems, challenges and results Customizing the solver License, authors, references and acknowledgments Source Code https://github.com/ANL-CEEESA/MIPLearn","title":"Home"},{"location":"#miplearn","text":"MIPLearn is an extensible framework for Learning-Enhanced Mixed-Integer Optimization , an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data. The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see benchmark results and references for more details).","title":"MIPLearn"},{"location":"#features","text":"MIPLearn proposes a flexible problem specification format, which allows users to describe their particular optimization problems to a Learning-Enhanced MIP solver, both from the MIP perspective and from the ML perspective, without making any assumptions on the problem being modeled, the mathematical formulation of the problem, or ML encoding. While the format is very flexible, some constraints are enforced to ensure that it is usable by an actual solver. MIPLearn provides a reference implementation of a Learning-Enhanced Solver , which can use the above problem specification format to automatically predict, based on previously solved instances, a number of hints to accelerate MIP performance. Currently, the reference solver is able to predict: (i) partial solutions which are likely to work well as MIP starts; (ii) an initial set of lazy constraints to enforce; (iii) variable branching priorities to accelerate the exploration of the branch-and-bound tree; (iv) the optimal objective value based on the solution to the LP relaxation. The usage of the solver is very straightforward. The most suitable ML models are automatically selected, trained, cross-validated and applied to the problem with no user intervention. 
MIPLearn provides a set of benchmark problems and random instance generators, covering applications from different domains, which can be used to quickly evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. MIPLearn is customizable and extensible . For MIP and ML researchers exploring new techniques to accelerate MIP performance based on historical data, each component of the reference solver can be individually replaced, extended or customized.","title":"Features"},{"location":"#documentation","text":"Installation and typical usage Benchmark utilities Benchmark problems, challenges and results Customizing the solver License, authors, references and acknowledgments","title":"Documentation"},{"location":"#source-code","text":"https://github.com/ANL-CEEESA/MIPLearn","title":"Source Code"},{"location":"about/","text":"About Authors Alinson S. Xavier, Argonne National Laboratory < axavier@anl.gov > Feng Qiu, Argonne National Laboratory < fqiu@anl.gov > Acknowledgments Based upon work supported by Laboratory Directed Research and Development (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357. References Learning to Solve Large-Scale Security-Constrained Unit Commitment Problems. Alinson S. Xavier, Feng Qiu, Shabbir Ahmed . INFORMS Journal on Computing (to appear). ArXiv:1902:01696 License MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization Copyright \u00a9 2020, UChicago Argonne, LLC. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"About"},{"location":"about/#about","text":"","title":"About"},{"location":"about/#authors","text":"Alinson S. Xavier, Argonne National Laboratory < axavier@anl.gov > Feng Qiu, Argonne National Laboratory < fqiu@anl.gov >","title":"Authors"},{"location":"about/#acknowledgments","text":"Based upon work supported by Laboratory Directed Research and Development (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. 
DE-AC02-06CH11357.","title":"Acknowledgments"},{"location":"about/#references","text":"Learning to Solve Large-Scale Security-Constrained Unit Commitment Problems. Alinson S. Xavier, Feng Qiu, Shabbir Ahmed . INFORMS Journal on Computing (to appear). ArXiv:1902:01696","title":"References"},{"location":"about/#license","text":"MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization Copyright \u00a9 2020, UChicago Argonne, LLC. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"License"},{"location":"benchmark/","text":"Benchmarks Utilities Using BenchmarkRunner MIPLearn provides the utility class BenchmarkRunner , which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage: from miplearn import BenchmarkRunner, LearningSolver # Create train and test instances train_instances = [...] test_instances = [...] # Training phase... training_solver = LearningSolver(...) training_solver.parallel_solve(train_instances, n_jobs=10) # Test phase... test_solvers = { \"Baseline\": LearningSolver(...), # each solver may have different parameters \"Strategy A\": LearningSolver(...), \"Strategy B\": LearningSolver(...), \"Strategy C\": LearningSolver(...), } benchmark = BenchmarkRunner(test_solvers) benchmark.fit(train_instances) benchmark.parallel_solve(test_instances, n_jobs=2) print(benchmark.raw_results()) The method fit trains the ML models for each individual solver. The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns: Solver, the name of the solver. Instance, the sequence number identifying the instance. Wallclock Time, the wallclock running time (in seconds) spent by the solver; Lower Bound, the best lower bound obtained by the solver; Upper Bound, the best upper bound obtained by the solver; Gap, the relative MIP integrality gap at the end of the optimization; Nodes, the number of explored branch-and-bound nodes. 
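Since raw_results returns a regular pandas DataFrame, it can be summarized with the usual pandas operations. The snippet below is an illustrative sketch only, assuming the column names listed above and the benchmark object created in the preceding example.

```python
# Illustrative sketch: average performance per solver across all instances,
# using the DataFrame returned by raw_results().
results = benchmark.raw_results()
print(results.groupby("Solver")[["Wallclock Time", "Gap"]].mean())
```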
In addition to the above, there is also a \"Relative\" version of most columns, where the raw number is compared to the solver which provided the best performance. The Relative Wallclock Time, for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance. For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2. Saving and loading benchmark results When iteratively exploring new formulations, encodings and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. BenchmarkRunner provides the methods save_results and load_results , which can be used to avoid this repetition, as the next example shows: # Benchmark baseline solvers and save results to a file. benchmark = BenchmarkRunner(baseline_solvers) benchmark.parallel_solve(test_instances) benchmark.save_results(\"baseline_results.csv\") # Benchmark remaining solvers, loading baseline results from file. benchmark = BenchmarkRunner(alternative_solvers) benchmark.load_results(\"baseline_results.csv\") benchmark.fit(training_instances) benchmark.parallel_solve(test_instances)","title":"Benchmark"},{"location":"benchmark/#benchmarks-utilities","text":"","title":"Benchmarks Utilities"},{"location":"benchmark/#using-benchmarkrunner","text":"MIPLearn provides the utility class BenchmarkRunner , which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage: from miplearn import BenchmarkRunner, LearningSolver # Create train and test instances train_instances = [...] test_instances = [...] # Training phase... training_solver = LearningSolver(...) training_solver.parallel_solve(train_instances, n_jobs=10) # Test phase... test_solvers = { \"Baseline\": LearningSolver(...), # each solver may have different parameters \"Strategy A\": LearningSolver(...), \"Strategy B\": LearningSolver(...), \"Strategy C\": LearningSolver(...), } benchmark = BenchmarkRunner(test_solvers) benchmark.fit(train_instances) benchmark.parallel_solve(test_instances, n_jobs=2) print(benchmark.raw_results()) The method fit trains the ML models for each individual solver. The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns: Solver, the name of the solver. Instance, the sequence number identifying the instance. Wallclock Time, the wallclock running time (in seconds) spent by the solver; Lower Bound, the best lower bound obtained by the solver; Upper Bound, the best upper bound obtained by the solver; Gap, the relative MIP integrality gap at the end of the optimization; Nodes, the number of explored branch-and-bound nodes. In addition to the above, there is also a \"Relative\" version of most columns, where the raw number is compared to the solver which provided the best performance. The Relative Wallclock Time, for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance.
For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2.","title":"Using BenchmarkRunner"},{"location":"benchmark/#saving-and-loading-benchmark-results","text":"When iteratively exploring new formulations, encodings and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. BenchmarkRunner provides the methods save_results and load_results , which can be used to avoid this repetition, as the next example shows: # Benchmark baseline solvers and save results to a file. benchmark = BenchmarkRunner(baseline_solvers) benchmark.parallel_solve(test_instances) benchmark.save_results(\"baseline_results.csv\") # Benchmark remaining solvers, loading baseline results from file. benchmark = BenchmarkRunner(alternative_solvers) benchmark.load_results(\"baseline_results.csv\") benchmark.fit(training_instances) benchmark.parallel_solve(test_instances)","title":"Saving and loading benchmark results"},{"location":"customization/","text":"Customization Customizing solver parameters Selecting the internal MIP solver By default, LearningSolver uses Gurobi as its internal MIP solver. Another supported solver is IBM ILOG CPLEX . To switch between solvers, use the solver constructor argument, as shown below. It is also possible to specify a time limit (in seconds) and a relative MIP gap tolerance. from miplearn import LearningSolver solver = LearningSolver(solver=\"cplex\", time_limit=300, gap_tolerance=1e-3) Customizing solver components LearningSolver is composed of a number of individual machine-learning components, each targeting a different part of the solution process. Each component can be individually enabled, disabled or customized. The following components are enabled by default: LazyConstraintComponent : Predicts which lazy constraints to initially enforce. ObjectiveValueComponent : Predicts the optimal value of the optimization problem, given the optimal solution to the LP relaxation. PrimalSolutionComponent : Predicts optimal values for binary decision variables. In heuristic mode, this component fixes the variables to their predicted values. In exact mode, the predicted values are provided to the solver as a (partial) MIP start. The following components are also available, but not enabled by default: BranchPriorityComponent : Predicts good branch priorities for decision variables. Selecting components To create a LearningSolver with a specific set of components, the components constructor argument may be used, as the next example shows: # Create a solver without any components solver1 = LearningSolver(components=[]) # Create a solver with only two components solver2 = LearningSolver(components=[ LazyConstraintComponent(...), PrimalSolutionComponent(...), ]) It is also possible to add components to an existing solver using the solver.add method, as shown below. If the solver already holds another component of that type, the new component will replace the previous one.
# Create solver with default components solver = LearningSolver() # Replace the default LazyConstraintComponent by one with custom parameters solver.add(LazyConstraintComponent(...)) Adjusting component aggressiveness The aggressiveness of classification components (such as PrimalSolutionComponent and LazyConstraintComponent ) can be adjusted through the threshold constructor argument. Internally, these components ask the ML models how confident they are on each prediction (through the predict_proba method in the sklearn API), and only take into account predictions which have probabilities above the threshold. Lowering a component's threshold increases its aggressiveness, while raising a component's threshold makes it more conservative. MIPLearn also includes MinPrecisionThreshold , a dynamic threshold which adjusts itself automatically during training to achieve a minimum desired precision (the fraction of positive predictions which are actually correct). The example below shows how to initialize a PrimalSolutionComponent which achieves 95% precision, possibly at the cost of a lower recall. To make the component more aggressive, this precision may be lowered. PrimalSolutionComponent(threshold=MinPrecisionThreshold(0.95)) Evaluating component performance MIPLearn allows solver components to be modified, trained and evaluated in isolation. In the following example, we build and fit PrimalSolutionComponent outside the solver, then evaluate its performance. from miplearn import PrimalSolutionComponent # User-provided set of previously-solved instances train_instances = [...] # Construct and fit component on a subset of training instances comp = PrimalSolutionComponent() comp.fit(train_instances[:100]) # Evaluate performance on an additional set of training instances ev = comp.evaluate(train_instances[100:150]) The method evaluate returns a dictionary with performance evaluation statistics for each training instance provided, and for each type of prediction the component makes. To obtain a summary across all instances, pandas may be used, as below: import pandas as pd pd.DataFrame(ev[\"Fix one\"]).mean(axis=1) Predicted positive 3.120000 Predicted negative 196.880000 Condition positive 62.500000 Condition negative 137.500000 True positive 3.060000 True negative 137.440000 False positive 0.060000 False negative 59.440000 Accuracy 0.702500 F1 score 0.093050 Recall 0.048921 Precision 0.981667 Predicted positive (%) 1.560000 Predicted negative (%) 98.440000 Condition positive (%) 31.250000 Condition negative (%) 68.750000 True positive (%) 1.530000 True negative (%) 68.720000 False positive (%) 0.030000 False negative (%) 29.720000 dtype: float64 Regression components (such as ObjectiveValueComponent ) can also be trained and evaluated similarly, as the next example shows: from miplearn import ObjectiveValueComponent comp = ObjectiveValueComponent() comp.fit(train_instances[:100]) ev = comp.evaluate(train_instances[100:150]) import pandas as pd pd.DataFrame(ev).mean(axis=1) Mean squared error 7001.977827 Explained variance 0.519790 Max error 242.375804 Mean absolute error 65.843924 R2 0.517612 Median absolute error 65.843924 dtype: float64 Using customized ML classifiers and regressors By default, given a training set of instances, MIPLearn trains a fixed set of ML classifiers and regressors, then selects the best one based on cross-validation performance. Alternatively, the user may specify which ML model a component should use through the classifier or regressor constructor parameters, as the example below shows.
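For instance, assuming train_instances is the list of previously-solved training instances from the examples above, a component can be built around a plain scikit-learn regressor. A minimal sketch; any model following the sklearn API should work:
from miplearn import ObjectiveValueComponent
from sklearn.linear_model import LinearRegression

# Predict optimal values with an ordinary linear model instead of the default regressors
comp = ObjectiveValueComponent(regressor=LinearRegression())
comp.fit(train_instances)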
The provided classifiers and regressors must follow the sklearn API. In particular, classifiers must provide the methods fit , predict_proba and predict , while regressors must provide the methods fit and predict . Danger MIPLearn must be able to generate a copy of any custom ML classifiers and regressors through the standard copy.deepcopy method. This currently makes it incompatible with Keras and TensorFlow predictors. This is a known limitation, which will be addressed in a future version. The example below shows how to construct a PrimalSolutionComponent which internally uses sklearn's KNeighborsClassifier . Any other sklearn classifier or pipeline can be used. from miplearn import PrimalSolutionComponent from sklearn.neighbors import KNeighborsClassifier comp = PrimalSolutionComponent(classifier=KNeighborsClassifier(n_neighbors=5)) comp.fit(train_instances)","title":"Customization"},{"location":"customization/#customization","text":"","title":"Customization"},{"location":"customization/#customizing-solver-parameters","text":"","title":"Customizing solver parameters"},{"location":"customization/#selecting-the-internal-mip-solver","text":"By default, LearningSolver uses Gurobi as its internal MIP solver. Another supported solver is IBM ILOG CPLEX . To switch between solvers, use the solver constructor argument, as shown below. It is also possible to specify a time limit (in seconds) and a relative MIP gap tolerance. from miplearn import LearningSolver solver = LearningSolver(solver=\"cplex\", time_limit=300, gap_tolerance=1e-3)","title":"Selecting the internal MIP solver"},{"location":"customization/#customizing-solver-components","text":"LearningSolver is composed of a number of individual machine-learning components, each targeting a different part of the solution process. Each component can be individually enabled, disabled or customized. The following components are enabled by default: LazyConstraintComponent : Predicts which lazy constraints to initially enforce. ObjectiveValueComponent : Predicts the optimal value of the optimization problem, given the optimal solution to the LP relaxation. PrimalSolutionComponent : Predicts optimal values for binary decision variables. In heuristic mode, this component fixes the variables to their predicted values. In exact mode, the predicted values are provided to the solver as a (partial) MIP start. The following components are also available, but not enabled by default: BranchPriorityComponent : Predicts good branch priorities for decision variables.","title":"Customizing solver components"},{"location":"customization/#selecting-components","text":"To create a LearningSolver with a specific set of components, the components constructor argument may be used, as the next example shows: # Create a solver without any components solver1 = LearningSolver(components=[]) # Create a solver with only two components solver2 = LearningSolver(components=[ LazyConstraintComponent(...), PrimalSolutionComponent(...), ]) It is also possible to add components to an existing solver using the solver.add method, as shown below. If the solver already holds another component of that type, the new component will replace the previous one.
# Create solver with default components solver = LearningSolver() # Replace the default LazyConstraintComponent by one with custom parameters solver.add(LazyConstraintComponent(...))","title":"Selecting components"},{"location":"customization/#adjusting-component-aggressiveness","text":"The aggressiveness of classification components (such as PrimalSolutionComponent and LazyConstraintComponent ) can be adjusted through the threshold constructor argument. Internally, these components ask the ML models how confident they are on each prediction (through the predict_proba method in the sklearn API), and only take into account predictions which have probabilities above the threshold. Lowering a component's threshold increases its aggressiveness, while raising a component's threshold makes it more conservative. MIPLearn also includes MinPrecisionThreshold , a dynamic threshold which adjusts itself automatically during training to achieve a minimum desired precision (the fraction of positive predictions which are actually correct). The example below shows how to initialize a PrimalSolutionComponent which achieves 95% precision, possibly at the cost of a lower recall. To make the component more aggressive, this precision may be lowered. PrimalSolutionComponent(threshold=MinPrecisionThreshold(0.95))","title":"Adjusting component aggressiveness"},{"location":"customization/#evaluating-component-performance","text":"MIPLearn allows solver components to be modified, trained and evaluated in isolation. In the following example, we build and fit PrimalSolutionComponent outside the solver, then evaluate its performance. from miplearn import PrimalSolutionComponent # User-provided set of previously-solved instances train_instances = [...] # Construct and fit component on a subset of training instances comp = PrimalSolutionComponent() comp.fit(train_instances[:100]) # Evaluate performance on an additional set of training instances ev = comp.evaluate(train_instances[100:150]) The method evaluate returns a dictionary with performance evaluation statistics for each training instance provided, and for each type of prediction the component makes.
To obtain a summary across all instances, pandas may be used, as below: import pandas as pd pd.DataFrame(ev[\"Fix one\"]).mean(axis=1) Predicted positive 3.120000 Predicted negative 196.880000 Condition positive 62.500000 Condition negative 137.500000 True positive 3.060000 True negative 137.440000 False positive 0.060000 False negative 59.440000 Accuracy 0.702500 F1 score 0.093050 Recall 0.048921 Precision 0.981667 Predicted positive (%) 1.560000 Predicted negative (%) 98.440000 Condition positive (%) 31.250000 Condition negative (%) 68.750000 True positive (%) 1.530000 True negative (%) 68.720000 False positive (%) 0.030000 False negative (%) 29.720000 dtype: float64 Regression components (such as ObjectiveValueComponent ) can also be trained and evaluated similarly, as the next example shows: from miplearn import ObjectiveValueComponent comp = ObjectiveValueComponent() comp.fit(train_instances[:100]) ev = comp.evaluate(train_instances[100:150]) import pandas as pd pd.DataFrame(ev).mean(axis=1) Mean squared error 7001.977827 Explained variance 0.519790 Max error 242.375804 Mean absolute error 65.843924 R2 0.517612 Median absolute error 65.843924 dtype: float64","title":"Evaluating component performance"},{"location":"customization/#using-customized-ml-classifiers-and-regressors","text":"By default, given a training set of instances, MIPLearn trains a fixed set of ML classifiers and regressors, then selects the best one based on cross-validation performance. Alternatively, the user may specify which ML model a component should use through the classifier or regressor constructor parameters. The provided classifiers and regressors must follow the sklearn API. In particular, classifiers must provide the methods fit , predict_proba and predict , while regressors must provide the methods fit and predict . Danger MIPLearn must be able to generate a copy of any custom ML classifiers and regressors through the standard copy.deepcopy method. This currently makes it incompatible with Keras and TensorFlow predictors. This is a known limitation, which will be addressed in a future version. The example below shows how to construct a PrimalSolutionComponent which internally uses sklearn's KNeighborsClassifier . Any other sklearn classifier or pipeline can be used. from miplearn import PrimalSolutionComponent from sklearn.neighbors import KNeighborsClassifier comp = PrimalSolutionComponent(classifier=KNeighborsClassifier(n_neighbors=5)) comp.fit(train_instances)","title":"Using customized ML classifiers and regressors"},{"location":"problems/","text":"Benchmark Problems, Challenges and Results MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, which can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. On this page, we describe these problems, the included instance generators, and we present some benchmark results for LearningSolver with default parameters. Preliminaries Benchmark challenges When evaluating the performance of a conventional MIP solver, benchmark sets , such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques is typically measured as the average (or total) running time the solver takes to solve the entire benchmark set.
For Learning-Enhanced MIP solvers, it is also necessary to specify which instances the solver should be trained on (the training instances ) before solving the actual set of instances we are interested in (the test instances ). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger performance benefits. In MIPLearn, each optimization problem comes with a set of benchmark challenges , which specify how the training and test instances should be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from. Baseline results To illustrate the performance of LearningSolver , and to set a baseline for newly proposed techniques, we present on this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. For more detailed computational studies, see references . We compare three solvers: baseline: Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver) ml-exact: LearningSolver with default settings, using Gurobi 9.0 as internal MIP solver ml-heuristic: Same as above, but with mode=\"heuristic\" All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz). All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously. Maximum Weight Stable Set Problem Problem definition Given a simple undirected graph $G=(V,E)$ and weights $w \\in \\mathbb{R}^V$, the problem is to find a stable set $S \\subseteq V$ that maximizes $\\sum_{v \\in S} w_v$. We recall that a subset $S \\subseteq V$ is a stable set if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems. Random instance generator The class MaxWeightStableSetGenerator can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter fix_graph=True is provided, one random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ is generated when the constructor is called, where $n$ and $p$ are sampled from user-provided probability distributions n and p . To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution w . When fix_graph=False , a new random graph is generated for each instance, while the remaining parameters are sampled in the same way. Challenge A Fixed random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ with $n=200$ and $p=5\\%$ Random vertex weights $w_v \\sim U(100, 150)$ 500 training instances, 50 test instances MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.), n=randint(low=200, high=201), p=uniform(loc=0.05, scale=0.0), fix_graph=True) Traveling Salesman Problem Problem definition Given a list of cities and the distance between each pair of cities, the problem asks for the shortest route starting at the first city, visiting each other city exactly once, then returning to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete problems. Random problem generator The class TravelingSalesmanGenerator can be used to generate random instances of this problem.
Initially, the generator creates $n$ cities $(x_1,y_1),\\ldots,(x_n,y_n) \\in \\mathbb{R}^2$, where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions n , x and y . For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to: d_{i,j} = \\gamma_{i,j} \\sqrt{(x_i-x_j)^2 + (y_i - y_j)^2} where $\\gamma_{i,j}$ is sampled from the distribution gamma . If fix_cities=True is provided, the list of cities is kept the same for all generated instances. The $\\gamma$ values, and therefore also the distances, are still different. By default, all distances $d_{i,j}$ are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. Challenge A Fixed list of 350 cities in the $[0, 1000]^2$ square $\\gamma_{i,j} \\sim U(0.95, 1.05)$ 500 training instances, 50 test instances TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), y=uniform(loc=0.0, scale=1000.0), n=randint(low=350, high=351), gamma=uniform(loc=0.95, scale=0.1), fix_cities=True, round=True, ) Multidimensional 0-1 Knapsack Problem Problem definition Given a set of $n$ items and $m$ types of resources (also called knapsacks ), the problem is to find a subset of items that maximizes profit without consuming more resources than are available. More precisely, the problem is: \\begin{align*} \\text{maximize} & \\sum_{j=1}^n p_j x_j \\\\ \\text{subject to} & \\sum_{j=1}^n w_{ij} x_j \\leq b_i & \\forall i=1,\\ldots,m \\\\ & x_j \\in \\{0,1\\} & \\forall j=1,\\ldots,n \\end{align*} Random instance generator The class MultiKnapsackGenerator can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions n and m . The weights $w_{ij}$ are sampled independently from the provided distribution w . The capacity of knapsack $i$ is set to b_i = \\alpha_i \\sum_{j=1}^n w_{ij} where $\\alpha_i$, the tightness ratio, is sampled from the provided probability distribution alpha . To make the instances more challenging, the costs of the items are linearly correlated with their average weights. More specifically, the price of each item $j$ is set to: p_j = \\sum_{i=1}^m \\frac{w_{ij}}{m} + K u_j, where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled from the provided probability distributions K and u . If fix_w=True is provided, then $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as u and K are not constants, the generated instances will still not be completely identical. If a probability distribution w_jitter is provided, then item weights will be set to $w_{ij} \\gamma_{ij}$ where $\\gamma_{ij}$ is sampled from w_jitter . When combined with fix_w=True , this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead. By default, all generated prices, weights and capacities are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. References Fr\u00e9ville, Arnaud, and G\u00e9rard Plateau. An efficient preprocessing procedure for the multidimensional 0\u20131 knapsack problem. Discrete Applied Mathematics 49.1-3 (1994): 189-212. Fr\u00e9ville, Arnaud.
The multidimensional 0\u20131 knapsack problem: An overview. European Journal of Operational Research 155.1 (2004): 1-21. Challenge A 250 variables, 10 constraints, fixed weights $w \\sim U(0, 1000), \\gamma \\sim U(0.95, 1.05)$ $K = 500, u \\sim U(0, 1), \\alpha = 0.25$ 500 training instances, 50 test instances MultiKnapsackGenerator(n=randint(low=250, high=251), m=randint(low=10, high=11), w=uniform(loc=0.0, scale=1000.0), K=uniform(loc=500.0, scale=0.0), u=uniform(loc=0.0, scale=1.0), alpha=uniform(loc=0.25, scale=0.0), fix_w=True, w_jitter=uniform(loc=0.95, scale=0.1), )","title":"Problems"},{"location":"problems/#benchmark-problems-challenges-and-results","text":"MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, which can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. On this page, we describe these problems, the included instance generators, and we present some benchmark results for LearningSolver with default parameters.","title":"Benchmark Problems, Challenges and Results"},{"location":"problems/#preliminaries","text":"","title":"Preliminaries"},{"location":"problems/#benchmark-challenges","text":"When evaluating the performance of a conventional MIP solver, benchmark sets , such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques is typically measured as the average (or total) running time the solver takes to solve the entire benchmark set. For Learning-Enhanced MIP solvers, it is also necessary to specify which instances the solver should be trained on (the training instances ) before solving the actual set of instances we are interested in (the test instances ). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger performance benefits. In MIPLearn, each optimization problem comes with a set of benchmark challenges , which specify how the training and test instances should be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from.","title":"Benchmark challenges"},{"location":"problems/#baseline-results","text":"To illustrate the performance of LearningSolver , and to set a baseline for newly proposed techniques, we present on this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. For more detailed computational studies, see references . We compare three solvers: baseline: Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver) ml-exact: LearningSolver with default settings, using Gurobi 9.0 as internal MIP solver ml-heuristic: Same as above, but with mode=\"heuristic\" All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz).
All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously.","title":"Baseline results"},{"location":"problems/#maximum-weight-stable-set-problem","text":"","title":"Maximum Weight Stable Set Problem"},{"location":"problems/#problem-definition","text":"Given a simple undirected graph $G=(V,E)$ and weights $w \\in \\mathbb{R}^V$, the problem is to find a stable set $S \\subseteq V$ that maximizes $\\sum_{v \\in S} w_v$. We recall that a subset $S \\subseteq V$ is a stable set if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems.","title":"Problem definition"},{"location":"problems/#random-instance-generator","text":"The class MaxWeightStableSetGenerator can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter fix_graph=True is provided, one random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ is generated when the constructor is called, where $n$ and $p$ are sampled from user-provided probability distributions n and p . To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution w . When fix_graph=False , a new random graph is generated for each instance, while the remaining parameters are sampled in the same way.","title":"Random instance generator"},{"location":"problems/#challenge-a","text":"Fixed random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ with $n=200$ and $p=5\\%$ Random vertex weights $w_v \\sim U(100, 150)$ 500 training instances, 50 test instances MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.), n=randint(low=200, high=201), p=uniform(loc=0.05, scale=0.0), fix_graph=True)","title":"Challenge A"},{"location":"problems/#traveling-salesman-problem","text":"","title":"Traveling Salesman Problem"},{"location":"problems/#problem-definition_1","text":"Given a list of cities and the distance between each pair of cities, the problem asks for the shortest route starting at the first city, visiting each other city exactly once, then returning to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete problems.","title":"Problem definition"},{"location":"problems/#random-problem-generator","text":"The class TravelingSalesmanGenerator can be used to generate random instances of this problem. Initially, the generator creates $n$ cities $(x_1,y_1),\\ldots,(x_n,y_n) \\in \\mathbb{R}^2$, where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions n , x and y . For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to: d_{i,j} = \\gamma_{i,j} \\sqrt{(x_i-x_j)^2 + (y_i - y_j)^2} where $\\gamma_{i,j}$ is sampled from the distribution gamma . If fix_cities=True is provided, the list of cities is kept the same for all generated instances. The $\\gamma$ values, and therefore also the distances, are still different. By default, all distances $d_{i,j}$ are rounded to the nearest integer.
If round=False is provided, this rounding will be disabled.","title":"Random problem generator"},{"location":"problems/#challenge-a_1","text":"Fixed list of 350 cities in the $[0, 1000]^2$ square $\\gamma_{i,j} \\sim U(0.95, 1.05)$ 500 training instances, 50 test instances TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), y=uniform(loc=0.0, scale=1000.0), n=randint(low=350, high=351), gamma=uniform(loc=0.95, scale=0.1), fix_cities=True, round=True, )","title":"Challenge A"},{"location":"problems/#multidimensional-0-1-knapsack-problem","text":"","title":"Multidimensional 0-1 Knapsack Problem"},{"location":"problems/#problem-definition_2","text":"Given a set of $n$ items and $m$ types of resources (also called knapsacks ), the problem is to find a subset of items that maximizes profit without consuming more resources than are available. More precisely, the problem is: \\begin{align*} \\text{maximize} & \\sum_{j=1}^n p_j x_j \\\\ \\text{subject to} & \\sum_{j=1}^n w_{ij} x_j \\leq b_i & \\forall i=1,\\ldots,m \\\\ & x_j \\in \\{0,1\\} & \\forall j=1,\\ldots,n \\end{align*}","title":"Problem definition"},{"location":"problems/#random-instance-generator_1","text":"The class MultiKnapsackGenerator can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions n and m . The weights $w_{ij}$ are sampled independently from the provided distribution w . The capacity of knapsack $i$ is set to b_i = \\alpha_i \\sum_{j=1}^n w_{ij} where $\\alpha_i$, the tightness ratio, is sampled from the provided probability distribution alpha . To make the instances more challenging, the costs of the items are linearly correlated with their average weights. More specifically, the price of each item $j$ is set to: p_j = \\sum_{i=1}^m \\frac{w_{ij}}{m} + K u_j, where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled from the provided probability distributions K and u . If fix_w=True is provided, then $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as u and K are not constants, the generated instances will still not be completely identical. If a probability distribution w_jitter is provided, then item weights will be set to $w_{ij} \\gamma_{ij}$ where $\\gamma_{ij}$ is sampled from w_jitter . When combined with fix_w=True , this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead. By default, all generated prices, weights and capacities are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. References Fr\u00e9ville, Arnaud, and G\u00e9rard Plateau. An efficient preprocessing procedure for the multidimensional 0\u20131 knapsack problem. Discrete Applied Mathematics 49.1-3 (1994): 189-212. Fr\u00e9ville, Arnaud. The multidimensional 0\u20131 knapsack problem: An overview.
European Journal of Operational Research 155.1 (2004): 1-21.","title":"Random instance generator"},{"location":"problems/#challenge-a_2","text":"250 variables, 10 constraints, fixed weights $w \\sim U(0, 1000), \\gamma \\sim U(0.95, 1.05)$ $K = 500, u \\sim U(0, 1), \\alpha = 0.25$ 500 training instances, 50 test instances MultiKnapsackGenerator(n=randint(low=250, high=251), m=randint(low=10, high=11), w=uniform(loc=0.0, scale=1000.0), K=uniform(loc=500.0, scale=0.0), u=uniform(loc=0.0, scale=1.0), alpha=uniform(loc=0.25, scale=0.0), fix_w=True, w_jitter=uniform(loc=0.95, scale=0.1), )","title":"Challenge A"},{"location":"usage/","text":"Usage Installation In these docs, we describe the Python/Pyomo version of the package, although a Julia/JuMP version is also available. A mixed-integer solver is also required and its Python bindings must be properly installed. Supported solvers are currently CPLEX and Gurobi. To install MIPLearn, run: pip3 install miplearn After installation, the package miplearn should become available to Python. It can be imported as follows: import miplearn Using LearningSolver The main class provided by this package is LearningSolver , a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage: from miplearn import LearningSolver # List of user-provided instances training_instances = [...] test_instances = [...] # Create solver solver = LearningSolver() # Solve all training instances for instance in training_instances: solver.solve(instance) # Learn from training instances solver.fit(training_instances) # Solve all test instances for instance in test_instances: solver.solve(instance) In this example, we have two lists of user-provided instances: training_instances and test_instances . We start by solving all training instances. Since there is no historical information available at this point, the instances will be processed from scratch, with no ML acceleration. After solving each instance, the solver stores within each instance object the optimal solution, the optimal objective value, and other information that can be used to accelerate future solves. After all training instances are solved, we call solver.fit(training_instances) . This instructs the solver to train all its internal machine-learning models based on the solutions of the (solved) training instances. Subsequent calls to solver.solve(instance) will automatically use the trained Machine Learning models to accelerate the solution process. Describing problem instances Instances to be solved by LearningSolver must derive from the abstract class miplearn.Instance . The following three abstract methods must be implemented: instance.to_model() , which returns a concrete Pyomo model corresponding to the instance; instance.get_instance_features() , which returns a 1-dimensional Numpy array of (numerical) features describing the entire instance; instance.get_variable_features(var_name, index) , which returns a 1-dimensional array of (numerical) features describing a particular decision variable. The first method is used by LearningSolver to construct a concrete Pyomo model, which will be provided to the internal MIP solver. The second and third methods provide an encoding of the instance, which can be used by the ML models to make predictions.
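A minimal sketch of such a class for the 0-1 knapsack problem is shown below. The method names come from the list above; the constructor arguments, data layout and feature choices are illustrative assumptions, not part of the required interface:
import numpy as np
import pyomo.environ as pe
from miplearn import Instance

class KnapsackInstance(Instance):
    def __init__(self, weights, prices, capacity):
        # Hypothetical user-chosen data layout
        self.weights = weights
        self.prices = prices
        self.capacity = capacity

    def to_model(self):
        # Concrete Pyomo model handed to the internal MIP solver
        model = pe.ConcreteModel()
        items = range(len(self.weights))
        model.x = pe.Var(items, domain=pe.Binary)
        model.obj = pe.Objective(
            expr=sum(self.prices[i] * model.x[i] for i in items),
            sense=pe.maximize,
        )
        model.knapsack = pe.Constraint(
            expr=sum(self.weights[i] * model.x[i] for i in items) <= self.capacity,
        )
        return model

    def get_instance_features(self):
        # Fixed-length encoding of the entire instance
        return np.array([
            np.mean(self.weights),
            np.mean(self.prices),
            len(self.weights),
            self.capacity,
        ])

    def get_variable_features(self, var_name, index):
        # Fixed-length encoding of one decision variable
        return np.array([self.weights[index], self.prices[index]])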
In the knapsack problem, for example, an implementation may decide to provide as instance features the average weights, average prices, number of items and the size of the knapsack. The weight and the price of each individual item could be provided as variable features. See src/python/miplearn/problems/knapsack.py for a concrete example. An optional method which can be implemented is instance.get_variable_category(var_name, index) , which returns a category (a string, an integer or any hashable type) for each decision variable. If two variables have the same category, LearningSolver will use the same internal ML model to predict the values of both variables. By default, all variables belong to the \"default\" category, and therefore only one ML model is used for all variables. If the returned category is None , ML predictors will ignore the variable. It is not necessary to have a one-to-one correspondence between features and problem instances. One important (and deliberate) limitation of MIPLearn, however, is that get_instance_features() must always return arrays of the same length for all relevant instances of the problem. Similarly, get_variable_features(var_name, index) must also always return arrays of the same length for all variables in each category. It is up to the user to decide how to encode variable-length characteristics of the problem into fixed-length vectors. In graph problems, for example, graph embeddings can be used to reduce the (variable-length) lists of nodes and edges into a fixed-length structure that still preserves some properties of the graph. Different instance encodings may have a significant impact on performance. Obtaining heuristic solutions By default, LearningSolver uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver. In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts. For more significant performance benefits, LearningSolver can also be configured to place additional trust in the Machine Learning predictors, by using the mode=\"heuristic\" constructor argument. When operating in this mode, if an ML model is statistically shown (through stratified k-fold cross validation ) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see references and benchmark results ). Danger The heuristic mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible. Saving and loading solver state After solving a large number of training instances, it may be desirable to save the current state of LearningSolver to disk, so that the solver can still use the acquired knowledge after the application restarts. This can be accomplished by using the standard pickle module, as the following example illustrates: from miplearn import LearningSolver import pickle # Solve training instances training_instances = [...]
solver = LearningSolver() for instance in training_instances: solver.solve(instance) # Train machine-learning models solver.fit(training_instances) # Save trained solver to disk pickle.dump(solver, open(\"solver.pickle\", \"wb\")) # Application restarts... # Load trained solver from disk solver = pickle.load(open(\"solver.pickle\", \"rb\")) # Solve additional instances test_instances = [...] for instance in test_instances: solver.solve(instance) Solving training instances in parallel In many situations, training and test instances can be solved in parallel to accelerate the training process. LearningSolver provides the method parallel_solve(instances) to easily achieve this: from miplearn import LearningSolver training_instances = [...] solver = LearningSolver() solver.parallel_solve(training_instances, n_jobs=4) solver.fit(training_instances) # Test phase... test_instances = [...] solver.parallel_solve(test_instances) Current Limitations Only binary and continuous decision variables are currently supported.","title":"Usage"},{"location":"usage/#usage","text":"","title":"Usage"},{"location":"usage/#installation","text":"In these docs, we describe the Python/Pyomo version of the package, although a Julia/JuMP version is also available. A mixed-integer solver is also required and its Python bindings must be properly installed. Supported solvers are currently CPLEX and Gurobi. To install MIPLearn, run: pip3 install miplearn After installation, the package miplearn should become available to Python. It can be imported as follows: import miplearn","title":"Installation"},{"location":"usage/#using-learningsolver","text":"The main class provided by this package is LearningSolver , a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage: from miplearn import LearningSolver # List of user-provided instances training_instances = [...] test_instances = [...] # Create solver solver = LearningSolver() # Solve all training instances for instance in training_instances: solver.solve(instance) # Learn from training instances solver.fit(training_instances) # Solve all test instances for instance in test_instances: solver.solve(instance) In this example, we have two lists of user-provided instances: training_instances and test_instances . We start by solving all training instances. Since there is no historical information available at this point, the instances will be processed from scratch, with no ML acceleration. After solving each instance, the solver stores within each instance object the optimal solution, the optimal objective value, and other information that can be used to accelerate future solves. After all training instances are solved, we call solver.fit(training_instances) . This instructs the solver to train all its internal machine-learning models based on the solutions of the (solved) training instances. Subsequent calls to solver.solve(instance) will automatically use the trained Machine Learning models to accelerate the solution process.","title":"Using LearningSolver"},{"location":"usage/#describing-problem-instances","text":"Instances to be solved by LearningSolver must derive from the abstract class miplearn.Instance .
The following three abstract methods must be implemented: instance.to_model() , which returns a concrete Pyomo model corresponding to the instance; instance.get_instance_features() , which returns a 1-dimensional Numpy array of (numerical) features describing the entire instance; instance.get_variable_features(var_name, index) , which returns a 1-dimensional array of (numerical) features describing a particular decision variable. The first method is used by LearningSolver to construct a concrete Pyomo model, which will be provided to the internal MIP solver. The second and third methods provide an encoding of the instance, which can be used by the ML models to make predictions. In the knapsack problem, for example, an implementation may decide to provide as instance features the average weights, average prices, number of items and the size of the knapsack. The weight and the price of each individual item could be provided as variable features. See src/python/miplearn/problems/knapsack.py for a concrete example. An optional method which can be implemented is instance.get_variable_category(var_name, index) , which returns a category (a string, an integer or any hashable type) for each decision variable. If two variables have the same category, LearningSolver will use the same internal ML model to predict the values of both variables. By default, all variables belong to the \"default\" category, and therefore only one ML model is used for all variables. If the returned category is None , ML predictors will ignore the variable. It is not necessary to have a one-to-one correspondence between features and problem instances. One important (and deliberate) limitation of MIPLearn, however, is that get_instance_features() must always return arrays of the same length for all relevant instances of the problem. Similarly, get_variable_features(var_name, index) must also always return arrays of the same length for all variables in each category. It is up to the user to decide how to encode variable-length characteristics of the problem into fixed-length vectors. In graph problems, for example, graph embeddings can be used to reduce the (variable-length) lists of nodes and edges into a fixed-length structure that still preserves some properties of the graph. Different instance encodings may have a significant impact on performance.","title":"Describing problem instances"},{"location":"usage/#obtaining-heuristic-solutions","text":"By default, LearningSolver uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver. In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts. For more significant performance benefits, LearningSolver can also be configured to place additional trust in the Machine Learning predictors, by using the mode=\"heuristic\" constructor argument. When operating in this mode, if an ML model is statistically shown (through stratified k-fold cross validation ) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see references and benchmark results ).
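Enabling this mode only requires the constructor argument mentioned above. A minimal sketch, reusing the training_instances and test_instances lists from the earlier examples (the training instances are assumed to have been solved already):
from miplearn import LearningSolver

# Heuristic mode: faster solves, but no optimality guarantees (see warning below)
solver = LearningSolver(mode="heuristic")
solver.fit(training_instances)
for instance in test_instances:
    solver.solve(instance)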
Danger The heuristic mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible.","title":"Obtaining heuristic solutions"},{"location":"usage/#saving-and-loading-solver-state","text":"After solving a large number of training instances, it may be desirable to save the current state of LearningSolver to disk, so that the solver can still use the acquired knowledge after the application restarts. This can be accomplished by using the standard pickle module, as the following example illustrates: from miplearn import LearningSolver import pickle # Solve training instances training_instances = [...] solver = LearningSolver() for instance in training_instances: solver.solve(instance) # Train machine-learning models solver.fit(training_instances) # Save trained solver to disk pickle.dump(solver, open(\"solver.pickle\", \"wb\")) # Application restarts... # Load trained solver from disk solver = pickle.load(open(\"solver.pickle\", \"rb\")) # Solve additional instances test_instances = [...] for instance in test_instances: solver.solve(instance)","title":"Saving and loading solver state"},{"location":"usage/#solving-training-instances-in-parallel","text":"In many situations, training and test instances can be solved in parallel to accelerate the training process. LearningSolver provides the method parallel_solve(instances) to easily achieve this: from miplearn import LearningSolver training_instances = [...] solver = LearningSolver() solver.parallel_solve(training_instances, n_jobs=4) solver.fit(training_instances) # Test phase... test_instances = [...] 
solver.parallel_solve(test_instances)","title":"Solving training instances in parallel"},{"location":"usage/#current-limitations","text":"Only binary and continuous decision variables are currently supported.","title":"Current Limitations"}]} \ No newline at end of file diff --git a/dev/search/worker.js b/dev/search/worker.js index a3ccc07..9cce2f7 100644 --- a/dev/search/worker.js +++ b/dev/search/worker.js @@ -58,6 +58,7 @@ function onScriptsLoaded () { if (data.config && data.config.separator && data.config.separator.length) { lunr.tokenizer.separator = new RegExp(data.config.separator); } + if (data.index) { index = lunr.Index.load(data.index); data.docs.forEach(function (doc) { @@ -84,6 +85,7 @@ function onScriptsLoaded () { console.log('Lunr index built, search ready'); } allowSearch = true; + postMessage({config: data.config}); postMessage({allowSearch: allowSearch}); } diff --git a/dev/sitemap.xml b/dev/sitemap.xml index dbf3ef5..170d005 100644 --- a/dev/sitemap.xml +++ b/dev/sitemap.xml @@ -1,31 +1,25 @@ - - + None 2020-08-29 daily - - + None 2020-08-29 daily - - + None 2020-08-29 daily - - + None 2020-08-29 daily - - + None 2020-08-29 daily - - + None 2020-08-29 daily diff --git a/dev/sitemap.xml.gz b/dev/sitemap.xml.gz index af5e79091623b1ec4b189343eaec051f139913ea..54e83ff434e7771ebce44d308edf814628c661f9 100644 GIT binary patch literal 197 zcmV;$06PC4iwFp?;!0lv|8r?{Wo=<_E_iKh0PT{o4#FT1hW9=NVJ~2()=;`RI_U!t ziY*C+%AwV_7n+*5_y%P;?z{i;EyugpWYJRxFy6KJKo~~aDc>76wD@{Cl{M}}!5_hb zjwpp4w0MkhK2o|Y3jq`LBxfK#3>4Iu8lW4bj5Ju}T~VO?xb;e$z0;iL-xo@VlkY{v zDps<(teQ05*cMTvbc4PHeHXnn_RZ-JSu>nw&FNly}~zzLzVw9*4) zLWInO1hY}}_69-6)}C$l@83WBn&r)7u;{J>81HJlBMc+$ly8meYJ5H&@)9>9!1JKo^j8s@;DAjymQHWcw#L+v=>8w7cggE$C z6s%x5EAygC>(yH$s+X?Qm!PkrmBv0f{c#ow{bDxY2K)>7Zt2Ix3+bg+Z4(0k05vdQ A8~^|S diff --git a/dev/usage/index.html b/dev/usage/index.html index edf6cce..f86204d 100644 --- a/dev/usage/index.html +++ b/dev/usage/index.html @@ -8,7 +8,7 @@ - + Usage - MIPLearn @@ -24,6 +24,10 @@ + + + + @@ -118,7 +122,7 @@
  • - Edit on GitHub + Edit on GitHub
  • @@ -266,7 +270,13 @@ solver.parallel_solve(test_instances) + + + + + +