diff --git a/docs/404.html b/docs/404.html
index 7dbe612..dfd92d1 100644
--- a/docs/404.html
+++ b/docs/404.html
@@ -8,7 +8,7 @@
-
+
 MIPLearn
diff --git a/docs/about/index.html b/docs/about/index.html
index 5486cd8..78170d9 100644
--- a/docs/about/index.html
+++ b/docs/about/index.html
@@ -8,7 +8,7 @@
-
+
 About - MIPLearn
diff --git a/docs/benchmark/index.html b/docs/benchmark/index.html
index 2f672ad..16ae48a 100644
--- a/docs/benchmark/index.html
+++ b/docs/benchmark/index.html
@@ -8,7 +8,7 @@
-
+
 Benchmark - MIPLearn
diff --git a/docs/customization/index.html b/docs/customization/index.html
index 3551adf..702dae9 100644
--- a/docs/customization/index.html
+++ b/docs/customization/index.html
@@ -8,7 +8,7 @@
-
+
 Customization - MIPLearn
diff --git a/docs/index.html b/docs/index.html
index 520a8b8..f4c295d 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -8,7 +8,7 @@
-
+
 Home - MIPLearn
@@ -268,6 +268,6 @@
diff --git a/docs/js/highlight.pack.js b/docs/js/highlight.pack.js
deleted file mode 100644
index 763f9c5..0000000
--- a/docs/js/highlight.pack.js
+++ /dev/null
@@ -1,2 +0,0 @@
-/*! highlight.js v9.13.1 | BSD3 License | git.io/hljslicense */
-[one-line minified highlight.js 9.13.1 bundle, registering grammars for objectivec, sql, ruby, yaml, xml, markdown, perl, java, bash, shell, swift, json, nginx, ini, http, css, javascript, makefile, csharp, coffeescript, go, php, cpp, properties, diff, apache, rust, tex, r and python; the source was corrupted by HTML extraction and is not reproduced here]
\ No newline at end of file
diff --git a/docs/problems/index.html b/docs/problems/index.html
index 9ccf991..7a9bcfc 100644
--- a/docs/problems/index.html
+++ b/docs/problems/index.html
@@ -8,7 +8,7 @@
-
+
 Problems - MIPLearn
diff --git a/docs/search/lunr.js b/docs/search/lunr.js
index c218cc8..c353765 100644
--- a/docs/search/lunr.js
+++ b/docs/search/lunr.js
@@ -1,6 +1,6 @@
 /**
- * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.1.6
- * Copyright (C) 2018 Oliver Nightingale
+ * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.8
+ * Copyright (C) 2019 Oliver Nightingale
  * @license MIT
  */

@@ -54,14 +54,15 @@ var lunr = function (config) {
   return builder.build()
 }

-lunr.version = "2.1.6"
+lunr.version = "2.3.8"
 /*!
  * lunr.utils
- * Copyright (C) 2018 Oliver Nightingale
+ * Copyright (C) 2019 Oliver Nightingale
  */

 /**
  * A namespace containing utils for the rest of the lunr library
+ * @namespace lunr.utils
  */
 lunr.utils = {}

@@ -69,7 +70,8 @@ lunr.utils = {}
  * Print a warning message to the console.
  *
  * @param {String} message The message to be printed.
- * @memberOf Utils
+ * @memberOf lunr.utils
+ * @function
  */
 lunr.utils.warn = (function (global) {
   /* eslint-disable no-console */
@@ -90,7 +92,7 @@ lunr.utils.warn = (function (global) {
  *
  * @param {Any} obj The object to convert to a string.
  * @return {String} string representation of the passed object.
- * @memberOf Utils
+ * @memberOf lunr.utils
  */
 lunr.utils.asString = function (obj) {
   if (obj === void 0 || obj === null) {
@@ -99,6 +101,52 @@ lunr.utils.asString = function (obj) {
     return obj.toString()
   }
 }
+
+/**
+ * Clones an object.
+ *
+ * Will create a copy of an existing object such that any mutations
+ * on the copy cannot affect the original.
+ *
+ * Only shallow objects are supported, passing a nested object to this
+ * function will cause a TypeError.
+ *
+ * Objects with primitives, and arrays of primitives are supported.
+ *
+ * @param {Object} obj The object to clone.
+ * @return {Object} a clone of the passed object.
+ * @throws {TypeError} when a nested object is passed.
+ * @memberOf Utils
+ */
+lunr.utils.clone = function (obj) {
+  if (obj === null || obj === undefined) {
+    return obj
+  }
+
+  var clone = Object.create(null),
+      keys = Object.keys(obj)
+
+  for (var i = 0; i < keys.length; i++) {
+    var key = keys[i],
+        val = obj[key]
+
+    if (Array.isArray(val)) {
+      clone[key] = val.slice()
+      continue
+    }
+
+    if (typeof val === 'string' ||
+        typeof val === 'number' ||
+        typeof val === 'boolean') {
+      clone[key] = val
+      continue
+    }
+
+    throw new TypeError("clone is not deep and does not support nested objects")
+  }
+
+  return clone
+}
 lunr.FieldRef = function (docRef, fieldName, stringValue) {
   this.docRef = docRef
   this.fieldName = fieldName
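The lunr.utils.clone helper added in the hunk above is deliberately shallow: top-level primitives and arrays of primitives are copied by value, and anything nested throws rather than silently sharing state. A minimal sketch of that behaviour (variable names below are illustrative, not part of the patch):

    var original = { lang: "en", boost: 2, tags: ["a", "b"] }
    var copy = lunr.utils.clone(original)

    copy.tags.push("c")                      // mutates only the copied array
    console.log(original.tags)               // ["a", "b"] -- original untouched

    try {
      lunr.utils.clone({ nested: { x: 1 } }) // nested objects are rejected
    } catch (e) {
      console.log(e instanceof TypeError)    // true: clone is not deep
    }

The tokenizer changes later in this diff rely on this helper to give every token its own metadata object, so pipeline functions can mutate token metadata without cross-talk.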
@@ -127,6 +175,139 @@ lunr.FieldRef.prototype.toString = function () {
   return this._stringValue
 }

+/*!
+ * lunr.Set
+ * Copyright (C) 2019 Oliver Nightingale
+ */
+
+/**
+ * A lunr set.
+ *
+ * @constructor
+ */
+lunr.Set = function (elements) {
+  this.elements = Object.create(null)
+
+  if (elements) {
+    this.length = elements.length
+
+    for (var i = 0; i < this.length; i++) {
+      this.elements[elements[i]] = true
+    }
+  } else {
+    this.length = 0
+  }
+}
+
+/**
+ * A complete set that contains all elements.
+ *
+ * @static
+ * @readonly
+ * @type {lunr.Set}
+ */
+lunr.Set.complete = {
+  intersect: function (other) {
+    return other
+  },
+
+  union: function (other) {
+    return other
+  },
+
+  contains: function () {
+    return true
+  }
+}
+
+/**
+ * An empty set that contains no elements.
+ *
+ * @static
+ * @readonly
+ * @type {lunr.Set}
+ */
+lunr.Set.empty = {
+  intersect: function () {
+    return this
+  },
+
+  union: function (other) {
+    return other
+  },
+
+  contains: function () {
+    return false
+  }
+}
+
+/**
+ * Returns true if this set contains the specified object.
+ *
+ * @param {object} object - Object whose presence in this set is to be tested.
+ * @returns {boolean} - True if this set contains the specified object.
+ */
+lunr.Set.prototype.contains = function (object) {
+  return !!this.elements[object]
+}
+
+/**
+ * Returns a new set containing only the elements that are present in both
+ * this set and the specified set.
+ *
+ * @param {lunr.Set} other - set to intersect with this set.
+ * @returns {lunr.Set} a new set that is the intersection of this and the specified set.
+ */
+
+lunr.Set.prototype.intersect = function (other) {
+  var a, b, elements, intersection = []

+  if (other === lunr.Set.complete) {
+    return this
+  }
+
+  if (other === lunr.Set.empty) {
+    return other
+  }
+
+  if (this.length < other.length) {
+    a = this
+    b = other
+  } else {
+    a = other
+    b = this
+  }
+
+  elements = Object.keys(a.elements)
+
+  for (var i = 0; i < elements.length; i++) {
+    var element = elements[i]
+    if (element in b.elements) {
+      intersection.push(element)
+    }
+  }
+
+  return new lunr.Set (intersection)
+}
+
+/**
+ * Returns a new set combining the elements of this and the specified set.
+ *
+ * @param {lunr.Set} other - set to union with this set.
+ * @return {lunr.Set} a new set that is the union of this and the specified set.
+ */
+
+lunr.Set.prototype.union = function (other) {
+  if (other === lunr.Set.complete) {
+    return lunr.Set.complete
+  }
+
+  if (other === lunr.Set.empty) {
+    return this
+  }
+
+  return new lunr.Set(Object.keys(this.elements).concat(Object.keys(other.elements)))
+}
 /**
  * A function to calculate the inverse document frequency for
  * a posting. This is shared between the builder and the index
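The lunr.Set type added above stores elements as keys of a prototype-free object; intersect always walks the smaller of the two key sets, and the complete/empty sentinels let callers represent "all documents" and "no documents" without materialising them. A brief usage sketch, based only on the code in this hunk:

    var a = new lunr.Set(["x", "y"])
    var b = new lunr.Set(["y", "z"])

    a.intersect(b).contains("y")             // true  ("y" is in both sets)
    a.intersect(b).contains("x")             // false
    a.union(b).contains("z")                 // true

    // The sentinels act as identities and short-circuit the loops:
    a.intersect(lunr.Set.complete) === a     // true
    a.union(lunr.Set.empty) === a            // true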
This token will not be passed to any downstream pipeline + * functions and will not be added to the index. * * Multiple tokens can be returned by returning an array of tokens. Each token will be passed * to any downstream pipeline functions and all will returned tokens will be added to the index. @@ -480,9 +673,9 @@ lunr.Pipeline.prototype.run = function (tokens) { for (var j = 0; j < tokens.length; j++) { var result = fn(tokens[j], j, tokens) - if (result === void 0 || result === '') continue + if (result === null || result === void 0 || result === '') continue - if (result instanceof Array) { + if (Array.isArray(result)) { for (var k = 0; k < result.length; k++) { memo.push(result[k]) } @@ -503,10 +696,12 @@ lunr.Pipeline.prototype.run = function (tokens) { * token and mapping the resulting tokens back to strings. * * @param {string} str - The string to pass through the pipeline. + * @param {?object} metadata - Optional metadata to associate with the token + * passed to the pipeline. * @returns {string[]} */ -lunr.Pipeline.prototype.runString = function (str) { - var token = new lunr.Token (str) +lunr.Pipeline.prototype.runString = function (str, metadata) { + var token = new lunr.Token (str, metadata) return this.run([token]).map(function (t) { return t.toString() @@ -537,7 +732,7 @@ lunr.Pipeline.prototype.toJSON = function () { } /*! * lunr.Vector - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -698,15 +893,14 @@ lunr.Vector.prototype.dot = function (otherVector) { } /** - * Calculates the cosine similarity between this vector and another - * vector. + * Calculates the similarity between this vector and another vector. * * @param {lunr.Vector} otherVector - The other vector to calculate the * similarity with. * @returns {Number} */ lunr.Vector.prototype.similarity = function (otherVector) { - return this.dot(otherVector) / (this.magnitude() * otherVector.magnitude()) + return this.dot(otherVector) / this.magnitude() || 0 } /** @@ -735,7 +929,7 @@ lunr.Vector.prototype.toJSON = function () { /* eslint-disable */ /*! * lunr.stemmer - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt */ @@ -748,6 +942,7 @@ lunr.Vector.prototype.toJSON = function () { * @param {lunr.Token} token - The string to stem * @returns {lunr.Token} * @see {@link lunr.Pipeline} + * @function */ lunr.stemmer = (function(){ var step2list = { @@ -956,7 +1151,7 @@ lunr.stemmer = (function(){ lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer') /*! * lunr.stopWordFilter - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -966,6 +1161,7 @@ lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer') * The built in lunr.stopWordFilter is built using this generator and can be used * to generate custom stopWordFilters for applications or non English languages. * + * @function * @param {Array} token The token to pass through the filter * @returns {lunr.PipelineFunction} * @see lunr.Pipeline @@ -989,6 +1185,7 @@ lunr.generateStopWordFilter = function (stopWords) { * This is intended to be used in the Pipeline. If the token does not pass the * filter then undefined will be returned. * + * @function * @implements {lunr.PipelineFunction} * @params {lunr.Token} token - A token to check for being a stop word. 
* @returns {lunr.Token} @@ -1119,7 +1316,7 @@ lunr.stopWordFilter = lunr.generateStopWordFilter([ lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter') /*! * lunr.trimmer - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -1146,7 +1343,7 @@ lunr.trimmer = function (token) { lunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer') /*! * lunr.TokenSet - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -1263,50 +1460,58 @@ lunr.TokenSet.fromFuzzyString = function (str, editDistance) { if (frame.str.length == 1) { noEditNode.final = true - } else { - stack.push({ - node: noEditNode, - editsRemaining: frame.editsRemaining, - str: frame.str.slice(1) - }) } + + stack.push({ + node: noEditNode, + editsRemaining: frame.editsRemaining, + str: frame.str.slice(1) + }) + } + + if (frame.editsRemaining == 0) { + continue + } + + // insertion + if ("*" in frame.node.edges) { + var insertionNode = frame.node.edges["*"] + } else { + var insertionNode = new lunr.TokenSet + frame.node.edges["*"] = insertionNode } + if (frame.str.length == 0) { + insertionNode.final = true + } + + stack.push({ + node: insertionNode, + editsRemaining: frame.editsRemaining - 1, + str: frame.str + }) + // deletion // can only do a deletion if we have enough edits remaining // and if there are characters left to delete in the string - if (frame.editsRemaining > 0 && frame.str.length > 1) { - var char = frame.str.charAt(1), - deletionNode - - if (char in frame.node.edges) { - deletionNode = frame.node.edges[char] - } else { - deletionNode = new lunr.TokenSet - frame.node.edges[char] = deletionNode - } - - if (frame.str.length <= 2) { - deletionNode.final = true - } else { - stack.push({ - node: deletionNode, - editsRemaining: frame.editsRemaining - 1, - str: frame.str.slice(2) - }) - } + if (frame.str.length > 1) { + stack.push({ + node: frame.node, + editsRemaining: frame.editsRemaining - 1, + str: frame.str.slice(1) + }) } // deletion // just removing the last character from the str - if (frame.editsRemaining > 0 && frame.str.length == 1) { + if (frame.str.length == 1) { frame.node.final = true } // substitution // can only do a substitution if we have enough edits remaining // and if there are characters left to substitute - if (frame.editsRemaining > 0 && frame.str.length >= 1) { + if (frame.str.length >= 1) { if ("*" in frame.node.edges) { var substitutionNode = frame.node.edges["*"] } else { @@ -1316,40 +1521,19 @@ lunr.TokenSet.fromFuzzyString = function (str, editDistance) { if (frame.str.length == 1) { substitutionNode.final = true - } else { - stack.push({ - node: substitutionNode, - editsRemaining: frame.editsRemaining - 1, - str: frame.str.slice(1) - }) } - } - // insertion - // can only do insertion if there are edits remaining - if (frame.editsRemaining > 0) { - if ("*" in frame.node.edges) { - var insertionNode = frame.node.edges["*"] - } else { - var insertionNode = new lunr.TokenSet - frame.node.edges["*"] = insertionNode - } - - if (frame.str.length == 0) { - insertionNode.final = true - } else { - stack.push({ - node: insertionNode, - editsRemaining: frame.editsRemaining - 1, - str: frame.str - }) - } + stack.push({ + node: substitutionNode, + editsRemaining: frame.editsRemaining - 1, + str: frame.str.slice(1) + }) } // transposition // can only do a transposition if there are edits remaining // and there are enough characters to transpose - if (frame.editsRemaining > 0 && frame.str.length > 1) { + if 
(frame.str.length > 1) { var charA = frame.str.charAt(0), charB = frame.str.charAt(1), transposeNode @@ -1363,13 +1547,13 @@ lunr.TokenSet.fromFuzzyString = function (str, editDistance) { if (frame.str.length == 1) { transposeNode.final = true - } else { - stack.push({ - node: transposeNode, - editsRemaining: frame.editsRemaining - 1, - str: charA + frame.str.slice(2) - }) } + + stack.push({ + node: transposeNode, + editsRemaining: frame.editsRemaining - 1, + str: charA + frame.str.slice(2) + }) } } @@ -1388,14 +1572,13 @@ lunr.TokenSet.fromFuzzyString = function (str, editDistance) { */ lunr.TokenSet.fromString = function (str) { var node = new lunr.TokenSet, - root = node, - wildcardFound = false + root = node /* * Iterates through all characters within the passed string * appending a node for each character. * - * As soon as a wildcard character is found then a self + * When a wildcard character is found then a self * referencing edge is introduced to continually match * any number of any characters. */ @@ -1404,7 +1587,6 @@ lunr.TokenSet.fromString = function (str) { final = (i == len - 1) if (char == "*") { - wildcardFound = true node.edges[char] = node node.final = final @@ -1414,11 +1596,6 @@ lunr.TokenSet.fromString = function (str) { node.edges[char] = next node = next - - // TODO: is this needed anymore? - if (wildcardFound) { - node.edges["*"] = root - } } } @@ -1429,6 +1606,10 @@ lunr.TokenSet.fromString = function (str) { * Converts this TokenSet into an array of strings * contained within the TokenSet. * + * This is not intended to be used on a TokenSet that + * contains wildcards, in these cases the results are + * undefined and are likely to cause an infinite loop. + * * @returns {string[]} */ lunr.TokenSet.prototype.toArray = function () { @@ -1445,6 +1626,11 @@ lunr.TokenSet.prototype.toArray = function () { len = edges.length if (frame.node.final) { + /* In Safari, at this point the prefix is sometimes corrupted, see: + * https://github.com/olivernn/lunr.js/issues/279 Calling any + * String.prototype method forces Safari to "cast" this string to what + * it's supposed to be, fixing the bug. */ + frame.prefix.charAt(0) words.push(frame.prefix) } @@ -1641,7 +1827,7 @@ lunr.TokenSet.Builder.prototype.minimize = function (downTo) { } /*! * lunr.Index - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -1655,7 +1841,7 @@ lunr.TokenSet.Builder.prototype.minimize = function (downTo) { * @constructor * @param {Object} attrs - The attributes of the built search index. * @param {Object} attrs.invertedIndex - An index of term/field to document reference. - * @param {Object} attrs.documentVectors - Document vectors keyed by document reference. + * @param {Object} attrs.fieldVectors - Field vectors * @param {lunr.TokenSet} attrs.tokenSet - An set of all corpus tokens. * @param {string[]} attrs.fields - The names of indexed document fields. * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms. @@ -1701,6 +1887,12 @@ lunr.Index = function (attrs) { * to provide fuzzy matching, e.g. 'hello~2' will match documents with hello with an edit distance of 2. * Avoid large values for edit distance to improve query performance. * + * Each term also supports a presence modifier. By default a term's presence in document is optional, however + * this can be changed to either required or prohibited. For a term's presence to be required in a document the + * term should be prefixed with a '+', e.g. 
`+foo bar` is a search for documents that must contain 'foo' and + * optionally contain 'bar'. Conversely a leading '-' sets the terms presence to prohibited, i.e. it must not + * appear in a document, e.g. `-foo bar` is a search for documents that do not contain 'foo' but may contain 'bar'. + * * To escape special characters the backslash character '\' can be used, this allows searches to include * characters that would normally be considered modifiers, e.g. `foo\~2` will search for a term "foo~2" instead * of attempting to apply a boost of 2 to the search term "foo". @@ -1716,13 +1908,16 @@ lunr.Index = function (attrs) { * hello^10 * @example term with an edit distance of 2 * hello~2 + * @example terms with presence modifiers + * -foo +bar baz */ /** * Performs a search against the index using lunr query syntax. * * Results will be returned sorted by their score, the most relevant results - * will be returned first. + * will be returned first. For details on how the score is calculated, please see + * the {@link https://lunrjs.com/guides/searching.html#scoring|guide}. * * For more programmatic querying use lunr.Index#query. * @@ -1773,7 +1968,18 @@ lunr.Index.prototype.query = function (fn) { var query = new lunr.Query(this.fields), matchingFields = Object.create(null), queryVectors = Object.create(null), - termFieldCache = Object.create(null) + termFieldCache = Object.create(null), + requiredMatches = Object.create(null), + prohibitedMatches = Object.create(null) + + /* + * To support field level boosts a query vector is created per + * field. An empty vector is eagerly created to support negated + * queries. + */ + for (var i = 0; i < this.fields.length; i++) { + queryVectors[this.fields[i]] = new lunr.Vector + } fn.call(query, query) @@ -1787,10 +1993,13 @@ lunr.Index.prototype.query = function (fn) { * for a single query term. */ var clause = query.clauses[i], - terms = null + terms = null, + clauseMatches = lunr.Set.complete if (clause.usePipeline) { - terms = this.pipeline.runString(clause.term) + terms = this.pipeline.runString(clause.term, { + fields: clause.fields + }) } else { terms = [clause.term] } @@ -1814,6 +2023,21 @@ lunr.Index.prototype.query = function (fn) { var termTokenSet = lunr.TokenSet.fromClause(clause), expandedTerms = this.tokenSet.intersect(termTokenSet).toArray() + /* + * If a term marked as required does not exist in the tokenSet it is + * impossible for the search to return any matches. We set all the field + * scoped required matches set to empty and stop examining any further + * clauses. + */ + if (expandedTerms.length === 0 && clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = lunr.Set.empty + } + + break + } + for (var j = 0; j < expandedTerms.length; j++) { /* * For each term get the posting and termIndex, this is required for @@ -1835,26 +2059,50 @@ lunr.Index.prototype.query = function (fn) { var field = clause.fields[k], fieldPosting = posting[field], matchingDocumentRefs = Object.keys(fieldPosting), - termField = expandedTerm + "/" + field + termField = expandedTerm + "/" + field, + matchingDocumentsSet = new lunr.Set(matchingDocumentRefs) /* - * To support field level boosts a query vector is created per - * field. This vector is populated using the termIndex found for - * the term and a unit value with the appropriate boost applied. 
+ * if the presence of this term is required ensure that the matching + * documents are added to the set of required matches for this clause. * - * If the query vector for this field does not exist yet it needs - * to be created. */ - if (queryVectors[field] === undefined) { - queryVectors[field] = new lunr.Vector + if (clause.presence == lunr.Query.presence.REQUIRED) { + clauseMatches = clauseMatches.union(matchingDocumentsSet) + + if (requiredMatches[field] === undefined) { + requiredMatches[field] = lunr.Set.complete + } + } + + /* + * if the presence of this term is prohibited ensure that the matching + * documents are added to the set of prohibited matches for this field, + * creating that set if it does not yet exist. + */ + if (clause.presence == lunr.Query.presence.PROHIBITED) { + if (prohibitedMatches[field] === undefined) { + prohibitedMatches[field] = lunr.Set.empty + } + + prohibitedMatches[field] = prohibitedMatches[field].union(matchingDocumentsSet) + + /* + * Prohibited matches should not be part of the query vector used for + * similarity scoring and no metadata should be extracted so we continue + * to the next field + */ + continue } /* + * The query field vector is populated using the termIndex found for + * the term and a unit value with the appropriate boost applied. * Using upsert because there could already be an entry in the vector * for the term we are working with. In that case we just add the scores * together. */ - queryVectors[field].upsert(termIndex, 1 * clause.boost, function (a, b) { return a + b }) + queryVectors[field].upsert(termIndex, clause.boost, function (a, b) { return a + b }) /** * If we've already seen this term, field combo then we've already collected @@ -1888,12 +2136,65 @@ lunr.Index.prototype.query = function (fn) { } } } + + /** + * If the presence was required we need to update the requiredMatches field sets. + * We do this after all fields for the term have collected their matches because + * the clause terms presence is required in _any_ of the fields not _all_ of the + * fields. + */ + if (clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = requiredMatches[field].intersect(clauseMatches) + } + } + } + + /** + * Need to combine the field scoped required and prohibited + * matching documents into a global set of required and prohibited + * matches + */ + var allRequiredMatches = lunr.Set.complete, + allProhibitedMatches = lunr.Set.empty + + for (var i = 0; i < this.fields.length; i++) { + var field = this.fields[i] + + if (requiredMatches[field]) { + allRequiredMatches = allRequiredMatches.intersect(requiredMatches[field]) + } + + if (prohibitedMatches[field]) { + allProhibitedMatches = allProhibitedMatches.union(prohibitedMatches[field]) + } } var matchingFieldRefs = Object.keys(matchingFields), results = [], matches = Object.create(null) + /* + * If the query is negated (contains only prohibited terms) + * we need to get _all_ fieldRefs currently existing in the + * index. This is only done when we know that the query is + * entirely prohibited terms to avoid any cost of getting all + * fieldRefs unnecessarily. + * + * Additionally, blank MatchData must be created to correctly + * populate the results. 
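+ * For example, the fully negated query "-foo" makes every field vector
+ * in the index a candidate match; documents containing "foo" are then
+ * filtered out below, and the remaining documents score 0 because the
+ * eagerly created query vectors are still empty.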
+ */ + if (query.isNegated()) { + matchingFieldRefs = Object.keys(this.fieldVectors) + + for (var i = 0; i < matchingFieldRefs.length; i++) { + var matchingFieldRef = matchingFieldRefs[i] + var fieldRef = lunr.FieldRef.fromString(matchingFieldRef) + matchingFields[matchingFieldRef] = new lunr.MatchData + } + } + for (var i = 0; i < matchingFieldRefs.length; i++) { /* * Currently we have document fields that match the query, but we @@ -1904,8 +2205,17 @@ lunr.Index.prototype.query = function (fn) { * above, and combined into a final document score using addition. */ var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]), - docRef = fieldRef.docRef, - fieldVector = this.fieldVectors[fieldRef], + docRef = fieldRef.docRef + + if (!allRequiredMatches.contains(docRef)) { + continue + } + + if (allProhibitedMatches.contains(docRef)) { + continue + } + + var fieldVector = this.fieldVectors[fieldRef], score = queryVectors[fieldRef.fieldName].similarity(fieldVector), docMatch @@ -1970,7 +2280,7 @@ lunr.Index.load = function (serializedIndex) { var attrs = {}, fieldVectors = {}, serializedVectors = serializedIndex.fieldVectors, - invertedIndex = {}, + invertedIndex = Object.create(null), serializedInvertedIndex = serializedIndex.invertedIndex, tokenSetBuilder = new lunr.TokenSet.Builder, pipeline = lunr.Pipeline.load(serializedIndex.pipeline) @@ -2009,7 +2319,7 @@ lunr.Index.load = function (serializedIndex) { } /*! * lunr.Builder - * Copyright (C) 2018 Oliver Nightingale + * Copyright (C) 2019 Oliver Nightingale */ /** @@ -2038,7 +2348,8 @@ lunr.Index.load = function (serializedIndex) { */ lunr.Builder = function () { this._ref = "id" - this._fields = [] + this._fields = Object.create(null) + this._documents = Object.create(null) this.invertedIndex = Object.create(null) this.fieldTermFrequencies = {} this.fieldLengths = {} @@ -2068,6 +2379,20 @@ lunr.Builder.prototype.ref = function (ref) { this._ref = ref } +/** + * A function that is used to extract a field from a document. + * + * Lunr expects a field to be at the top level of a document, if however the field + * is deeply nested within a document an extractor function can be used to extract + * the right field for indexing. + * + * @callback fieldExtractor + * @param {object} doc - The document being added to the index. + * @returns {?(string|object|object[])} obj - The object that will be indexed for this field. + * @example Extracting a nested field + * function (doc) { return doc.nested.field } + */ + /** * Adds a field to the list of document fields that will be indexed. Every document being * indexed should have this field. Null values for this field in indexed documents will @@ -2076,10 +2401,22 @@ lunr.Builder.prototype.ref = function (ref) { * All fields should be added before adding documents to the index. Adding fields after * a document has been indexed will have no effect on already indexed documents. * - * @param {string} field - The name of a field to index in all documents. + * Fields can be boosted at build time. This allows terms within that field to have more + * importance when ranking search results. Use a field boost to specify that matches within + * one field are more important than other fields. + * + * @param {string} fieldName - The name of a field to index in all documents. + * @param {object} attributes - Optional attributes associated with this field. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this field. 
+ * @param {fieldExtractor} [attributes.extractor] - Function to extract a field from a document. + * @throws {RangeError} fieldName cannot contain unsupported characters '/' */ -lunr.Builder.prototype.field = function (field) { - this._fields.push(field) +lunr.Builder.prototype.field = function (fieldName, attributes) { + if (/\//.test(fieldName)) { + throw new RangeError ("Field '" + fieldName + "' contains illegal character '/'") + } + + this._fields[fieldName] = attributes || {} } /** @@ -2121,17 +2458,27 @@ lunr.Builder.prototype.k1 = function (number) { * it should have all fields defined for indexing, though null or undefined values will not * cause errors. * + * Entire documents can be boosted at build time. Applying a boost to a document indicates that + * this document should rank higher in search results than other documents. + * * @param {object} doc - The document to add to the index. + * @param {object} attributes - Optional attributes associated with this document. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this document. */ -lunr.Builder.prototype.add = function (doc) { - var docRef = doc[this._ref] +lunr.Builder.prototype.add = function (doc, attributes) { + var docRef = doc[this._ref], + fields = Object.keys(this._fields) + this._documents[docRef] = attributes || {} this.documentCount += 1 - for (var i = 0; i < this._fields.length; i++) { - var fieldName = this._fields[i], - field = doc[fieldName], - tokens = this.tokenizer(field), + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i], + extractor = this._fields[fieldName].extractor, + field = extractor ? extractor(doc) : doc[fieldName], + tokens = this.tokenizer(field, { + fields: [fieldName] + }), terms = this.pipeline.run(tokens), fieldRef = new lunr.FieldRef (docRef, fieldName), fieldTerms = Object.create(null) @@ -2159,8 +2506,8 @@ lunr.Builder.prototype.add = function (doc) { posting["_index"] = this.termIndex this.termIndex += 1 - for (var k = 0; k < this._fields.length; k++) { - posting[this._fields[k]] = Object.create(null) + for (var k = 0; k < fields.length; k++) { + posting[fields[k]] = Object.create(null) } this.invertedIndex[term] = posting @@ -2211,9 +2558,11 @@ lunr.Builder.prototype.calculateAverageFieldLengths = function () { accumulator[field] += this.fieldLengths[fieldRef] } - for (var i = 0; i < this._fields.length; i++) { - var field = this._fields[i] - accumulator[field] = accumulator[field] / documentsWithField[field] + var fields = Object.keys(this._fields) + + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i] + accumulator[fieldName] = accumulator[fieldName] / documentsWithField[fieldName] } this.averageFieldLength = accumulator @@ -2232,13 +2581,17 @@ lunr.Builder.prototype.createFieldVectors = function () { for (var i = 0; i < fieldRefsLength; i++) { var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]), - field = fieldRef.fieldName, + fieldName = fieldRef.fieldName, fieldLength = this.fieldLengths[fieldRef], fieldVector = new lunr.Vector, termFrequencies = this.fieldTermFrequencies[fieldRef], terms = Object.keys(termFrequencies), termsLength = terms.length + + var fieldBoost = this._fields[fieldName].boost || 1, + docBoost = this._documents[fieldRef.docRef].boost || 1 + for (var j = 0; j < termsLength; j++) { var term = terms[j], tf = termFrequencies[term], @@ -2252,7 +2605,9 @@ lunr.Builder.prototype.createFieldVectors = function () { idf = termIdfCache[term] } - score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - 
this._b + this._b * (fieldLength / this.averageFieldLength[field])) + tf) + score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[fieldName])) + tf) + score *= fieldBoost + score *= docBoost scoreWithPrecision = Math.round(score * 1000) / 1000 // Converts 1.23456789 to 1.234. // Reducing the precision so that the vectors take up less @@ -2298,7 +2653,7 @@ lunr.Builder.prototype.build = function () { invertedIndex: this.invertedIndex, fieldVectors: this.fieldVectors, tokenSet: this.tokenSet, - fields: this._fields, + fields: Object.keys(this._fields), pipeline: this.searchPipeline }) } @@ -2336,7 +2691,7 @@ lunr.Builder.prototype.use = function (fn) { */ lunr.MatchData = function (term, field, metadata) { var clonedMetadata = Object.create(null), - metadataKeys = Object.keys(metadata) + metadataKeys = Object.keys(metadata || {}) // Cloning the metadata to prevent the original // being mutated during match data combination. @@ -2349,8 +2704,11 @@ lunr.MatchData = function (term, field, metadata) { } this.metadata = Object.create(null) - this.metadata[term] = Object.create(null) - this.metadata[term][field] = clonedMetadata + + if (term !== undefined) { + this.metadata[term] = Object.create(null) + this.metadata[term][field] = clonedMetadata + } } /** @@ -2465,11 +2823,42 @@ lunr.Query = function (allFields) { * wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING * }) */ + lunr.Query.wildcard = new String ("*") lunr.Query.wildcard.NONE = 0 lunr.Query.wildcard.LEADING = 1 lunr.Query.wildcard.TRAILING = 2 +/** + * Constants for indicating what kind of presence a term must have in matching documents. + * + * @constant + * @enum {number} + * @see lunr.Query~Clause + * @see lunr.Query#clause + * @see lunr.Query#term + * @example query term with required presence + * query.term('foo', { presence: lunr.Query.presence.REQUIRED }) + */ +lunr.Query.presence = { + /** + * Term's presence in a document is optional, this is the default value. + */ + OPTIONAL: 1, + + /** + * Term's presence in a document is required, documents that do not contain + * this term will not be returned. + */ + REQUIRED: 2, + + /** + * Term's presence in a document is prohibited, documents that do contain + * this term will not be returned. + */ + PROHIBITED: 3 +} + /** * A single clause in a {@link lunr.Query} contains a term and details on how to * match that term against a {@link lunr.Index}. @@ -2479,7 +2868,8 @@ lunr.Query.wildcard.TRAILING = 2 * @property {number} [boost=1] - Any boost that should be applied when matching this clause. * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be. * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline. - * @property {number} [wildcard=0] - Whether the term should have wildcards appended or prepended. + * @property {number} [wildcard=lunr.Query.wildcard.NONE] - Whether the term should have wildcards appended or prepended. + * @property {number} [presence=lunr.Query.presence.OPTIONAL] - The terms presence in any matching documents. */ /** @@ -2517,17 +2907,44 @@ lunr.Query.prototype.clause = function (clause) { clause.term = "" + clause.term + "*" } + if (!('presence' in clause)) { + clause.presence = lunr.Query.presence.OPTIONAL + } + this.clauses.push(clause) return this } +/** + * A negated query is one in which every clause has a presence of + * prohibited. 
These queries require some special processing to return + * the expected results. + * + * @returns boolean + */ +lunr.Query.prototype.isNegated = function () { + for (var i = 0; i < this.clauses.length; i++) { + if (this.clauses[i].presence != lunr.Query.presence.PROHIBITED) { + return false + } + } + + return true +} + /** * Adds a term to the current query, under the covers this will create a {@link lunr.Query~Clause} * to the list of clauses that make up this query. * - * @param {string} term - The term to add to the query. - * @param {Object} [options] - Any additional properties to add to the query clause. + * The term is used as is, i.e. no tokenization will be performed by this method. Instead conversion + * to a token or token-like string should be done before calling this method. + * + * The term will be converted to a string by calling `toString`. Multiple terms can be passed as an + * array, each term in the array will share the same options. + * + * @param {object|object[]} term - The term(s) to add to the query. + * @param {object} [options] - Any additional properties to add to the query clause. * @returns {lunr.Query} * @see lunr.Query#clause * @see lunr.Query~Clause @@ -2539,10 +2956,17 @@ lunr.Query.prototype.clause = function (clause) { * boost: 10, * wildcard: lunr.Query.wildcard.TRAILING * }) + * @example using lunr.tokenizer to convert a string to tokens before using them as terms + * query.term(lunr.tokenizer("foo bar")) */ lunr.Query.prototype.term = function (term, options) { + if (Array.isArray(term)) { + term.forEach(function (t) { this.term(t, lunr.utils.clone(options)) }, this) + return this + } + var clause = options || {} - clause.term = term + clause.term = term.toString() this.clause(clause) @@ -2654,6 +3078,7 @@ lunr.QueryLexer.FIELD = 'FIELD' lunr.QueryLexer.TERM = 'TERM' lunr.QueryLexer.EDIT_DISTANCE = 'EDIT_DISTANCE' lunr.QueryLexer.BOOST = 'BOOST' +lunr.QueryLexer.PRESENCE = 'PRESENCE' lunr.QueryLexer.lexField = function (lexer) { lexer.backup() @@ -2742,6 +3167,22 @@ lunr.QueryLexer.lexText = function (lexer) { return lunr.QueryLexer.lexBoost } + // "+" indicates term presence is required + // checking for length to ensure that only + // leading "+" are considered + if (char == "+" && lexer.width() === 1) { + lexer.emit(lunr.QueryLexer.PRESENCE) + return lunr.QueryLexer.lexText + } + + // "-" indicates term presence is prohibited + // checking for length to ensure that only + // leading "-" are considered + if (char == "-" && lexer.width() === 1) { + lexer.emit(lunr.QueryLexer.PRESENCE) + return lunr.QueryLexer.lexText + } + if (char.match(lunr.QueryLexer.termSeparator)) { return lunr.QueryLexer.lexTerm } @@ -2759,7 +3200,7 @@ lunr.QueryParser.prototype.parse = function () { this.lexer.run() this.lexemes = this.lexer.lexemes - var state = lunr.QueryParser.parseFieldOrTerm + var state = lunr.QueryParser.parseClause while (state) { state = state(this) @@ -2784,7 +3225,7 @@ lunr.QueryParser.prototype.nextClause = function () { this.currentClause = {} } -lunr.QueryParser.parseFieldOrTerm = function (parser) { +lunr.QueryParser.parseClause = function (parser) { var lexeme = parser.peekLexeme() if (lexeme == undefined) { @@ -2792,6 +3233,8 @@ lunr.QueryParser.parseFieldOrTerm = function (parser) { } switch (lexeme.type) { + case lunr.QueryLexer.PRESENCE: + return lunr.QueryParser.parsePresence case lunr.QueryLexer.FIELD: return lunr.QueryParser.parseField case lunr.QueryLexer.TERM: @@ -2807,6 +3250,43 @@ lunr.QueryParser.parseFieldOrTerm = function (parser) 
{ } } +lunr.QueryParser.parsePresence = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + switch (lexeme.str) { + case "-": + parser.currentClause.presence = lunr.Query.presence.PROHIBITED + break + case "+": + parser.currentClause.presence = lunr.Query.presence.REQUIRED + break + default: + var errorMessage = "unrecognised presence operator'" + lexeme.str + "'" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + var errorMessage = "expecting term or field, found nothing" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.FIELD: + return lunr.QueryParser.parseField + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expecting term or field, found '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + lunr.QueryParser.parseField = function (parser) { var lexeme = parser.consumeLexeme() @@ -2870,6 +3350,9 @@ lunr.QueryParser.parseTerm = function (parser) { return lunr.QueryParser.parseEditDistance case lunr.QueryLexer.BOOST: return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence default: var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) @@ -2910,6 +3393,9 @@ lunr.QueryParser.parseEditDistance = function (parser) { return lunr.QueryParser.parseEditDistance case lunr.QueryLexer.BOOST: return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence default: var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) @@ -2950,6 +3436,9 @@ lunr.QueryParser.parseBoost = function (parser) { return lunr.QueryParser.parseEditDistance case lunr.QueryLexer.BOOST: return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence default: var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) diff --git a/docs/search/search_index.json b/docs/search/search_index.json index 41bef43..7e09197 100644 --- a/docs/search/search_index.json +++ b/docs/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"MIPLearn MIPLearn is an extensible framework for Learning-Enhanced Mixed-Integer Optimization , an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data. The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see benchmark results and references for more details). 
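As a rough illustration of this workflow, a solver can be trained on a batch of previously solved instances and then applied to new, similar instances. The sketch below is editorial: it reuses the parallel_solve and fit calls shown in the benchmark section of these docs, while the solve method and the instance lists are assumed placeholders rather than a confirmed API.

from miplearn import LearningSolver

# Train: solve historical instances and let the ML components learn from
# them (fit on LearningSolver is assumed by analogy with BenchmarkRunner.fit,
# documented later in these pages).
train_instances = [...]   # placeholder: previously solved instances
solver = LearningSolver()
solver.parallel_solve(train_instances, n_jobs=10)
solver.fit(train_instances)

# Apply: solve a new instance of the same problem with ML-generated hints
# (solve is an assumed single-instance counterpart of parallel_solve).
solver.solve(new_instance)  # placeholder instance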
Features MIPLearn proposes a flexible problem specification format, which allows users to describe their particular optimization problems to a Learning-Enhanced MIP solver, both from the MIP perspective and from the ML perspective, without making any assumptions on the problem being modeled, the mathematical formulation of the problem, or ML encoding. While the format is very flexible, some constraints are enforced to ensure that it is usable by an actual solver. MIPLearn provides a reference implementation of a Learning-Enhanced Solver , which can use the above problem specification format to automatically predict, based on previously solved instances, a number of hints to accelerate MIP performance. Currently, the reference solver is able to predict: (i) partial solutions which are likely to work well as MIP starts; (ii) an initial set of lazy constraints to enforce; (iii) variable branching priorities to accelerate the exploration of the branch-and-bound tree; (iv) the optimal objective value based on the solution to the LP relaxation. The usage of the solver is very straightforward. The most suitable ML models are automatically selected, trained, cross-validated and applied to the problem with no user intervention. MIPLearn provides a set of benchmark problems and random instance generators, covering applications from different domains, which can be used to quickly evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. MIPLearn is customizable and extensible . For MIP and ML researchers exploring new techniques to accelerate MIP performance based on historical data, each component of the reference solver can be individually replaced, extended or customized. Documentation Installation and typical usage Benchmark utilities Benchmark problems, challenges and results Customizing the solver License, authors, references and acknowledgements Source Code https://github.com/ANL-CEEESA/MIPLearn","title":"Home"},{"location":"#miplearn","text":"MIPLearn is an extensible framework for Learning-Enhanced Mixed-Integer Optimization , an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data. The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see benchmark results and references for more details).","title":"MIPLearn"},{"location":"#features","text":"MIPLearn proposes a flexible problem specification format, which allows users to describe their particular optimization problems to a Learning-Enhanced MIP solver, both from the MIP perspective and from the ML perspective, without making any assumptions on the problem being modeled, the mathematical formulation of the problem, or ML encoding. While the format is very flexible, some constraints are enforced to ensure that it is usable by an actual solver. MIPLearn provides a reference implementation of a Learning-Enhanced Solver , which can use the above problem specification format to automatically predict, based on previously solved instances, a number of hints to accelerate MIP performance.
Currently, the reference solver is able to predict: (i) partial solutions which are likely to work well as MIP starts; (ii) an initial set of lazy constraints to enforce; (iii) variable branching priorities to accelerate the exploration of the branch-and-bound tree; (iv) the optimal objective value based on the solution to the LP relaxation. The usage of the solver is very straightforward. The most suitable ML models are automatically selected, trained, cross-validated and applied to the problem with no user intervention. MIPLearn provides a set of benchmark problems and random instance generators, covering applications from different domains, which can be used to quickly evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. MIPLearn is customizable and extensible . For MIP and ML researchers exploring new techniques to accelerate MIP performance based on historical data, each component of the reference solver can be individually replaced, extended or customized.","title":"Features"},{"location":"#documentation","text":"Installation and typical usage Benchmark utilities Benchmark problems, challenges and results Customizing the solver License, authors, references and acknowledgements","title":"Documentation"},{"location":"#souce-code","text":"https://github.com/ANL-CEEESA/MIPLearn","title":"Source Code"},{"location":"about/","text":"About Authors Alinson S. Xavier, Argonne National Laboratory < axavier@anl.gov > Feng Qiu, Argonne National Laboratory < fqiu@anl.gov > Acknowledgements Based upon work supported by Laboratory Directed Research and Development (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357. References Learning to Solve Large-Scale Security-Constrained Unit Commitment Problems. Alinson S. Xavier, Feng Qiu, Shabbir Ahmed . INFORMS Journal on Computing (to appear). ArXiv:1902.01696 License MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization Copyright \u00a9 2020, UChicago Argonne, LLC. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"About"},{"location":"about/#about","text":"","title":"About"},{"location":"about/#authors","text":"Alinson S. Xavier, Argonne National Laboratory < axavier@anl.gov > Feng Qiu, Argonne National Laboratory < fqiu@anl.gov >","title":"Authors"},{"location":"about/#acknowledgements","text":"Based upon work supported by Laboratory Directed Research and Development (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357.","title":"Acknowledgements"},{"location":"about/#references","text":"Learning to Solve Large-Scale Security-Constrained Unit Commitment Problems. Alinson S. Xavier, Feng Qiu, Shabbir Ahmed . INFORMS Journal on Computing (to appear). ArXiv:1902.01696","title":"References"},{"location":"about/#license","text":"MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization Copyright \u00a9 2020, UChicago Argonne, LLC. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"License"},{"location":"benchmark/","text":"Benchmarks Utilities Using BenchmarkRunner MIPLearn provides the utility class BenchmarkRunner , which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage: from miplearn import BenchmarkRunner, LearningSolver # Create train and test instances train_instances = [...] test_instances = [...] # Training phase... training_solver = LearningSolver(...) training_solver.parallel_solve(train_instances, n_jobs=10) # Test phase...
test_solvers = { \"Baseline\": LearningSolver(...), # each solver may have different parameters \"Strategy A\": LearningSolver(...), \"Strategy B\": LearningSolver(...), \"Strategy C\": LearningSolver(...), } benchmark = BenchmarkRunner(test_solvers) benchmark.fit(train_instances) benchmark.parallel_solve(test_instances, n_jobs=2) print(benchmark.raw_results()) The method fit trains the ML models for each individual solver. The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns: Solver, the name of the solver. Instance, the sequence number identifying the instance. Wallclock Time, the wallclock running time (in seconds) spent by the solver; Lower Bound, the best lower bound obtained by the solver; Upper Bound, the best upper bound obtained by the solver; Gap, the relative MIP integrality gap at the end of the optimization; Nodes, the number of explored branch-and-bound nodes. In addition to the above, there is also a \"Relative\" version of most columns, where the raw number is compared to the solver which provided the best performance. The Relative Wallclock Time for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance. For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2. Saving and loading benchmark results When iteratively exploring new formulations, encoding and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. BenchmarkRunner provides the methods save_results and load_results , which can be used to avoid this repetition, as the next example shows: # Benchmark baseline solvers and save results to a file. benchmark = BenchmarkRunner(baseline_solvers) benchmark.parallel_solve(test_instances) benchmark.save_results(\"baseline_results.csv\") # Benchmark remaining solvers, loading baseline results from file. benchmark = BenchmarkRunner(alternative_solvers) benchmark.load_results(\"baseline_results.csv\") benchmark.fit(training_instances) benchmark.parallel_solve(test_instances)","title":"Benchmark"},{"location":"benchmark/#benchmarks-utilities","text":"","title":"Benchmarks Utilities"},{"location":"benchmark/#using-benchmarkrunner","text":"MIPLearn provides the utility class BenchmarkRunner , which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage: from miplearn import BenchmarkRunner, LearningSolver # Create train and test instances train_instances = [...] test_instances = [...] # Training phase... training_solver = LearningSolver(...) training_solver.parallel_solve(train_instances, n_jobs=10) # Test phase... test_solvers = { \"Baseline\": LearningSolver(...), # each solver may have different parameters \"Strategy A\": LearningSolver(...), \"Strategy B\": LearningSolver(...), \"Strategy C\": LearningSolver(...), } benchmark = BenchmarkRunner(test_solvers) benchmark.fit(train_instances) benchmark.parallel_solve(test_instances, n_jobs=2) print(benchmark.raw_results()) The method fit trains the ML models for each individual solver. 
The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns: Solver, the name of the solver. Instance, the sequence number identifying the instance. Wallclock Time, the wallclock running time (in seconds) spent by the solver; Lower Bound, the best lower bound obtained by the solver; Upper Bound, the best upper bound obtained by the solver; Gap, the relative MIP integrality gap at the end of the optimization; Nodes, the number of explored branch-and-bound nodes. In addition to the above, there is also a \"Relative\" version of most columns, where the raw number is compared to the solver which provided the best performance. The Relative Wallclock Time for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance. For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2.","title":"Using BenchmarkRunner"},{"location":"benchmark/#saving-and-loading-benchmark-results","text":"When iteratively exploring new formulations, encoding and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. BenchmarkRunner provides the methods save_results and load_results , which can be used to avoid this repetition, as the next example shows: # Benchmark baseline solvers and save results to a file. benchmark = BenchmarkRunner(baseline_solvers) benchmark.parallel_solve(test_instances) benchmark.save_results(\"baseline_results.csv\") # Benchmark remaining solvers, loading baseline results from file. benchmark = BenchmarkRunner(alternative_solvers) benchmark.load_results(\"baseline_results.csv\") benchmark.fit(training_instances) benchmark.parallel_solve(test_instances)","title":"Saving and loading benchmark results"},{"location":"customization/","text":"Customization Selecting the internal MIP solver By default, LearningSolver uses Gurobi as its internal MIP solver. Another supported solver is IBM ILOG CPLEX . To switch between solvers, use the solver constructor argument, as shown below. It is also possible to specify a time limit (in seconds) and a relative MIP gap tolerance. from miplearn import LearningSolver solver = LearningSolver(solver=\"cplex\", time_limit=300, gap_tolerance=1e-3) Selecting solver components LearningSolver is composed of a number of individual machine-learning components, each targeting a different part of the solution process. Each component can be individually enabled, disabled or customized. The following components are enabled by default: LazyConstraintComponent : Predicts which lazy constraints to initially enforce. ObjectiveValueComponent : Predicts the optimal value of the optimization problem, given the optimal solution to the LP relaxation. PrimalSolutionComponent : Predicts optimal values for binary decision variables. In heuristic mode, this component fixes the variables to their predicted values. In exact mode, the predicted values are provided to the solver as a (partial) MIP start. The following components are also available, but not enabled by default: BranchPriorityComponent : Predicts good branch priorities for decision variables.
To create a LearningSolver with a specific set of components, the components constructor argument may be used, as the next example shows: # Create a solver without any components solver1 = LearningSolver(components=[]) # Create a solver with only two components solver2 = LearningSolver(components=[ LazyConstraintComponent(...), PrimalSolutionComponent(...), ]) It is also possible to add components to an existing solver using the solver.add method, as shown below. If the solver already holds another component of that type, the new component will replace the previous one. # Create solver with default components solver = LearningSolver() # Replace the default LazyConstraintComponent by one with custom parameters solver.add(LazyConstraintComponent(...))","title":"Customization"},{"location":"customization/#customization","text":"","title":"Customization"},{"location":"customization/#selecting-the-internal-mip-solver","text":"By default, LearningSolver uses Gurobi as its internal MIP solver. Another supported solver is IBM ILOG CPLEX . To switch between solvers, use the solver constructor argument, as shown below. It is also possible to specify a time limit (in seconds) and a relative MIP gap tolerance. from miplearn import LearningSolver solver = LearningSolver(solver=\"cplex\", time_limit=300, gap_tolerance=1e-3)","title":"Selecting the internal MIP solver"},{"location":"customization/#selecting-solver-components","text":"LearningSolver is composed of a number of individual machine-learning components, each targeting a different part of the solution process. Each component can be individually enabled, disabled or customized. The following components are enabled by default: LazyConstraintComponent : Predicts which lazy constraints to initially enforce. ObjectiveValueComponent : Predicts the optimal value of the optimization problem, given the optimal solution to the LP relaxation. PrimalSolutionComponent : Predicts optimal values for binary decision variables. In heuristic mode, this component fixes the variables to their predicted values. In exact mode, the predicted values are provided to the solver as a (partial) MIP start. The following components are also available, but not enabled by default: BranchPriorityComponent : Predicts good branch priorities for decision variables. To create a LearningSolver with a specific set of components, the components constructor argument may be used, as the next example shows: # Create a solver without any components solver1 = LearningSolver(components=[]) # Create a solver with only two components solver2 = LearningSolver(components=[ LazyConstraintComponent(...), PrimalSolutionComponent(...), ]) It is also possible to add components to an existing solver using the solver.add method, as shown below. If the solver already holds another component of that type, the new component will replace the previous one. # Create solver with default components solver = LearningSolver() # Replace the default LazyConstraintComponent by one with custom parameters solver.add(LazyConstraintComponent(...))","title":"Selecting solver components"},{"location":"problems/","text":"Benchmark Problems, Challenges and Results MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, which can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way.
On this page, we describe these problems, the included instance generators, and we present some benchmark results for LearningSolver with default parameters. Preliminaries Benchmark challenges When evaluating the performance of a conventional MIP solver, benchmark sets , such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques is typically measured as the average (or total) running time the solver takes to solve the entire benchmark set. For Learning-Enhanced MIP solvers, it is also necessary to specify which instances the solver should be trained on (the training instances ) before solving the actual set of instances we are interested in (the test instances ). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger performance benefits. In MIPLearn, each optimization problem comes with a set of benchmark challenges , which specify how the training and test instances should be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from. Baseline results To illustrate the performance of LearningSolver , and to set a baseline for newly proposed techniques, we present on this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. For more detailed computational studies, see references . We compare three solvers: baseline: Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver) ml-exact: LearningSolver with default settings, using Gurobi 9.0 as internal MIP solver ml-heuristic: Same as above, but with mode=\"heuristic\" All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz). All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously at a time. Maximum Weight Stable Set Problem Problem definition Given a simple undirected graph $G=(V,E)$ and weights $w \\in \\mathbb{R}^V$, the problem is to find a stable set $S \\subseteq V$ that maximizes $\\sum_{v \\in S} w_v$. We recall that a subset $S \\subseteq V$ is a stable set if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems. Random instance generator The class MaxWeightStableSetGenerator can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter fix_graph=True is provided, one random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ is generated during the constructor, where $n$ and $p$ are sampled from user-provided probability distributions n and p . To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution w . When fix_graph=False , a new random graph is generated for each instance, while the remaining parameters are sampled in the same way.
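For instance, the following sketch builds a generator that draws a fresh random graph for every instance. It is illustrative only: the distribution parameters are made up for this example, the scipy.stats import and miplearn import path mirror the Challenge A snippets below, and the generate method name is a hypothetical convenience, not a confirmed part of the API.

from scipy.stats import randint, uniform
from miplearn import MaxWeightStableSetGenerator  # import path assumed

# fix_graph=False: a new Erdős-Rényi graph G(n, p) is sampled per instance,
# with n ~ U{100,...,200}, p ~ U(0.05, 0.15) and weights w_v ~ U(100, 150).
gen = MaxWeightStableSetGenerator(
    w=uniform(loc=100.0, scale=50.0),
    n=randint(low=100, high=201),
    p=uniform(loc=0.05, scale=0.10),
    fix_graph=False,
)
instances = gen.generate(500)  # hypothetical method name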
Challenge A Fixed random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ with $n=200$ and $p=5\\%$ Random vertex weights $w_v \\sim U(100, 150)$ 500 training instances, 50 test instances MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.), n=randint(low=200, high=201), p=uniform(loc=0.05, scale=0.0), fix_graph=True) Traveling Salesman Problem Problem definition Given a list of cities and the distance between each pair of cities, the problem asks for the shortest route starting at the first city, visiting each other city exactly once, then returning to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete problems. Random problem generator The class TravelingSalesmanGenerator can be used to generate random instances of this problem. Initially, the generator creates $n$ cities $(x_1,y_1),\\ldots,(x_n,y_n) \\in \\mathbb{R}^2$, where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions n , x and y . For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to: d_{i,j} = \\gamma_{i,j} \\sqrt{(x_i-x_j)^2 + (y_i - y_j)^2} where $\\gamma_{i,j}$ is sampled from the distribution gamma . If fix_cities=True is provided, the list of cities is kept the same for all generated instances. The $\\gamma$ values, and therefore also the distances, are still different. By default, all distances $d_{i,j}$ are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. Challenge A Fixed list of 350 cities in the $[0, 1000]^2$ square $\\gamma_{i,j} \\sim U(0.95, 1.05)$ 500 training instances, 50 test instances TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), y=uniform(loc=0.0, scale=1000.0), n=randint(low=350, high=351), gamma=uniform(loc=0.95, scale=0.1), fix_cities=True, round=True, ) Multidimensional 0-1 Knapsack Problem Problem definition Given a set of $n$ items and $m$ types of resources (also called knapsacks ), the problem is to find a subset of items that maximizes profit without consuming more resources than are available. More precisely, the problem is: \\begin{align*} \\text{maximize} & \\sum_{j=1}^n p_j x_j \\\\ \\text{subject to} & \\sum_{j=1}^n w_{ij} x_j \\leq b_i & \\forall i=1,\\ldots,m \\\\ & x_j \\in \\{0,1\\} & \\forall j=1,\\ldots,n \\end{align*} Random instance generator The class MultiKnapsackGenerator can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions n and m . The weights $w_{ij}$ are sampled independently from the provided distribution w . The capacity of knapsack $i$ is set to b_i = \\alpha_i \\sum_{j=1}^n w_{ij} where $\\alpha_i$, the tightness ratio, is sampled from the provided probability distribution alpha . To make the instances more challenging, the costs of the items are linearly correlated with their average weights. More specifically, the price of each item $j$ is set to: p_j = \\sum_{i=1}^m \\frac{w_{ij}}{m} + K u_j, where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled from the provided probability distributions K and u . If fix_w=True is provided, then $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as u and K are not constants, the generated instances will still not be completely identical. 
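As a sanity check of the capacity and price formulas above, the following NumPy sketch mirrors our reading of the text; it reproduces the arithmetic only and is not the package's actual implementation. import numpy as np
from scipy.stats import uniform
n, m = 250, 10
w = uniform(loc=0.0, scale=1000.0).rvs(size=(m, n))  # weights w_ij ~ U(0, 1000)
alpha = uniform(loc=0.25, scale=0.0).rvs(size=m)     # tightness ratios alpha_i
b = alpha * w.sum(axis=1)                            # b_i = alpha_i * sum_j w_ij
K = 500.0                                            # correlation coefficient
u = uniform(loc=0.0, scale=1.0).rvs(size=n)          # correlation multipliers u_j
p = w.sum(axis=0) / m + K * u                        # p_j = sum_i w_ij / m + K * u_j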
If a probability distribution w_jitter is provided, then item weights will be set to $w_{ij} \\gamma_{ij}$ where $\\gamma_{ij}$ is sampled from w_jitter . When combined with fix_w=True , this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead. By default, all generated prices, weights and capacities are rounded to the nearest integer number. If round=False is provided, this rounding will be disabled. References Freville, Arnaud, and G\u00e9rard Plateau. An efficient preprocessing procedure for the multidimensional 0\u20131 knapsack problem. Discrete applied mathematics 49.1-3 (1994): 189-212. Fr\u00e9ville, Arnaud. The multidimensional 0\u20131 knapsack problem: An overview. European Journal of Operational Research 155.1 (2004): 1-21. Challenge A 250 variables, 10 constraints, fixed weights $w \\sim U(0, 1000), \\gamma \\sim U(0.95, 1.05)$ $K = 500, u \\sim U(0, 1), \\alpha = 0.25$ 500 training instances, 50 test instances MultiKnapsackGenerator(n=randint(low=250, high=251), m=randint(low=10, high=11), w=uniform(loc=0.0, scale=1000.0), K=uniform(loc=500.0, scale=0.0), u=uniform(loc=0.0, scale=1.0), alpha=uniform(loc=0.25, scale=0.0), fix_w=True, w_jitter=uniform(loc=0.95, scale=0.1), )","title":"Problems"},{"location":"problems/#benchmark-problems-challenges-and-results","text":"MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, that can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. In this page, we describe these problems, the included instance generators, and we present some benchmark results for LearningSolver with default parameters.","title":"Benchmark Problems, Challenges and Results"},{"location":"problems/#preliminaries","text":"","title":"Preliminaries"},{"location":"problems/#benchmark-challenges","text":"When evaluating the performance of a conventional MIP solver, benchmark sets , such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques are typically measured as the average (or total) running time the solver takes to solve the entire benchmark set. For Learning-Enhanced MIP solvers, it is also necessary to specify what instances should the solver be trained on (the training instances ) before solving the actual set of instances we are interested in (the test instances ). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger perfomance benefits. In MIPLearn, each optimization problem comes with a set of benchmark challenges , which specify how should the training and test instances be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from.","title":"Benchmark challenges"},{"location":"problems/#baseline-results","text":"To illustrate the performance of LearningSolver , and to set a baseline for newly proposed techniques, we present in this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. 
For more detailed computational studies, see references . We compare three solvers: baseline: Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver) ml-exact: LearningSolver with default settings, using Gurobi 9.0 as internal MIP solver ml-heuristic: Same as above, but with mode=\"heuristic\" All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz). All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously.","title":"Baseline results"},{"location":"problems/#maximum-weight-stable-set-problem","text":"","title":"Maximum Weight Stable Set Problem"},{"location":"problems/#problem-definition","text":"Given a simple undirected graph $G=(V,E)$ and weights $w \\in \\mathbb{R}^V$, the problem is to find a stable set $S \\subseteq V$ that maximizes $\\sum_{v \\in S} w_v$. We recall that a subset $S \\subseteq V$ is a stable set if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems.","title":"Problem definition"},{"location":"problems/#random-instance-generator","text":"The class MaxWeightStableSetGenerator can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter fix_graph=True is provided, one random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ is generated at construction time, where $n$ and $p$ are sampled from user-provided probability distributions n and p . To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution w . When fix_graph=False , a new random graph is generated for each instance, while the remaining parameters are sampled in the same way.","title":"Random instance generator"},{"location":"problems/#challenge-a","text":"Fixed random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ with $n=200$ and $p=5\\%$ Random vertex weights $w_v \\sim U(100, 150)$ 500 training instances, 50 test instances MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.), n=randint(low=200, high=201), p=uniform(loc=0.05, scale=0.0), fix_graph=True)","title":"Challenge A"},{"location":"problems/#traveling-salesman-problem","text":"","title":"Traveling Salesman Problem"},{"location":"problems/#problem-definition_1","text":"Given a list of cities and the distance between each pair of cities, the problem asks for the shortest route starting at the first city, visiting each other city exactly once, then returning to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete problems.","title":"Problem definition"},{"location":"problems/#random-problem-generator","text":"The class TravelingSalesmanGenerator can be used to generate random instances of this problem. Initially, the generator creates $n$ cities $(x_1,y_1),\\ldots,(x_n,y_n) \\in \\mathbb{R}^2$, where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions n , x and y . For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to: d_{i,j} = \\gamma_{i,j} \\sqrt{(x_i-x_j)^2 + (y_i - y_j)^2} where $\\gamma_{i,j}$ is sampled from the distribution gamma . If fix_cities=True is provided, the list of cities is kept the same for all generated instances. The $\\gamma$ values, and therefore also the distances, are still different. 
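To make the distance construction concrete, here is a short NumPy sketch of the formula above, written against the challenge parameters; it mirrors the description rather than the package's internal code. import numpy as np
from scipy.stats import uniform
n = 350
x = uniform(loc=0.0, scale=1000.0).rvs(n)         # city coordinates in [0, 1000]
y = uniform(loc=0.0, scale=1000.0).rvs(n)
gamma = uniform(loc=0.95, scale=0.1).rvs((n, n))  # gamma_ij ~ U(0.95, 1.05)
dist = np.sqrt((x[:, None] - x[None, :]) ** 2 + (y[:, None] - y[None, :]) ** 2)
d = np.round(gamma * dist)  # d_ij = gamma_ij * Euclidean distance, rounded (the round=True default)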
By default, all distances $d_{i,j}$ are rounded to the nearest integer. If round=False is provided, this rounding will be disabled.","title":"Random problem generator"},{"location":"problems/#challenge-a_1","text":"Fixed list of 350 cities in the $[0, 1000]^2$ square $\\gamma_{i,j} \\sim U(0.95, 1.05)$ 500 training instances, 50 test instances TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), y=uniform(loc=0.0, scale=1000.0), n=randint(low=350, high=351), gamma=uniform(loc=0.95, scale=0.1), fix_cities=True, round=True, )","title":"Challenge A"},{"location":"problems/#multidimensional-0-1-knapsack-problem","text":"","title":"Multidimensional 0-1 Knapsack Problem"},{"location":"problems/#problem-definition_2","text":"Given a set of $n$ items and $m$ types of resources (also called knapsacks ), the problem is to find a subset of items that maximizes profit without consuming more resources than it is available. More precisely, the problem is: \\begin{align*} \\text{maximize} & \\sum_{j=1}^n p_j x_j \\\\ \\text{subject to} & \\sum_{j=1}^n w_{ij} x_j \\leq b_i & \\forall i=1,\\ldots,m \\\\ & x_j \\in \\{0,1\\} & \\forall j=1,\\ldots,n \\end{align*}","title":"Problem definition"},{"location":"problems/#random-instance-generator_1","text":"The class MultiKnapsackGenerator can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions n and m . The weights $w_{ij}$ are sampled independently from the provided distribution w . The capacity of knapsack $i$ is set to b_i = \\alpha_i \\sum_{j=1}^n w_{ij} where $\\alpha_i$, the tightness ratio, is sampled from the provided probability distribution alpha . To make the instances more challenging, the costs of the items are linearly correlated to their average weights. More specifically, the price of each item $j$ is set to: p_j = \\sum_{i=1}^m \\frac{w_{ij}}{m} + K u_j, where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled from the provided probability distributions K and u . If fix_w=True is provided, then $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as u and K are not constants, the generated instances will still not be completely identical. If a probability distribution w_jitter is provided, then item weights will be set to $w_{ij} \\gamma_{ij}$ where $\\gamma_{ij}$ is sampled from w_jitter . When combined with fix_w=True , this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead. By default, all generated prices, weights and capacities are rounded to the nearest integer number. If round=False is provided, this rounding will be disabled. References Freville, Arnaud, and G\u00e9rard Plateau. An efficient preprocessing procedure for the multidimensional 0\u20131 knapsack problem. Discrete applied mathematics 49.1-3 (1994): 189-212. Fr\u00e9ville, Arnaud. The multidimensional 0\u20131 knapsack problem: An overview. 
European Journal of Operational Research 155.1 (2004): 1-21.","title":"Random instance generator"},{"location":"problems/#challenge-a_2","text":"250 variables, 10 constraints, fixed weights $w \\sim U(0, 1000), \\gamma \\sim U(0.95, 1.05)$ $K = 500, u \\sim U(0, 1), \\alpha = 0.25$ 500 training instances, 50 test instances MultiKnapsackGenerator(n=randint(low=250, high=251), m=randint(low=10, high=11), w=uniform(loc=0.0, scale=1000.0), K=uniform(loc=500.0, scale=0.0), u=uniform(loc=0.0, scale=1.0), alpha=uniform(loc=0.25, scale=0.0), fix_w=True, w_jitter=uniform(loc=0.95, scale=0.1), )","title":"Challenge A"},{"location":"usage/","text":"Usage Installation MIPLearn is mainly written in Python, with some components written in Julia. For this reason, both Python 3.6+ and Julia 1.3+ are required. A mixed-integer solver is also required, and its Python bindings must be properly installed. Currently supported solvers include CPLEX and Gurobi. To install MIPLearn, run the following commands: git clone https://github.com/ANL-CEEESA/MIPLearn.git cd MIPLearn make install After installation, the package miplearn should become available to Python. It can be imported as follows: import miplearn Note To install MIPLearn in another Python environment, switch to that environment before running make install . To install the package in development mode, run make develop instead. Using LearningSolver The main class provided by this package is LearningSolver , a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage: from miplearn import LearningSolver # List of user-provided instances training_instances = [...] test_instances = [...] # Create solver solver = LearningSolver() # Solve all training instances for instance in training_instances: solver.solve(instance) # Learn from training instances solver.fit(training_instances) # Solve all test instances for instance in test_instances: solver.solve(instance) In this example, we have two lists of user-provided instances: training_instances and test_instances . We start by solving all training instances. Since there is no historical information available at this point, the instances will be processed from scratch, with no ML acceleration. After solving each instance, the solver stores within each instance object the optimal solution, the optimal objective value, and other information that can be used to accelerate future solves. After all training instances are solved, we call solver.fit(training_instances) . This instructs the solver to train all its internal machine-learning models based on the solutions of the (solved) training instances. Subsequent calls to solver.solve(instance) will automatically use the trained Machine Learning models to accelerate the solution process. Describing problem instances Instances to be solved by LearningSolver must derive from the abstract class miplearn.Instance . The following three abstract methods must be implemented: instance.to_model() , which returns a concrete Pyomo model corresponding to the instance; instance.get_instance_features() , which returns a 1-dimensional Numpy array of (numerical) features describing the entire instance; instance.get_variable_features(var_name, index) , which returns a 1-dimensional array of (numerical) features describing a particular decision variable. 
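To make this interface concrete before discussing each method, here is a minimal sketch of a knapsack instance class; it is illustrative only (see src/python/miplearn/problems/knapsack.py for the package's actual version), and importing Instance from the top-level miplearn package is an assumption. import numpy as np
import pyomo.environ as pe
from miplearn import Instance  # import location is an assumption

class MyKnapsackInstance(Instance):
    def __init__(self, weights, prices, capacity):
        self.weights = weights
        self.prices = prices
        self.capacity = capacity

    def to_model(self):
        # Concrete Pyomo model handed to the internal MIP solver
        model = pe.ConcreteModel()
        items = range(len(self.weights))
        model.x = pe.Var(items, domain=pe.Binary)
        model.obj = pe.Objective(
            expr=sum(self.prices[j] * model.x[j] for j in items), sense=pe.maximize)
        model.budget = pe.Constraint(
            expr=sum(self.weights[j] * model.x[j] for j in items) <= self.capacity)
        return model

    def get_instance_features(self):
        # Fixed-length encoding of the whole instance
        return np.array([np.mean(self.weights), np.mean(self.prices),
                         len(self.weights), self.capacity])

    def get_variable_features(self, var_name, index):
        # Fixed-length encoding of one decision variable
        return np.array([self.weights[index], self.prices[index]])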
The first method is used by LearningSolver to construct a concrete Pyomo model, which will be provided to the internal MIP solver. The second and third methods provide an encoding of the instance, which can be used by the ML models to make predictions. In the knapsack problem, for example, an implementation may decide to provide as instance features the average weights, average prices, number of items and the size of the knapsack. The weight and the price of each individual item could be provided as variable features. See src/python/miplearn/problems/knapsack.py for a concrete example. An optional method which can be implemented is instance.get_variable_category(var_name, index) , which returns a category (a string, an integer or any hashable type) for each decision variable. If two variables have the same category, LearningSolver will use the same internal ML model to predict the values of both variables. By default, all variables belong to the \"default\" category, and therefore only one ML model is used for all variables. If the returned category is None , ML predictors will ignore the variable. It is not necessary to have a one-to-one correspondence between features and problem instances. One important (and deliberate) limitation of MIPLearn, however, is that get_instance_features() must always return arrays of same length for all relevant instances of the problem. Similarly, get_variable_features(var_name, index) must also always return arrays of same length for all variables in each category. It is up to the user to decide how to encode variable-length characteristics of the problem into fixed-length vectors. In graph problems, for example, graph embeddings can be used to reduce the (variable-length) lists of nodes and edges into a fixed-length structure that still preserves some properties of the graph. Different instance encodings may have significant impact on performance. Obtaining heuristic solutions By default, LearningSolver uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver. In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts. For more significant performance benefits, LearningSolver can also be configured to place additional trust in the Machine Learning predictors, by using the mode=\"heuristic\" constructor argument. When operating in this mode, if a ML model is statistically shown (through stratified k-fold cross validation ) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see references and benchmark results ). Danger The heuristic mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible. Saving and loading solver state After solving a large number of training instances, it may be desirable to save the current state of LearningSolver to disk, so that the solver can still use the acquired knowledge after the application restarts. 
This can be accomplished by using the standard pickle module, as the following example illustrates: from miplearn import LearningSolver import pickle # Solve training instances training_instances = [...] solver = LearningSolver() for instance in training_instances: solver.solve(instance) # Train machine-learning models solver.fit(training_instances) # Save trained solver to disk pickle.dump(solver, open(\"solver.pickle\", \"wb\")) # Application restarts... # Load trained solver from disk solver = pickle.load(open(\"solver.pickle\", \"rb\")) # Solve additional instances test_instances = [...] for instance in test_instances: solver.solve(instance) Solving training instances in parallel In many situations, training and test instances can be solved in parallel to accelerate the training process. LearningSolver provides the method parallel_solve(instances) to easily achieve this: from miplearn import LearningSolver training_instances = [...] solver = LearningSolver() solver.parallel_solve(training_instances, n_jobs=4) solver.fit(training_instances) # Test phase... test_instances = [...] solver.parallel_solve(test_instances) Current Limitations Only binary and continuous decision variables are currently supported.","title":"Usage"},{"location":"usage/#usage","text":"","title":"Usage"},{"location":"usage/#installation","text":"MIPLearn is mainly written in Python, with some components written in Julia. For this reason, both Python 3.6+ and Julia 1.3+ are required. A mixed-integer solver is also required, and its Python bindings must be properly installed. Currently supported solvers include CPLEX and Gurobi. To install MIPLearn, run the following commands: git clone https://github.com/ANL-CEEESA/MIPLearn.git cd MIPLearn make install After installation, the package miplearn should become available to Python. It can be imported as follows: import miplearn Note To install MIPLearn in another Python environment, switch to that enviroment before running make install . To install the package in development mode, run make develop instead.","title":"Installation"},{"location":"usage/#using-learningsolver","text":"The main class provided by this package is LearningSolver , a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage: from miplearn import LearningSolver # List of user-provided instances training_instances = [...] test_instances = [...] # Create solver solver = LearningSolver() # Solve all training instances for instance in training_instances: solver.solve(instance) # Learn from training instances solver.fit(training_instances) # Solve all test instances for instance in test_instances: solver.solve(instance) In this example, we have two lists of user-provided instances: training_instances and test_instances . We start by solving all training instances. Since there is no historical information available at this point, the instances will be processed from scratch, with no ML acceleration. After solving each instance, the solver stores within each instance object the optimal solution, the optimal objective value, and other information that can be used to accelerate future solves. After all training instances are solved, we call solver.fit(training_instances) . This instructs the solver to train all its internal machine-learning models based on the solutions of the (solved) trained instances. 
Subsequent calls to solver.solve(instance) will automatically use the trained Machine Learning models to accelerate the solution process.","title":"Using LearningSolver"},{"location":"usage/#describing-problem-instances","text":"Instances to be solved by LearningSolver must derive from the abstract class miplearn.Instance . The following three abstract methods must be implemented: instance.to_model() , which returns a concrete Pyomo model corresponding to the instance; instance.get_instance_features() , which returns a 1-dimensional Numpy array of (numerical) features describing the entire instance; instance.get_variable_features(var_name, index) , which returns a 1-dimensional array of (numerical) features describing a particular decision variable. The first method is used by LearningSolver to construct a concrete Pyomo model, which will be provided to the internal MIP solver. The second and third methods provide an encoding of the instance, which can be used by the ML models to make predictions. In the knapsack problem, for example, an implementation may decide to provide as instance features the average weights, average prices, number of items and the size of the knapsack. The weight and the price of each individual item could be provided as variable features. See src/python/miplearn/problems/knapsack.py for a concrete example. An optional method which can be implemented is instance.get_variable_category(var_name, index) , which returns a category (a string, an integer or any hashable type) for each decision variable. If two variables have the same category, LearningSolver will use the same internal ML model to predict the values of both variables. By default, all variables belong to the \"default\" category, and therefore only one ML model is used for all variables. If the returned category is None , ML predictors will ignore the variable. It is not necessary to have a one-to-one correspondence between features and problem instances. One important (and deliberate) limitation of MIPLearn, however, is that get_instance_features() must always return arrays of same length for all relevant instances of the problem. Similarly, get_variable_features(var_name, index) must also always return arrays of same length for all variables in each category. It is up to the user to decide how to encode variable-length characteristics of the problem into fixed-length vectors. In graph problems, for example, graph embeddings can be used to reduce the (variable-length) lists of nodes and edges into a fixed-length structure that still preserves some properties of the graph. Different instance encodings may have significant impact on performance.","title":"Describing problem instances"},{"location":"usage/#obtaining-heuristic-solutions","text":"By default, LearningSolver uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver. In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts. For more significant performance benefits, LearningSolver can also be configured to place additional trust in the Machine Learning predictors, by using the mode=\"heuristic\" constructor argument. When operating in this mode, if a ML model is statistically shown (through stratified k-fold cross validation ) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. 
The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see references and benchmark results ). Danger The heuristic mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible.","title":"Obtaining heuristic solutions"},{"location":"usage/#saving-and-loading-solver-state","text":"After solving a large number of training instances, it may be desirable to save the current state of LearningSolver to disk, so that the solver can still use the acquired knowledge after the application restarts. This can be accomplished by using the standard pickle module, as the following example illustrates: from miplearn import LearningSolver import pickle # Solve training instances training_instances = [...] solver = LearningSolver() for instance in training_instances: solver.solve(instance) # Train machine-learning models solver.fit(training_instances) # Save trained solver to disk pickle.dump(solver, open(\"solver.pickle\", \"wb\")) # Application restarts... # Load trained solver from disk solver = pickle.load(open(\"solver.pickle\", \"rb\")) # Solve additional instances test_instances = [...] for instance in test_instances: solver.solve(instance)","title":"Saving and loading solver state"},{"location":"usage/#solving-training-instances-in-parallel","text":"In many situations, training and test instances can be solved in parallel to accelerate the training process. LearningSolver provides the method parallel_solve(instances) to easily achieve this: from miplearn import LearningSolver training_instances = [...] solver = LearningSolver() solver.parallel_solve(training_instances, n_jobs=4) solver.fit(training_instances) # Test phase... test_instances = [...] solver.parallel_solve(test_instances)","title":"Solving training instances in parallel"},{"location":"usage/#current-limitations","text":"Only binary and continuous decision variables are currently supported.","title":"Current Limitations"}]} \ No newline at end of file +{"config":{"lang":["en"],"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"MIPLearn MIPLearn is an extensible framework for Learning-Enhanced Mixed-Integer Optimization , an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data. The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see benchmark results and references for more details). Features MIPLearn proposes a flexible problem specification format, which allows users to describe their particular optimization problems to a Learning-Enhanced MIP solver, both from the MIP perspective and from the ML perspective, without making any assumptions on the problem being modeled, the mathematical formulation of the problem, or ML encoding. 
While the format is very flexible, some constraints are enforced to ensure that it is usable by an actual solver. MIPLearn provides a reference implementation of a Learning-Enhanced Solver , which can use the above problem specification format to automatically predict, based on previously solved instances, a number of hints to accelerate MIP performance. Currently, the reference solver is able to predict: (i) partial solutions which are likely to work well as MIP starts; (ii) an initial set of lazy constraints to enforce; (iii) variable branching priorities to accelerate the exploration of the branch-and-bound tree; (iv) the optimal objective value based on the solution to the LP relaxation. The usage of the solver is very straightforward. The most suitable ML models are automatically selected, trained, cross-validated and applied to the problem with no user intervention. MIPLearn provides a set of benchmark problems and random instance generators, covering applications from different domains, which can be used to quickly evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. MIPLearn is customizable and extensible . For MIP and ML researchers exploring new techniques to accelerate MIP performance based on historical data, each component of the reference solver can be individually replaced, extended or customized. Documentation Installation and typical usage Benchmark utilities Benchmark problems, challenges and results Customizing the solver License, authors, references and acknowledgements Source Code https://github.com/ANL-CEEESA/MIPLearn","title":"Home"},{"location":"#miplearn","text":"MIPLearn is an extensible framework for Learning-Enhanced Mixed-Integer Optimization , an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data. The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see benchmark results and references for more details).","title":"MIPLearn"},{"location":"#features","text":"MIPLearn proposes a flexible problem specification format, which allows users to describe their particular optimization problems to a Learning-Enhanced MIP solver, both from the MIP perspective and from the ML perspective, without making any assumptions on the problem being modeled, the mathematical formulation of the problem, or ML encoding. While the format is very flexible, some constraints are enforced to ensure that it is usable by an actual solver. MIPLearn provides a reference implementation of a Learning-Enhanced Solver , which can use the above problem specification format to automatically predict, based on previously solved instances, a number of hints to accelerate MIP performance. Currently, the reference solver is able to predict: (i) partial solutions which are likely to work well as MIP starts; (ii) an initial set of lazy constraints to enforce; (iii) variable branching priorities to accelerate the exploration of the branch-and-bound tree; (iv) the optimal objective value based on the solution to the LP relaxation. The usage of the solver is very straightforward. 
The most suitable ML models are automatically selected, trained, cross-validated and applied to the problem with no user intervention. MIPLearn provides a set of benchmark problems and random instance generators, covering applications from different domains, which can be used to quickly evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. MIPLearn is customizable and extensible . For MIP and ML researchers exploring new techniques to accelerate MIP performance based on historical data, each component of the reference solver can be individually replaced, extended or customized.","title":"Features"},{"location":"#documentation","text":"Installation and typical usage Benchmark utilities Benchmark problems, challenges and results Customizing the solver License, authors, references and acknowledgements","title":"Documentation"},{"location":"#souce-code","text":"https://github.com/ANL-CEEESA/MIPLearn","title":"Souce Code"},{"location":"about/","text":"About Authors Alinson S. Xavier, Argonne National Laboratory < axavier@anl.gov > Feng Qiu, Argonne National Laboratory < fqiu@anl.gov > Acknowledgements Based upon work supported by Laboratory Directed Research and Development (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357. References Learning to Solve Large-Scale Security-Constrained Unit Commitment Problems. Alinson S. Xavier, Feng Qiu, Shabbir Ahmed . INFORMS Journal on Computing (to appear). ArXiv:1902:01696 License MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization Copyright \u00a9 2020, UChicago Argonne, LLC. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"About"},{"location":"about/#about","text":"","title":"About"},{"location":"about/#authors","text":"Alinson S. 
Xavier, Argonne National Laboratory < axavier@anl.gov > Feng Qiu, Argonne National Laboratory < fqiu@anl.gov >","title":"Authors"},{"location":"about/#acknowledgements","text":"Based upon work supported by Laboratory Directed Research and Development (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357.","title":"Acknowledgements"},{"location":"about/#references","text":"Learning to Solve Large-Scale Security-Constrained Unit Commitment Problems. Alinson S. Xavier, Feng Qiu, Shabbir Ahmed . INFORMS Journal on Computing (to appear). ArXiv:1902:01696","title":"References"},{"location":"about/#license","text":"MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization Copyright \u00a9 2020, UChicago Argonne, LLC. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"License"},{"location":"benchmark/","text":"Benchmarks Utilities Using BenchmarkRunner MIPLearn provides the utility class BenchmarkRunner , which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage: from miplearn import BenchmarkRunner, LearningSolver # Create train and test instances train_instances = [...] test_instances = [...] # Training phase... training_solver = LearningSolver(...) training_solver.parallel_solve(train_instances, n_jobs=10) # Test phase... test_solvers = { \"Baseline\": LearningSolver(...), # each solver may have different parameters \"Strategy A\": LearningSolver(...), \"Strategy B\": LearningSolver(...), \"Strategy C\": LearningSolver(...), } benchmark = BenchmarkRunner(test_solvers) benchmark.fit(train_instances) benchmark.parallel_solve(test_instances, n_jobs=2) print(benchmark.raw_results()) The method fit trains the ML models for each individual solver. The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns: Solver, the name of the solver. 
Instance, the sequence number identifying the instance. Wallclock Time, the wallclock running time (in seconds) spent by the solver; Lower Bound, the best lower bound obtained by the solver; Upper Bound, the best upper bound obtained by the solver; Gap, the relative MIP integrality gap at the end of the optimization; Nodes, the number of explored branch-and-bound nodes. In addition to the above, there is also a \"Relative\" version of most columns, where the raw number is compared to the solver which provided the best performance. The Relative Wallclock Time for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance. For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2. Saving and loading benchmark results When iteratively exploring new formulations, encoding and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. BenchmarkRunner provides the methods save_results and load_results , which can be used to avoid this repetition, as the next example shows: # Benchmark baseline solvers and save results to a file. benchmark = BenchmarkRunner(baseline_solvers) benchmark.parallel_solve(test_instances) benchmark.save_results(\"baseline_results.csv\") # Benchmark remaining solvers, loading baseline results from file. benchmark = BenchmarkRunner(alternative_solvers) benchmark.load_results(\"baseline_results.csv\") benchmark.fit(training_instances) benchmark.parallel_solve(test_instances)","title":"Benchmark"},{"location":"benchmark/#benchmarks-utilities","text":"","title":"Benchmarks Utilities"},{"location":"benchmark/#using-benchmarkrunner","text":"MIPLearn provides the utility class BenchmarkRunner , which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage: from miplearn import BenchmarkRunner, LearningSolver # Create train and test instances train_instances = [...] test_instances = [...] # Training phase... training_solver = LearningSolver(...) training_solver.parallel_solve(train_instances, n_jobs=10) # Test phase... test_solvers = { \"Baseline\": LearningSolver(...), # each solver may have different parameters \"Strategy A\": LearningSolver(...), \"Strategy B\": LearningSolver(...), \"Strategy C\": LearningSolver(...), } benchmark = BenchmarkRunner(test_solvers) benchmark.fit(train_instances) benchmark.parallel_solve(test_instances, n_jobs=2) print(benchmark.raw_results()) The method fit trains the ML models for each individual solver. The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns: Solver, the name of the solver. Instance, the sequence number identifying the instance. Wallclock Time, the wallclock running time (in seconds) spent by the solver; Lower Bound, the best lower bound obtained by the solver; Upper Bound, the best upper bound obtained by the solver; Gap, the relative MIP integrality gap at the end of the optimization; Nodes, the number of explored branch-and-bound nodes. 
In addition to the above, there is also a \"Relative\" version of most columns, where the raw number is compared to the solver which provided the best performance. The Relative Wallclock Time, for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance. For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2.","title":"Using BenchmarkRunner"},{"location":"benchmark/#saving-and-loading-benchmark-results","text":"When iteratively exploring new formulations, encodings and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. BenchmarkRunner provides the methods save_results and load_results , which can be used to avoid this repetition, as the next example shows: # Benchmark baseline solvers and save results to a file. benchmark = BenchmarkRunner(baseline_solvers) benchmark.parallel_solve(test_instances) benchmark.save_results(\"baseline_results.csv\") # Benchmark remaining solvers, loading baseline results from file. benchmark = BenchmarkRunner(alternative_solvers) benchmark.load_results(\"baseline_results.csv\") benchmark.fit(training_instances) benchmark.parallel_solve(test_instances)","title":"Saving and loading benchmark results"},{"location":"customization/","text":"Customization Selecting the internal MIP solver By default, LearningSolver uses Gurobi as its internal MIP solver. Another supported solver is IBM ILOG CPLEX . To switch between solvers, use the solver constructor argument, as shown below. It is also possible to specify a time limit (in seconds) and a relative MIP gap tolerance. from miplearn import LearningSolver solver = LearningSolver(solver=\"cplex\", time_limit=300, gap_tolerance=1e-3) Selecting solver components LearningSolver is composed of a number of individual machine-learning components, each targeting a different part of the solution process. Each component can be individually enabled, disabled or customized. The following components are enabled by default: LazyConstraintComponent : Predicts which lazy constraints to initially enforce. ObjectiveValueComponent : Predicts the optimal value of the optimization problem, given the optimal solution to the LP relaxation. PrimalSolutionComponent : Predicts optimal values for binary decision variables. In heuristic mode, this component fixes the variables to their predicted values. In exact mode, the predicted values are provided to the solver as a (partial) MIP start. The following components are also available, but not enabled by default: BranchPriorityComponent : Predicts good branch priorities for decision variables. To create a LearningSolver with a specific set of components, the components constructor argument may be used, as the next example shows: # Create a solver without any components solver1 = LearningSolver(components=[]) # Create a solver with only two components solver2 = LearningSolver(components=[ LazyConstraintComponent(...), PrimalSolutionComponent(...), ]) It is also possible to add components to an existing solver using the solver.add method, as shown below. If the solver already holds another component of that type, the new component will replace the previous one. 
# Create solver with default components solver = LearningSolver() # Replace the default LazyConstraintComponent by one with custom parameters solver.add(LazyConstraintComponent(...))","title":"Customization"},{"location":"customization/#customization","text":"","title":"Customization"},{"location":"customization/#selecting-the-internal-mip-solver","text":"By default, LearningSolver uses Gurobi as its internal MIP solver. Another supported solver is IBM ILOG CPLEX . To switch between solvers, use the solver constructor argument, as shown below. It is also possible to specify a time limit (in seconds) and a relative MIP gap tolerance. from miplearn import LearningSolver solver = LearningSolver(solver=\"cplex\", time_limit=300, gap_tolerance=1e-3)","title":"Selecting the internal MIP solver"},{"location":"customization/#selecting-solver-components","text":"LearningSolver is composed by a number of individual machine-learning components, each targeting a different part of the solution process. Each component can be individually enabled, disabled or customized. The following components are enabled by default: LazyConstraintComponent : Predicts which lazy constraint to initially enforce. ObjectiveValueComponent : Predicts the optimal value of the optimization problem, given the optimal solution to the LP relaxation. PrimalSolutionComponent : Predicts optimal values for binary decision variables. In heuristic mode, this component fixes the variables to their predicted values. In exact mode, the predicted values are provided to the solver as a (partial) MIP start. The following components are also available, but not enabled by default: BranchPriorityComponent : Predicts good branch priorities for decision variables. To create a LearningSolver with a specific set of components, the components constructor argument may be used, as the next example shows: # Create a solver without any components solver1 = LearningSolver(components=[]) # Create a solver with only two components solver2 = LearningSolver(components=[ LazyConstraintComponent(...), PrimalSolutionComponent(...), ]) It is also possible to add components to an existing solver using the solver.add method, as shown below. If the solver already holds another component of that type, the new component will replace the previous one. # Create solver with default components solver = LearningSolver() # Replace the default LazyConstraintComponent by one with custom parameters solver.add(LazyConstraintComponent(...))","title":"Selecting solver components"},{"location":"problems/","text":"Benchmark Problems, Challenges and Results MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, that can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. In this page, we describe these problems, the included instance generators, and we present some benchmark results for LearningSolver with default parameters. Preliminaries Benchmark challenges When evaluating the performance of a conventional MIP solver, benchmark sets , such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques are typically measured as the average (or total) running time the solver takes to solve the entire benchmark set. 
For Learning-Enhanced MIP solvers, it is also necessary to specify which instances the solver should be trained on (the training instances ) before solving the actual set of instances we are interested in (the test instances ). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger performance benefits. In MIPLearn, each optimization problem comes with a set of benchmark challenges , which specify how the training and test instances should be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from. Baseline results To illustrate the performance of LearningSolver , and to set a baseline for newly proposed techniques, we present in this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. For more detailed computational studies, see references . We compare three solvers: baseline: Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver) ml-exact: LearningSolver with default settings, using Gurobi 9.0 as internal MIP solver ml-heuristic: Same as above, but with mode=\"heuristic\" All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz). All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously. Maximum Weight Stable Set Problem Problem definition Given a simple undirected graph $G=(V,E)$ and weights $w \\in \\mathbb{R}^V$, the problem is to find a stable set $S \\subseteq V$ that maximizes $\\sum_{v \\in S} w_v$. We recall that a subset $S \\subseteq V$ is a stable set if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems. Random instance generator The class MaxWeightStableSetGenerator can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter fix_graph=True is provided, one random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ is generated at construction time, where $n$ and $p$ are sampled from user-provided probability distributions n and p . To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution w . When fix_graph=False , a new random graph is generated for each instance, while the remaining parameters are sampled in the same way. Challenge A Fixed random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ with $n=200$ and $p=5\\%$ Random vertex weights $w_v \\sim U(100, 150)$ 500 training instances, 50 test instances MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.), n=randint(low=200, high=201), p=uniform(loc=0.05, scale=0.0), fix_graph=True) Traveling Salesman Problem Problem definition Given a list of cities and the distance between each pair of cities, the problem asks for the shortest route starting at the first city, visiting each other city exactly once, then returning to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete problems. Random problem generator The class TravelingSalesmanGenerator can be used to generate random instances of this problem. 
Initially, the generator creates $n$ cities $(x_1,y_1),\\ldots,(x_n,y_n) \\in \\mathbb{R}^2$, where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions n , x and y . For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to: d_{i,j} = \\gamma_{i,j} \\sqrt{(x_i-x_j)^2 + (y_i - y_j)^2} where $\\gamma_{i,j}$ is sampled from the distribution gamma . If fix_cities=True is provided, the list of cities is kept the same for all generated instances. The $\\gamma$ values, and therefore also the distances, are still different. By default, all distances $d_{i,j}$ are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. Challenge A Fixed list of 350 cities in the $[0, 1000]^2$ square $\\gamma_{i,j} \\sim U(0.95, 1.05)$ 500 training instances, 50 test instances TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), y=uniform(loc=0.0, scale=1000.0), n=randint(low=350, high=351), gamma=uniform(loc=0.95, scale=0.1), fix_cities=True, round=True, ) Multidimensional 0-1 Knapsack Problem Problem definition Given a set of $n$ items and $m$ types of resources (also called knapsacks ), the problem is to find a subset of items that maximizes profit without consuming more resources than are available. More precisely, the problem is: \\begin{align*} \\text{maximize} & \\sum_{j=1}^n p_j x_j \\\\ \\text{subject to} & \\sum_{j=1}^n w_{ij} x_j \\leq b_i & \\forall i=1,\\ldots,m \\\\ & x_j \\in \\{0,1\\} & \\forall j=1,\\ldots,n \\end{align*} Random instance generator The class MultiKnapsackGenerator can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions n and m . The weights $w_{ij}$ are sampled independently from the provided distribution w . The capacity of knapsack $i$ is set to b_i = \\alpha_i \\sum_{j=1}^n w_{ij} where $\\alpha_i$, the tightness ratio, is sampled from the provided probability distribution alpha . To make the instances more challenging, the costs of the items are linearly correlated with their average weights. More specifically, the price of each item $j$ is set to: p_j = \\sum_{i=1}^m \\frac{w_{ij}}{m} + K u_j, where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled from the provided probability distributions K and u . If fix_w=True is provided, then $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as u and K are not constants, the generated instances will still not be completely identical. If a probability distribution w_jitter is provided, then item weights will be set to $w_{ij} \\gamma_{ij}$ where $\\gamma_{ij}$ is sampled from w_jitter . When combined with fix_w=True , this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead. By default, all generated prices, weights and capacities are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. References Fr\u00e9ville, Arnaud, and G\u00e9rard Plateau. An efficient preprocessing procedure for the multidimensional 0\u20131 knapsack problem. Discrete Applied Mathematics 49.1-3 (1994): 189-212. Fr\u00e9ville, Arnaud. 
Traveling Salesman Problem Problem definition Given a list of cities and the distance between each pair of cities, the problem asks for the shortest route starting at the first city, visiting each other city exactly once, then returning to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete problems. Random problem generator The class TravelingSalesmanGenerator can be used to generate random instances of this problem. Initially, the generator creates $n$ cities $(x_1,y_1),\\ldots,(x_n,y_n) \\in \\mathbb{R}^2$, where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions n , x and y . For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to: d_{i,j} = \\gamma_{i,j} \\sqrt{(x_i-x_j)^2 + (y_i - y_j)^2} where $\\gamma_{i,j}$ is sampled from the distribution gamma . If fix_cities=True is provided, the list of cities is kept the same for all generated instances. The $\\gamma_{i,j}$ values, and therefore also the distances, still vary across instances. By default, all distances $d_{i,j}$ are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. Challenge A Fixed list of 350 cities in the $[0, 1000]^2$ square $\\gamma_{i,j} \\sim U(0.95, 1.05)$ 500 training instances, 50 test instances TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), y=uniform(loc=0.0, scale=1000.0), n=randint(low=350, high=351), gamma=uniform(loc=0.95, scale=0.1), fix_cities=True, round=True, ) Multidimensional 0-1 Knapsack Problem Problem definition Given a set of $n$ items and $m$ types of resources (also called knapsacks ), the problem is to find a subset of items that maximizes profit without consuming more resources than are available. More precisely, the problem is: \\begin{align*} \\text{maximize} & \\sum_{j=1}^n p_j x_j \\\\ \\text{subject to} & \\sum_{j=1}^n w_{ij} x_j \\leq b_i & \\forall i=1,\\ldots,m \\\\ & x_j \\in \\{0,1\\} & \\forall j=1,\\ldots,n \\end{align*} Random instance generator The class MultiKnapsackGenerator can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions n and m . The weights $w_{ij}$ are sampled independently from the provided distribution w . The capacity of knapsack $i$ is set to b_i = \\alpha_i \\sum_{j=1}^n w_{ij} where $\\alpha_i$, the tightness ratio, is sampled from the provided probability distribution alpha . To make the instances more challenging, the costs of the items are linearly correlated with their average weights. More specifically, the price of each item $j$ is set to: p_j = \\sum_{i=1}^m \\frac{w_{ij}}{m} + K u_j, where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled from the provided probability distributions K and u . If fix_w=True is provided, then the $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as u and K are not constants, the generated instances will still not be completely identical. If a probability distribution w_jitter is provided, then item weights will be set to $w_{ij} \\gamma_{ij}$ where $\\gamma_{ij}$ is sampled from w_jitter . When combined with fix_w=True , this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead. By default, all generated prices, weights and capacities are rounded to the nearest integer. If round=False is provided, this rounding will be disabled.
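The capacity and price formulas above can be illustrated with a short numpy sketch; this is a hand-rolled illustration of the formulas using the Challenge A parameters below, not the generator's actual code:

import numpy as np

rng = np.random.default_rng(0)
n, m = 250, 10                         # items, knapsacks
w = rng.uniform(0, 1000, size=(m, n))  # weights w_ij ~ U(0, 1000)
alpha = np.full(m, 0.25)               # tightness ratios alpha_i
b = alpha * w.sum(axis=1)              # b_i = alpha_i * sum_j w_ij
K = 500.0                              # correlation coefficient
u = rng.uniform(0, 1, size=n)          # correlation multipliers u_j
p = w.sum(axis=0) / m + K * u          # p_j = sum_i w_ij / m + K u_j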
References Fr\u00e9ville, Arnaud, and G\u00e9rard Plateau. An efficient preprocessing procedure for the multidimensional 0\u20131 knapsack problem. Discrete Applied Mathematics 49.1-3 (1994): 189-212. Fr\u00e9ville, Arnaud. The multidimensional 0\u20131 knapsack problem: An overview. European Journal of Operational Research 155.1 (2004): 1-21. Challenge A 250 variables, 10 constraints, fixed weights $w \\sim U(0, 1000), \\gamma \\sim U(0.95, 1.05)$ $K = 500, u \\sim U(0, 1), \\alpha = 0.25$ 500 training instances, 50 test instances MultiKnapsackGenerator(n=randint(low=250, high=251), m=randint(low=10, high=11), w=uniform(loc=0.0, scale=1000.0), K=uniform(loc=500.0, scale=0.0), u=uniform(loc=0.0, scale=1.0), alpha=uniform(loc=0.25, scale=0.0), fix_w=True, w_jitter=uniform(loc=0.95, scale=0.1), )","title":"Problems"},{"location":"problems/#benchmark-problems-challenges-and-results","text":"MIPLearn provides a selection of benchmark problems and random instance generators, covering applications from different fields, that can be used to evaluate new learning-enhanced MIP techniques in a measurable and reproducible way. On this page, we describe these problems and the included instance generators, and present some benchmark results for LearningSolver with default parameters.","title":"Benchmark Problems, Challenges and Results"},{"location":"problems/#preliminaries","text":"","title":"Preliminaries"},{"location":"problems/#benchmark-challenges","text":"When evaluating the performance of a conventional MIP solver, benchmark sets , such as MIPLIB and TSPLIB, are typically used. The performance of newly proposed solvers or solution techniques is typically measured as the average (or total) running time the solver takes to solve the entire benchmark set. For Learning-Enhanced MIP solvers, it is also necessary to specify which instances the solver should be trained on (the training instances ) before solving the actual set of instances we are interested in (the test instances ). If the training instances are very similar to the test instances, we would expect a Learning-Enhanced Solver to present stronger performance benefits. In MIPLearn, each optimization problem comes with a set of benchmark challenges , which specify how the training and test instances should be generated. The first challenges are typically easier, in the sense that training and test instances are very similar. Later challenges gradually make the sets more distinct, and therefore harder to learn from.","title":"Benchmark challenges"},{"location":"problems/#baseline-results","text":"To illustrate the performance of LearningSolver , and to set a baseline for newly proposed techniques, we present on this page, for each benchmark challenge, a small set of computational results measuring the solution speed of the solver and the solution quality with default parameters. For more detailed computational studies, see references . We compare three solvers: baseline: Gurobi 9.0 with default settings (a conventional state-of-the-art MIP solver) ml-exact: LearningSolver with default settings, using Gurobi 9.0 as the internal MIP solver ml-heuristic: Same as above, but with mode=\"heuristic\" All experiments presented here were performed on a Linux server (Ubuntu Linux 18.04 LTS) with Intel Xeon Gold 6230s (2 processors, 40 cores, 80 threads) and 256 GB RAM (DDR4, 2933 MHz). All solvers were restricted to use 4 threads, with no time limits, and 10 instances were solved simultaneously.","title":"Baseline results"},{"location":"problems/#maximum-weight-stable-set-problem","text":"","title":"Maximum Weight Stable Set Problem"},{"location":"problems/#problem-definition","text":"Given a simple undirected graph $G=(V,E)$ and weights $w \\in \\mathbb{R}^V$, the problem is to find a stable set $S \\subseteq V$ that maximizes $\\sum_{v \\in S} w_v$. We recall that a subset $S \\subseteq V$ is a stable set if no two vertices of $S$ are adjacent. This is one of Karp's 21 NP-complete problems.","title":"Problem definition"},{"location":"problems/#random-instance-generator","text":"The class MaxWeightStableSetGenerator can be used to generate random instances of this problem, with user-specified probability distributions. When the constructor parameter fix_graph=True is provided, one random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ is generated when the constructor is called, where $n$ and $p$ are sampled from the user-provided probability distributions n and p . To generate each instance, the generator independently samples each $w_v$ from the user-provided probability distribution w . When fix_graph=False , a new random graph is generated for each instance, while the remaining parameters are sampled in the same way.","title":"Random instance generator"},{"location":"problems/#challenge-a","text":"Fixed random Erd\u0151s-R\u00e9nyi graph $G_{n,p}$ with $n=200$ and $p=5\\%$ Random vertex weights $w_v \\sim U(100, 150)$ 500 training instances, 50 test instances MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.), n=randint(low=200, high=201), p=uniform(loc=0.05, scale=0.0), fix_graph=True)","title":"Challenge A"},{"location":"problems/#traveling-salesman-problem","text":"","title":"Traveling Salesman Problem"},{"location":"problems/#problem-definition_1","text":"Given a list of cities and the distance between each pair of cities, the problem asks for the shortest route starting at the first city, visiting each other city exactly once, then returning to the first city. This problem is a generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete problems.","title":"Problem definition"},{"location":"problems/#random-problem-generator","text":"The class TravelingSalesmanGenerator can be used to generate random instances of this problem. Initially, the generator creates $n$ cities $(x_1,y_1),\\ldots,(x_n,y_n) \\in \\mathbb{R}^2$, where $n, x_i$ and $y_i$ are sampled independently from the provided probability distributions n , x and y . For each pair of cities $(i,j)$, the distance $d_{i,j}$ between them is set to: d_{i,j} = \\gamma_{i,j} \\sqrt{(x_i-x_j)^2 + (y_i - y_j)^2} where $\\gamma_{i,j}$ is sampled from the distribution gamma . If fix_cities=True is provided, the list of cities is kept the same for all generated instances. The $\\gamma_{i,j}$ values, and therefore also the distances, still vary across instances. By default, all distances $d_{i,j}$ are rounded to the nearest integer.
If round=False is provided, this rounding will be disabled.","title":"Random problem generator"},{"location":"problems/#challenge-a_1","text":"Fixed list of 350 cities in the $[0, 1000]^2$ square $\\gamma_{i,j} \\sim U(0.95, 1.05)$ 500 training instances, 50 test instances TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0), y=uniform(loc=0.0, scale=1000.0), n=randint(low=350, high=351), gamma=uniform(loc=0.95, scale=0.1), fix_cities=True, round=True, )","title":"Challenge A"},{"location":"problems/#multidimensional-0-1-knapsack-problem","text":"","title":"Multidimensional 0-1 Knapsack Problem"},{"location":"problems/#problem-definition_2","text":"Given a set of $n$ items and $m$ types of resources (also called knapsacks ), the problem is to find a subset of items that maximizes profit without consuming more resources than are available. More precisely, the problem is: \\begin{align*} \\text{maximize} & \\sum_{j=1}^n p_j x_j \\\\ \\text{subject to} & \\sum_{j=1}^n w_{ij} x_j \\leq b_i & \\forall i=1,\\ldots,m \\\\ & x_j \\in \\{0,1\\} & \\forall j=1,\\ldots,n \\end{align*}","title":"Problem definition"},{"location":"problems/#random-instance-generator_1","text":"The class MultiKnapsackGenerator can be used to generate random instances of this problem. The number of items $n$ and knapsacks $m$ are sampled from the user-provided probability distributions n and m . The weights $w_{ij}$ are sampled independently from the provided distribution w . The capacity of knapsack $i$ is set to b_i = \\alpha_i \\sum_{j=1}^n w_{ij} where $\\alpha_i$, the tightness ratio, is sampled from the provided probability distribution alpha . To make the instances more challenging, the costs of the items are linearly correlated with their average weights. More specifically, the price of each item $j$ is set to: p_j = \\sum_{i=1}^m \\frac{w_{ij}}{m} + K u_j, where $K$, the correlation coefficient, and $u_j$, the correlation multiplier, are sampled from the provided probability distributions K and u . If fix_w=True is provided, then the $w_{ij}$ are kept the same in all generated instances. This also implies that $n$ and $m$ are kept fixed. Although the prices and capacities are derived from $w_{ij}$, as long as u and K are not constants, the generated instances will still not be completely identical. If a probability distribution w_jitter is provided, then item weights will be set to $w_{ij} \\gamma_{ij}$ where $\\gamma_{ij}$ is sampled from w_jitter . When combined with fix_w=True , this argument may be used to generate instances where the weight of each item is roughly the same, but not exactly identical, across all instances. The prices of the items and the capacities of the knapsacks will be calculated as above, but using these perturbed weights instead. By default, all generated prices, weights and capacities are rounded to the nearest integer. If round=False is provided, this rounding will be disabled. References Fr\u00e9ville, Arnaud, and G\u00e9rard Plateau. An efficient preprocessing procedure for the multidimensional 0\u20131 knapsack problem. Discrete Applied Mathematics 49.1-3 (1994): 189-212. Fr\u00e9ville, Arnaud. The multidimensional 0\u20131 knapsack problem: An overview.
European Journal of Operational Research 155.1 (2004): 1-21.","title":"Random instance generator"},{"location":"problems/#challenge-a_2","text":"250 variables, 10 constraints, fixed weights $w \\sim U(0, 1000), \\gamma \\sim U(0.95, 1.05)$ $K = 500, u \\sim U(0, 1), \\alpha = 0.25$ 500 training instances, 50 test instances MultiKnapsackGenerator(n=randint(low=250, high=251), m=randint(low=10, high=11), w=uniform(loc=0.0, scale=1000.0), K=uniform(loc=500.0, scale=0.0), u=uniform(loc=0.0, scale=1.0), alpha=uniform(loc=0.25, scale=0.0), fix_w=True, w_jitter=uniform(loc=0.95, scale=0.1), )","title":"Challenge A"},{"location":"usage/","text":"Usage Installation MIPLearn is mainly written in Python, with some components written in Julia. For this reason, both Python 3.6+ and Julia 1.3+ are required. A mixed-integer solver is also required, and its Python bindings must be properly installed. Supported solvers are CPLEX and Gurobi. Optimization problems currently need to be specified in the Pyomo modeling language. A JuMP interface to the package is currently under development. To install MIPLearn, run the following commands: git clone https://github.com/ANL-CEEESA/MIPLearn.git cd MIPLearn make install After installation, the package miplearn should become available to Python. It can be imported as follows: import miplearn Note To install MIPLearn in another Python environment, switch to that environment before running make install . To install the package in development mode, run make develop instead. Using LearningSolver The main class provided by this package is LearningSolver , a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage: from miplearn import LearningSolver # List of user-provided instances training_instances = [...] test_instances = [...] # Create solver solver = LearningSolver() # Solve all training instances for instance in training_instances: solver.solve(instance) # Learn from training instances solver.fit(training_instances) # Solve all test instances for instance in test_instances: solver.solve(instance) In this example, we have two lists of user-provided instances: training_instances and test_instances . We start by solving all training instances. Since there is no historical information available at this point, the instances will be processed from scratch, with no ML acceleration. After solving each instance, the solver stores within each instance object the optimal solution, the optimal objective value, and other information that can be used to accelerate future solves. After all training instances are solved, we call solver.fit(training_instances) . This instructs the solver to train all its internal machine-learning models based on the solutions of the (solved) training instances. Subsequent calls to solver.solve(instance) will automatically use the trained Machine Learning models to accelerate the solution process. Describing problem instances Instances to be solved by LearningSolver must derive from the abstract class miplearn.Instance .
The following three abstract methods must be implemented: instance.to_model() , which returns a concrete Pyomo model corresponding to the instance; instance.get_instance_features() , which returns a 1-dimensional Numpy array of (numerical) features describing the entire instance; instance.get_variable_features(var_name, index) , which returns a 1-dimensional array of (numerical) features describing a particular decision variable. The first method is used by LearningSolver to construct a concrete Pyomo model, which will be provided to the internal MIP solver. The second and third methods provide an encoding of the instance, which can be used by the ML models to make predictions. In the knapsack problem, for example, an implementation may decide to provide as instance features the average weights, average prices, number of items and the size of the knapsack. The weight and the price of each individual item could be provided as variable features. See src/python/miplearn/problems/knapsack.py for a concrete example. An optional method which can be implemented is instance.get_variable_category(var_name, index) , which returns a category (a string, an integer or any hashable type) for each decision variable. If two variables have the same category, LearningSolver will use the same internal ML model to predict the values of both variables. By default, all variables belong to the \"default\" category, and therefore only one ML model is used for all variables. If the returned category is None , ML predictors will ignore the variable. It is not necessary to have a one-to-one correspondence between features and problem instances. One important (and deliberate) limitation of MIPLearn, however, is that get_instance_features() must always return arrays of the same length for all relevant instances of the problem. Similarly, get_variable_features(var_name, index) must also always return arrays of the same length for all variables in each category. It is up to the user to decide how to encode variable-length characteristics of the problem into fixed-length vectors. In graph problems, for example, graph embeddings can be used to reduce the (variable-length) lists of nodes and edges into a fixed-length structure that still preserves some properties of the graph. Different instance encodings may have a significant impact on performance.
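To make this concrete, here is a minimal sketch of a knapsack instance along the lines described above. The feature choices mirror the example in the text, but the exact base-class import path and method signatures are assumptions; see src/python/miplearn/problems/knapsack.py for the real implementation:

import numpy as np
import pyomo.environ as pe
from miplearn import Instance  # import path is an assumption

class KnapsackInstance(Instance):
    def __init__(self, weights, prices, capacity):
        self.weights = weights
        self.prices = prices
        self.capacity = capacity

    def to_model(self):
        # Concrete Pyomo model handed to the internal MIP solver
        model = pe.ConcreteModel()
        items = range(len(self.weights))
        model.x = pe.Var(items, domain=pe.Binary)
        model.obj = pe.Objective(
            expr=sum(self.prices[i] * model.x[i] for i in items),
            sense=pe.maximize,
        )
        model.capacity_con = pe.Constraint(
            expr=sum(self.weights[i] * model.x[i] for i in items) <= self.capacity,
        )
        return model

    def get_instance_features(self):
        # Fixed-length encoding of the entire instance
        return np.array([
            np.mean(self.weights),
            np.mean(self.prices),
            len(self.weights),
            self.capacity,
        ])

    def get_variable_features(self, var_name, index):
        # Fixed-length encoding of one decision variable
        return np.array([self.weights[index], self.prices[index]])

    def get_variable_category(self, var_name, index):
        # Optional: one shared ML model for all items
        return "default"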
Obtaining heuristic solutions By default, LearningSolver uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver. In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts. For more significant performance benefits, LearningSolver can also be configured to place additional trust in the Machine Learning predictors, by using the mode=\"heuristic\" constructor argument. When operating in this mode, if an ML model is statistically shown (through stratified k-fold cross validation ) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see references and benchmark results ). Danger The heuristic mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible. Saving and loading solver state After solving a large number of training instances, it may be desirable to save the current state of LearningSolver to disk, so that the solver can still use the acquired knowledge after the application restarts. This can be accomplished by using the standard pickle module, as the following example illustrates: from miplearn import LearningSolver import pickle # Solve training instances training_instances = [...] solver = LearningSolver() for instance in training_instances: solver.solve(instance) # Train machine-learning models solver.fit(training_instances) # Save trained solver to disk pickle.dump(solver, open(\"solver.pickle\", \"wb\")) # Application restarts... # Load trained solver from disk solver = pickle.load(open(\"solver.pickle\", \"rb\")) # Solve additional instances test_instances = [...] for instance in test_instances: solver.solve(instance) Solving training instances in parallel In many situations, training and test instances can be solved in parallel to accelerate the training process. LearningSolver provides the method parallel_solve(instances) to easily achieve this: from miplearn import LearningSolver training_instances = [...] solver = LearningSolver() solver.parallel_solve(training_instances, n_jobs=4) solver.fit(training_instances) # Test phase... test_instances = [...] solver.parallel_solve(test_instances) Current Limitations Only binary and continuous decision variables are currently supported.","title":"Usage"},{"location":"usage/#usage","text":"","title":"Usage"},{"location":"usage/#installation","text":"MIPLearn is mainly written in Python, with some components written in Julia. For this reason, both Python 3.6+ and Julia 1.3+ are required. A mixed-integer solver is also required, and its Python bindings must be properly installed. Supported solvers are CPLEX and Gurobi. Optimization problems currently need to be specified in the Pyomo modeling language. A JuMP interface to the package is currently under development. To install MIPLearn, run the following commands: git clone https://github.com/ANL-CEEESA/MIPLearn.git cd MIPLearn make install After installation, the package miplearn should become available to Python. It can be imported as follows: import miplearn Note To install MIPLearn in another Python environment, switch to that environment before running make install . To install the package in development mode, run make develop instead.","title":"Installation"},{"location":"usage/#using-learningsolver","text":"The main class provided by this package is LearningSolver , a learning-enhanced MIP solver which uses information from previously solved instances to accelerate the solution of new instances. The following example shows its basic usage: from miplearn import LearningSolver # List of user-provided instances training_instances = [...] test_instances = [...] # Create solver solver = LearningSolver() # Solve all training instances for instance in training_instances: solver.solve(instance) # Learn from training instances solver.fit(training_instances) # Solve all test instances for instance in test_instances: solver.solve(instance) In this example, we have two lists of user-provided instances: training_instances and test_instances . We start by solving all training instances.
Since there is no historical information available at this point, the instances will be processed from scratch, with no ML acceleration. After solving each instance, the solver stores within each instance object the optimal solution, the optimal objective value, and other information that can be used to accelerate future solves. After all training instances are solved, we call solver.fit(training_instances) . This instructs the solver to train all its internal machine-learning models based on the solutions of the (solved) training instances. Subsequent calls to solver.solve(instance) will automatically use the trained Machine Learning models to accelerate the solution process.","title":"Using LearningSolver"},{"location":"usage/#describing-problem-instances","text":"Instances to be solved by LearningSolver must derive from the abstract class miplearn.Instance . The following three abstract methods must be implemented: instance.to_model() , which returns a concrete Pyomo model corresponding to the instance; instance.get_instance_features() , which returns a 1-dimensional Numpy array of (numerical) features describing the entire instance; instance.get_variable_features(var_name, index) , which returns a 1-dimensional array of (numerical) features describing a particular decision variable. The first method is used by LearningSolver to construct a concrete Pyomo model, which will be provided to the internal MIP solver. The second and third methods provide an encoding of the instance, which can be used by the ML models to make predictions. In the knapsack problem, for example, an implementation may decide to provide as instance features the average weights, average prices, number of items and the size of the knapsack. The weight and the price of each individual item could be provided as variable features. See src/python/miplearn/problems/knapsack.py for a concrete example. An optional method which can be implemented is instance.get_variable_category(var_name, index) , which returns a category (a string, an integer or any hashable type) for each decision variable. If two variables have the same category, LearningSolver will use the same internal ML model to predict the values of both variables. By default, all variables belong to the \"default\" category, and therefore only one ML model is used for all variables. If the returned category is None , ML predictors will ignore the variable. It is not necessary to have a one-to-one correspondence between features and problem instances. One important (and deliberate) limitation of MIPLearn, however, is that get_instance_features() must always return arrays of the same length for all relevant instances of the problem. Similarly, get_variable_features(var_name, index) must also always return arrays of the same length for all variables in each category. It is up to the user to decide how to encode variable-length characteristics of the problem into fixed-length vectors. In graph problems, for example, graph embeddings can be used to reduce the (variable-length) lists of nodes and edges into a fixed-length structure that still preserves some properties of the graph. Different instance encodings may have a significant impact on performance.","title":"Describing problem instances"},{"location":"usage/#obtaining-heuristic-solutions","text":"By default, LearningSolver uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver.
In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts. For more significant performance benefits, LearningSolver can also be configured to place additional trust in the Machine Learning predictors, by using the mode=\"heuristic\" constructor argument. When operating in this mode, if an ML model is statistically shown (through stratified k-fold cross validation ) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see references and benchmark results ). Danger The heuristic mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible.","title":"Obtaining heuristic solutions"},{"location":"usage/#saving-and-loading-solver-state","text":"After solving a large number of training instances, it may be desirable to save the current state of LearningSolver to disk, so that the solver can still use the acquired knowledge after the application restarts. This can be accomplished by using the standard pickle module, as the following example illustrates: from miplearn import LearningSolver import pickle # Solve training instances training_instances = [...] solver = LearningSolver() for instance in training_instances: solver.solve(instance) # Train machine-learning models solver.fit(training_instances) # Save trained solver to disk pickle.dump(solver, open(\"solver.pickle\", \"wb\")) # Application restarts... # Load trained solver from disk solver = pickle.load(open(\"solver.pickle\", \"rb\")) # Solve additional instances test_instances = [...] for instance in test_instances: solver.solve(instance)","title":"Saving and loading solver state"},{"location":"usage/#solving-training-instances-in-parallel","text":"In many situations, training and test instances can be solved in parallel to accelerate the training process. LearningSolver provides the method parallel_solve(instances) to easily achieve this: from miplearn import LearningSolver training_instances = [...] solver = LearningSolver() solver.parallel_solve(training_instances, n_jobs=4) solver.fit(training_instances) # Test phase... test_instances = [...]
solver.parallel_solve(test_instances)","title":"Solving training instances in parallel"},{"location":"usage/#current-limitations","text":"Only binary and continuous decision variables are currently supported.","title":"Current Limitations"}]} \ No newline at end of file diff --git a/docs/sitemap.xml b/docs/sitemap.xml index 4673250..c14dcb5 100644 --- a/docs/sitemap.xml +++ b/docs/sitemap.xml @@ -1,33 +1,27 @@ - - + None - 2020-03-17 + 2020-04-12 daily - - + None - 2020-03-17 + 2020-04-12 daily - - + None - 2020-03-17 + 2020-04-12 daily - - + None - 2020-03-17 + 2020-04-12 daily - - + None - 2020-03-17 + 2020-04-12 daily - - + None - 2020-03-17 + 2020-04-12 daily \ No newline at end of file diff --git a/docs/sitemap.xml.gz b/docs/sitemap.xml.gz index df7029a..92cd958 100644 Binary files a/docs/sitemap.xml.gz and b/docs/sitemap.xml.gz differ diff --git a/docs/usage/index.html b/docs/usage/index.html index 9cd627f..c998faa 100644 --- a/docs/usage/index.html +++ b/docs/usage/index.html @@ -8,7 +8,7 @@ - + Usage - MIPLearn @@ -153,8 +153,10 @@

Installation

MIPLearn is mainly written in Python, with some components written in Julia. For this reason, both Python 3.6+ and Julia 1.3+ are required. A mixed-integer solver is also required, and -its Python bindings must be properly installed. Currently supported solvers include CPLEX and -Gurobi. To install MIPLearn, run the following commands:

+its Python bindings must be properly installed. Supported solvers are CPLEX and +Gurobi. Optimization problems currently need to be specified in the Pyomo modeling language. +A JuMP interface to the package is currently under development.

+

To install MIPLearn, run the following commands:

git clone https://github.com/ANL-CEEESA/MIPLearn.git
 cd MIPLearn
 make install
diff --git a/src/docs/usage.md b/src/docs/usage.md
index 51018eb..01c8b30 100644
--- a/src/docs/usage.md
+++ b/src/docs/usage.md
@@ -5,8 +5,11 @@
 
 MIPLearn is mainly written in Python, with some components written in Julia. For this
 reason, both Python 3.6+ and Julia 1.3+ are required. A mixed-integer solver is also required, and
-its Python bindings must be properly installed. Currently supported solvers include CPLEX and
-Gurobi. To install MIPLearn, run the following commands: 
+its Python bindings must be properly installed. Supported solvers are CPLEX and
+Gurobi. Optimization problems currently need to be specified in the Pyomo modeling language.
+A JuMP interface to the package is currently under development.
+
+To install MIPLearn, run the following commands: 
 
 ```bash
 git clone https://github.com/ANL-CEEESA/MIPLearn.git