Improve Julia interface

pull/3/head
Alinson S. Xavier 5 years ago
parent 7caca9a882
commit 295358c553

@ -17,9 +17,9 @@ version = "0.5.8"
[[Bzip2_jll]]
deps = ["Libdl", "Pkg"]
git-tree-sha1 = "03a44490020826950c68005cafb336e5ba08b7e8"
uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0"
version = "1.0.6+4"
[[CEnum]]
git-tree-sha1 = "215a9aa4a1f23fbd05b92769fdd62559488d70e9"
@ -145,6 +145,12 @@ git-tree-sha1 = "b34d7cef7b337321e97d22242c3c2b91f476748e"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.0"
[[JSON2]]
deps = ["Dates", "Parsers", "Test"]
git-tree-sha1 = "66397cc6c08922f98a28ab05a8d3002f9853b129"
uuid = "2535ab7d-5cd8-5a07-80ac-9b1792aadce3"
version = "0.3.2"
[[JSONSchema]]
deps = ["BinaryProvider", "HTTP", "JSON"]
git-tree-sha1 = "b0a7f9328967df5213691d318a03cf70ea8c76b1"
@ -350,6 +356,6 @@ version = "1.2.0"
[[Zlib_jll]]
deps = ["Libdl", "Pkg"]
git-tree-sha1 = "fdd89e5ab270ea0f2a0174bd9093e557d06d4bfa"
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.11+16"

@ -7,6 +7,7 @@ version = "0.1.0"
CPLEX = "a076750e-1247-5638-91d2-ce28b192dca0"
CPLEXW = "cfecb002-79c2-11e9-35be-cb59aa640f85"
Gurobi = "2e9cd046-0924-5485-92f1-d5272153d98b"
JSON2 = "2535ab7d-5cd8-5a07-80ac-9b1792aadce3"
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"

@ -8,14 +8,34 @@ module MIPLearn
using PyCall
miplearn = pyimport("miplearn")
Instance = miplearn.Instance
LearningSolver = miplearn.LearningSolver
InternalSolver = miplearn.solvers.internal.InternalSolver
BenchmarkRunner = miplearn.BenchmarkRunner
include("jump_solver.jl")
include("knapsack.jl")
"""
    @pycall expr

Evaluate `expr` (typically a call into Python made through PyCall). If it
raises a `PyCall.PyError` whose payload is a Julia-originated error message
(a string starting with `"Julia"`), strip the embedded stacktrace and
re-raise it as a plain Julia `error`, so the user sees the original Julia
message instead of a Python traceback. Any other exception is rethrown
unchanged.
"""
macro pycall(expr)
    quote
        err_msg = nothing
        result = nothing
        try
            result = $(esc(expr))
        catch err
            # Only inspect `err.val` after confirming this is a PyError.
            # The original code read `err.val.args[1]` unconditionally,
            # which itself throws (no `val` field) for ordinary Julia
            # exceptions and masked the real error.
            if err isa PyCall.PyError
                args = err.val.args[1]
                if (args isa String) && startswith(args, "Julia")
                    err_msg = replace(args, r"Stacktrace.*" => "")
                else
                    rethrow()  # preserves the original backtrace
                end
            else
                rethrow()
            end
        end
        # Raise outside the catch block so the cleaned-up message is not
        # nested inside the original exception.
        if err_msg !== nothing
            error(err_msg)
        end
        result
    end
end
include("log.jl")
include("jump_solver.jl")
include("learning_solver.jl")
include("instance.jl")
export Instance, LearningSolver, InternalSolver, JuMPSolver, BenchmarkRunner
export Instance, BenchmarkRunner
end # module

@ -0,0 +1,61 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using JSON2
import Base: dump
# Default (fallback) implementations of the MIPLearn instance interface.
# User-defined instance data types override these with specialized methods.

"Return the feature vector describing the instance as a whole (default placeholder)."
function get_instance_features(instance)
    return [0.0]
end

"Return the feature vector for variable `var` at position `index` (default placeholder)."
function get_variable_features(instance, var, index)
    return [0.0]
end

"Return the lazy constraints violated by `model` (default: none)."
function find_violated_lazy_constraints(instance, model)
    return []
end

"Build the lazy constraint identified by `v` for `model` (default: `nothing`)."
function build_lazy_constraint(instance, model, v)
    return nothing
end
"""
    dump(instance::PyCall.PyObject, filename)

Persist the Python-wrapped instance to `filename` by delegating to its
Python `dump` method, with `@pycall` translating Julia-side errors.
"""
function dump(instance::PyCall.PyObject, filename)
    return @pycall instance.dump(filename)
end

"""
    load!(instance::PyCall.PyObject, filename)

Restore the Python-wrapped instance in place from `filename` by delegating
to its Python `load` method, with `@pycall` translating Julia-side errors.
"""
function load!(instance::PyCall.PyObject, filename)
    return @pycall instance.load(filename)
end
"""
    @Instance(klass)

Generate a Python-side class (via PyCall's `@pydef`) deriving from
`miplearn.Instance` that wraps the Julia data type `klass`. The wrapper
keeps an instance of `klass` in `self.data` and forwards the MIPLearn
instance interface (`to_model`, feature extraction, lazy-constraint hooks)
to the corresponding Julia methods defined for `klass`.
"""
macro Instance(klass)
quote
@pydef mutable struct Wrapper <: Instance
function __init__(self, args...; kwargs...)
# Construct the wrapped Julia struct from the constructor arguments.
self.data = $(esc(klass))(args...; kwargs...)
end
function dump(self, filename)
# Serialize `data` to a JSON string before delegating to the Python
# Instance.dump, then restore the original Julia object so the
# in-memory wrapper is unchanged by dumping.
prev_data = self.data
self.data = JSON2.write(prev_data)
Instance.dump(self, filename)
self.data = prev_data
end
function load(self, filename)
# Python Instance.load leaves a JSON string in `data`; decode it
# back into the wrapped Julia type.
Instance.load(self, filename)
self.data = JSON2.read(self.data, $(esc(klass)))
end
# `to_model` is escaped so it resolves in the caller's module,
# where the user defines `to_model(::klass)`.
to_model(self) =
$(esc(:to_model))(self.data)
get_instance_features(self) =
get_instance_features(self.data)
get_variable_features(self, var, index) =
get_variable_features(self.data, var, index)
function find_violated_lazy_constraints(self, model)
find_violated_lazy_constraints(self.data, model)
end
function build_lazy_constraint(self, model, v)
build_lazy_constraint(self.data, model, v)
end
end
end
end
export get_instance_features,
get_variable_features,
find_violated_lazy_constraints,
build_lazy_constraint,
dump,
load!,
@Instance

@ -17,6 +17,7 @@ mutable struct JuMPSolverData
model
bin_vars
solution::Union{Nothing,Dict{String,Dict{String,Float64}}}
time_limit::Union{Nothing, Float64}
end
@ -61,6 +62,9 @@ end
function solve(data::JuMPSolverData; tee::Bool=false)
instance, model = data.instance, data.model
if data.time_limit != nothing
JuMP.set_time_limit_sec(model, data.time_limit)
end
wallclock_time = 0
found_lazy = []
log = ""
@ -176,8 +180,7 @@ function set_warm_start!(data::JuMPSolverData, solution)
@info "Setting warm start values for $count variables"
end
@pydef mutable struct JuMPSolver <: InternalSolver
@pydef mutable struct JuMPSolver <: miplearn.solvers.internal.InternalSolver
function __init__(self; optimizer)
self.data = JuMPSolverData(nothing, # basename_idx_to_var
nothing, # var_to_basename_idx
@ -186,6 +189,7 @@ end
nothing, # model
nothing, # bin_vars
nothing, # solution
nothing, # time limit
)
end
@ -208,7 +212,7 @@ end
self.data.solution
set_time_limit(self, time_limit) =
JuMP.set_time_limit_sec(self.data.model, time_limit)
self.data.time_limit = time_limit
set_gap_tolerance(self, gap_tolerance) =
@warn "JuMPSolver: set_gap_tolerance not implemented"
@ -228,3 +232,5 @@ end
error("JuMPSolver.clear_warm_start should never be called")
end
export JuMPSolver, solve!, fit!, add!

@ -1,32 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using PyCall
@pydef mutable struct KnapsackInstance <: Instance
function __init__(self, weights, prices, capacity)
self.weights = weights
self.prices = prices
self.capacity = capacity
end
function to_model(self)
model = Model()
n = length(self.weights)
@variable(model, x[1:n], Bin)
@objective(model, Max, sum(x[i] * self.prices[i] for i in 1:n))
@constraint(model, sum(x[i] * self.weights[i] for i in 1:n) <= self.capacity)
return model
end
function get_instance_features(self)
return [0.]
end
function get_variable_features(self, var, index)
return [0.]
end
end
export KnapsackInstance

@ -0,0 +1,28 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
"""
Julia-side handle wrapping a `miplearn.LearningSolver` Python object.

Interact with it through `solve!`, `fit!` and `add!`, which delegate to
the corresponding Python methods.
"""
struct LearningSolver
py::PyCall.PyObject  # the underlying Python LearningSolver object
end
"""
    LearningSolver(; optimizer, kwargs...)

Construct a `LearningSolver` backed by the Python `miplearn.LearningSolver`,
using a `JuMPSolver` built around `optimizer` as the internal MIP solver.
All remaining keyword arguments are forwarded to the Python constructor.
"""
function LearningSolver(; optimizer, kwargs...)::LearningSolver
    internal = JuMPSolver(optimizer=optimizer)
    py_solver = @pycall miplearn.LearningSolver(; kwargs..., solver=internal)
    return LearningSolver(py_solver)
end
"Solve `instance` with `solver`, forwarding keyword arguments to the Python `solve` method."
function solve!(solver::LearningSolver, instance; kwargs...)
    return @pycall solver.py.solve(instance; kwargs...)
end

"Train `solver` on the previously solved `instances` via the Python `fit` method."
function fit!(solver::LearningSolver, instances; kwargs...)
    return @pycall solver.py.fit(instances; kwargs...)
end

"Register an extra `component` with `solver` via the Python `add` method."
function add!(solver::LearningSolver, component; kwargs...)
    return @pycall solver.py.add(component; kwargs...)
end
export LearningSolver

@ -0,0 +1,34 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
# Import the MIPLearn interface methods so they can be extended for
# KnapsackData below. NOTE(review): the original was missing the comma after
# `get_variable_features`, which ended the import list early and left
# `find_violated_lazy_constraints` as a bare (no-op) expression, so that
# method was never actually imported for extension.
import MIPLearn: get_instance_features,
                 get_variable_features,
                 find_violated_lazy_constraints
using JuMP
"""
Plain-Julia container describing a 0/1 knapsack instance.

Fields (untyped; set positionally by the default constructor):
- `weights`: per-item weights used in the capacity constraint
- `prices`: per-item profits maximized in the objective
- `capacity`: knapsack capacity (right-hand side of the constraint)
"""
struct KnapsackData
weights
prices
capacity
end
"""
    to_model(data::KnapsackData)

Build the JuMP formulation of the 0/1 knapsack problem described by `data`:
one binary selection variable per item, a single capacity constraint, and
profit maximization.
"""
function to_model(data::KnapsackData)
    m = Model()
    nitems = length(data.weights)
    @variable(m, x[1:nitems], Bin)
    @constraint(m, sum(x[i] * data.weights[i] for i in 1:nitems) <= data.capacity)
    @objective(m, Max, sum(x[i] * data.prices[i] for i in 1:nitems))
    return m
end
"Instance-level features for the knapsack example (constant placeholder)."
get_instance_features(data::KnapsackData) = [0.0]

"Per-variable features for the knapsack example (constant placeholder)."
get_variable_features(data::KnapsackData, var, index) = [0.0]
KnapsackInstance = @Instance(KnapsackData)

@ -7,15 +7,32 @@ using MIPLearn
using CPLEX
using Gurobi
# Round-trip a knapsack instance through dump/load! and verify that the
# wrapped Julia data survives serialization intact.
@testset "Instance" begin
    w = [23., 26., 20., 18.]
    p = [505., 352., 458., 220.]
    c = 67.0
    original = KnapsackInstance(w, p, c)
    dump(original, "tmp/instance.json.gz")
    # Load into a fresh, blank instance and compare every field.
    restored = KnapsackInstance([0.0], [0.0], 0.0)
    load!(restored, "tmp/instance.json.gz")
    @test restored.data.weights == w
    @test restored.data.prices == p
    @test restored.data.capacity == c
end
@testset "LearningSolver" begin
for optimizer in [CPLEX.Optimizer, Gurobi.Optimizer]
instance = KnapsackInstance([23., 26., 20., 18.],
[505., 352., 458., 220.],
67.0)
model = instance.to_model()
solver = LearningSolver(solver=JuMPSolver(optimizer=optimizer),
mode="heuristic")
stats = solver.solve(instance, model)
solver = LearningSolver(optimizer=optimizer,
mode="heuristic",
time_limit=90)
stats = solve!(solver, instance)
@test instance.solution["x"]["1"] == 1.0
@test instance.solution["x"]["2"] == 0.0
@test instance.solution["x"]["3"] == 1.0
@ -27,7 +44,7 @@ using Gurobi
@test round(instance.lp_solution["x"]["3"], digits=3) == 1.000
@test round(instance.lp_solution["x"]["4"], digits=3) == 0.000
@test round(instance.lp_value, digits=3) == 1287.923
solver.fit([instance])
solver.solve(instance)
fit!(solver, [instance])
solve!(solver, instance)
end
end

@ -8,6 +8,7 @@ using MIPLearn
MIPLearn.setup_logger()
# Top-level test suite: each included file defines its own nested @testset.
@testset "MIPLearn" begin
include("knapsack.jl")
include("jump_solver_test.jl")
include("learning_solver_test.jl")
end
Loading…
Cancel
Save