diff --git a/src/jl/MCP_heuristics.jl b/src/jl/MCP_heuristics.jl
index 035f66c498b651680ec449dbfdf269dafc0027f5..85fab6b37ed919365e72fcec54dbe4b267beb8d2 100644
--- a/src/jl/MCP_heuristics.jl
+++ b/src/jl/MCP_heuristics.jl
@@ -438,7 +438,9 @@ function stochastic_threshold_greedy_algorithm(D::Array{Float64,2}, c::Float64,
     obj_incumbent = obj_candidate
     locations_added += 1
   end
-  return ind_incumbent, obj_incumbent
+  x_incumbent = zeros(Float64, L)
+  x_incumbent[ind_ones_incumbent] .= 1.
+  return x_incumbent, obj_incumbent
 end
 
 function stochastic_threshold_greedy_algorithm_trajectory(D::Array{Float64,2}, c::Float64, n::Float64, p::Float64)
@@ -483,7 +485,9 @@ function stochastic_threshold_greedy_algorithm_trajectory(D::Array{Float64,2}, c
     obj_incumbent_vec[locations_added+1] = obj_incumbent
     locations_added += 1
   end
-  return ind_incumbent, obj_incumbent
+  x_incumbent = zeros(Float64, L)
+  x_incumbent[ind_ones_incumbent] .= 1.
+  return x_incumbent, obj_incumbent
 end
 
 
@@ -596,5 +600,7 @@ function simulated_annealing_local_search(D::Array{Float64, 2}, c::Float64, n::F
     end
   end
   obj_incumbent = sum(y_incumbent)
-  return ind_ones_incumbent, obj_incumbent, obj
-end
\ No newline at end of file
+  x_incumbent = zeros(Float64, L)
+  x_incumbent[ind_ones_incumbent] .= 1.
+  return x_incumbent, obj_incumbent, obj
+end
diff --git a/src/jl/SitingHeuristics.jl b/src/jl/SitingHeuristics.jl
index 6b9c88cee954dd2b90ba2fe85ab12cd2b99485a5..d2fc05824d8f76ce550ed3b4bfa7e84282663b61 100644
--- a/src/jl/SitingHeuristics.jl
+++ b/src/jl/SitingHeuristics.jl
@@ -35,12 +35,12 @@ function main_MIRSA(index_dict, deployment_dict, D, c, N, I, E, T_init, R, run,
   run = string(run)
   p = string(p)
   data_path = string(data_path)
-  legacy_index = Vector{Float64}(undef, 0)
+  legacy_index = Vector{Int64}(undef, 0)
 
   W, L = size(D)
   P = maximum(values(index_dict))
-  n = deployment_dict[1]
+  n = convert(Float64, deployment_dict[1])
 
   if run == "MIR"
 
@@ -48,6 +48,7 @@ function main_MIRSA(index_dict, deployment_dict, D, c, N, I, E, T_init, R, run,
     x_init = solve_MILP(D, c, n, "Gurobi")
 
     for r = 1:R
+      println(r)
       x_sol[r, :], LB_sol[r], obj_sol[r, :] = simulated_annealing_local_search(D, c, n, N, I, E, x_init, T_init, legacy_index)
     end
 
diff --git a/src/jl/optimisation_models.jl b/src/jl/optimisation_models.jl
index eea6c7f6d90ca803a795673ff908e3f9f8ac2390..5a8823deb0d09986a2222a08f9d4fec43d75cd81 100644
--- a/src/jl/optimisation_models.jl
+++ b/src/jl/optimisation_models.jl
@@ -7,7 +7,7 @@ function solve_MILP(D::Array{Float64, 2}, c::Float64, n::Float64, solver::String
   L = size(D)[2]
 
   if solver == "Gurobi"
-    MILP_model = Model(optimizer_with_attributes(Gurobi.Optimizer, "TimeLimit" => 7200., "MIPGap" => 0.05))
+    MILP_model = Model(optimizer_with_attributes(Gurobi.Optimizer, "TimeLimit" => 7200., "MIPGap" => 0.01, "LogToConsole" => 0))
   else
     println("Please use Cbc or Gurobi")
     throw(ArgumentError)
diff --git a/src/main.py b/src/main.py
index e6b9cc42b7757e999be4c4f4c0361866ad9f0ef4..0413fa5013b7058c5e146212a36470be71972927 100644
--- a/src/main.py
+++ b/src/main.py
@@ -154,7 +154,7 @@ if __name__ == '__main__':
         end = time.time()
         print(f"Average CPU time for c={c}: {round((end-start)/params['no_runs'], 1)} s")
 
-        output_folder = init_folder(model_parameters, c, suffix=f"_LS_{args['LS_init_algorithm']}")
+        output_folder = init_folder(model_parameters, c, suffix=f"_LS_{args['LS_init_algorithm']}_bis")
         with open(join(output_folder, 'config_model.yaml'), 'w') as outfile:
             yaml.dump(model_parameters, outfile, default_flow_style=False, sort_keys=False)
 
@@ -204,7 +204,7 @@ if __name__ == '__main__':
         end = time.time()
         print(f"Average CPU time for c={c}: {round((end-start)/params['no_runs'], 1)} s")
 
-        output_folder = init_folder(model_parameters, c, suffix=f"_GRED_{params['algorithm']}")
+        output_folder = init_folder(model_parameters, c, suffix=f"_GRED_{params['algorithm']}_bis")
 
         pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb'))
         pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
@@ -227,7 +227,7 @@ if __name__ == '__main__':
         end = time.time()
         print(f"Average CPU time for c={c}: {round((end-start)/params['no_runs'], 1)} s")
 
-        output_folder = init_folder(model_parameters, c, suffix=f"_GRED_{params['algorithm']}_p{params['p']}")
+        output_folder = init_folder(model_parameters, c, suffix=f"_GRED_{params['algorithm']}_p{params['p']}_bis")
 
         pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb'))
         pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb'))