diff --git a/config_model.yml b/config_model.yml
index 889b65c01a2f6d44dc50fa7ad665d431567138b1..e49eb3fe3cee8eb2a8a0d280b1c360d7a987ea96 100644
--- a/config_model.yml
+++ b/config_model.yml
@@ -31,15 +31,14 @@ siting_params:
       mipgap: 0.01
       timelimit: 43200
       threads: 0
-    MIRSA:
-      # Simulated Annealing with Local Search
+    LS:
       set: False
       neighborhood: 1
       no_iterations: 2000
       no_epochs: 500
-      initial_temp: 200.
-      no_runs: 50
-      algorithm: 'SALS' # 'GLS' (Greedy Local Search), 'RSSA', 'SALS', 'STGHSAS', 'STGHSAP'
+      initial_temp: 100.
+      no_runs: 100
+      algorithm: 'MIR' # 'SGH', 'RS'
     GRED_DET:
       set: False
       no_runs: 100
@@ -57,14 +56,3 @@ siting_params:
       no_epochs: 1000
       no_runs: 100
       algorithm: 'RS'
-    SGHLS:
-      # Stochastic greedy & simulated annealing
-      set: False
-      neighborhood: 1
-      no_iterations: 2000
-      no_epochs: 500
-      initial_temp: 200.
-      no_runs_LS: 50
-      algorithm: 'STGHSAP' # 'STGHSAP'
-      p: 27 # 10% of 10150 locations
-      no_runs_SGH: 200
diff --git a/src/jl/SitingHeuristics.jl b/src/jl/SitingHeuristics.jl
index d76cc1b4a9b83066d567574e02049306e5960899..b0954cf163cddd8d8f7b7017e8d50d5c143c79d6 100644
--- a/src/jl/SitingHeuristics.jl
+++ b/src/jl/SitingHeuristics.jl
@@ -5,7 +5,22 @@ using Random
 include("optimisation_models.jl")
 include("MCP_heuristics.jl")
 
-function main_MIRSA(index_dict, deployment_dict, D, c, N, I, E, T_init, R, run)
+pickle = pyimport("pickle")
+
+#################### Useful Functions #######################
+
+function myunpickle(filename)
+
+    r = nothing
+    @pywith pybuiltin("open")(filename,"rb") as f begin
+
+        r = pickle.load(f)
+    end
+    return r
+
+end
+
+function main_MIRSA(index_dict, deployment_dict, D, c, N, I, E, T_init, R, run, p, data_path)
 
     index_dict = Dict([(convert(Int64, k), convert(Int64, index_dict[k])) for k in keys(index_dict)])
     deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)])
@@ -18,22 +33,24 @@ function main_MIRSA(index_dict, deployment_dict, D, c, N, I, E, T_init, R, run)
     T_init = convert(Float64, T_init)
     R = convert(Int64, R)
     run = string(run)
+    p = string(p)
+    data_path = string(data_path)
 
     W, L = size(D)
     P = maximum(values(index_dict))
     n_partitions = [deployment_dict[i] for i in 1:P]
 
-    if run == "SALS"
+    if run == "MIR"
 
         x_sol, LB_sol, obj_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R), Array{Float64, 2}(undef, R, I)
         x_init = solve_MILP_partitioning(D, c, n_partitions, index_dict, "Gurobi")
-
+
         for r = 1:R
             x_sol[r, :], LB_sol[r], obj_sol[r, :] = simulated_annealing_local_search_partition(D, c, n_partitions, N, I, E, x_init, T_init, index_dict)
         end
 
-    elseif run == "RSSA"
+    elseif run == "RS"
 
         x_sol, LB_sol, obj_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R), Array{Float64, 2}(undef, R, I)
         x_init = zeros(Int64, L)
@@ -50,68 +67,25 @@ function main_MIRSA(index_dict, deployment_dict, D, c, N, I, E, T_init, R, run)
         x_init = convert.(Float64, x_init)
 
         for r = 1:R
+            println(r)
             x_sol[r, :], LB_sol[r], obj_sol[r, :] = simulated_annealing_local_search_partition(D, c, n_partitions, N, I, E, x_init, T_init, index_dict)
         end
 
-    else
-        println("No such run available.")
-        throw(ArgumentError)
-    end
-
-    return x_sol, LB_sol, obj_sol
-
-end
-
-function main_SGHLS(index_dict, deployment_dict, D, c, N, I, E, T_init, p, R_SGH, R_LS, run)
-
-    index_dict = Dict([(convert(Int64, k), convert(Int64, index_dict[k])) for k in keys(index_dict)])
-    deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)])
-    n = convert(Float64, deployment_dict[1])
-    D = convert.(Float64, D)
-
-    c = convert(Float64, c)
-    N = convert(Int64, N)
-    I = convert(Int64, I)
-    E = convert(Int64, E)
-    T_init = convert(Float64, T_init)
-    p = convert(Float64, p)
-    R_SGH = convert(Int64, R_SGH)
-    R_LS = convert(Int64, R_LS)
-    run = string(run)
-
-    n_partitions = [deployment_dict[i] for i in 1:maximum(values(index_dict))]
+    elseif run == "SGH"
 
-    W, L = size(D)
-
-    if run == "STGHSAS"
-
-        x_init, LB_init = Array{Float64, 2}(undef, R_SGH, L), Array{Float64, 1}(undef, R_SGH)
-        x_sol, LB_sol, obj_sol = Array{Float64, 2}(undef, R_LS, L), Array{Float64, 1}(undef, R_LS), Array{Float64, 2}(undef, R_LS, I)
+        x_sol, LB_sol, obj_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R), Array{Float64, 2}(undef, R, I)
 
-        for r = 1:R_SGH
-            x_init[r, :], LB_init[r] = stochastic_threshold_greedy_algorithm(D, c, n, p)
-        end
+        # Read LB_init from file
+        LB_init = myunpickle(joinpath(data_path, "objective_vector.p"))
+        # Read x_init from file
+        x_init = myunpickle(joinpath(data_path, "solution_matrix.p"))
 
         LB_init_best = argmax(LB_init)
         x_init_best = x_init[LB_init_best, :]
 
-        for r = 1:R_LS
-            x_sol[r, :], LB_sol[r], obj_sol[r, :] = simulated_annealing_local_search_partition(D, c, n_partitions, N, I, E, x_init_best, T_init, index_dict)
-        end
-
-    elseif run == "STGHSAP"
-
-        x_sol, LB_sol, obj_sol = Array{Float64, 2}(undef, R_LS, L), Array{Float64, 1}(undef, R_LS), Array{Float64, 2}(undef, R_LS, I)
-
-        for r = 1:R_LS
-
-            x_init, LB_init = Array{Float64, 2}(undef, 1, L), Array{Float64, 1}(undef, 1)
-            x_init[1, :], LB_init[1] = stochastic_threshold_greedy_algorithm(D, c, n, p)
-
-            x_init_best = x_init[1, :]
-
+        for r = 1:R
+            println(r)
             x_sol[r, :], LB_sol[r], obj_sol[r, :] = simulated_annealing_local_search_partition(D, c, n_partitions, N, I, E, x_init_best, T_init, index_dict)
-
         end
 
     else
@@ -123,8 +97,6 @@ function main_SGHLS(index_dict, deployment_dict, D, c, N, I, E, T_init, p, R_SGH
 
 end
 
-
-
 function main_GRED(deployment_dict, D, c, R, p, run)
 
     deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)])
@@ -216,4 +188,4 @@ function main_LSEA(index_dict, deployment_dict, D, c, N, I, E, run)
 
     return x_sol, LB_sol, obj_sol
 
-end
\ No newline at end of file
+end
diff --git a/src/jl/optimisation_models.jl b/src/jl/optimisation_models.jl
index 2ae04ed955c369d60665ab284cb5e8fee30de436..eea6c7f6d90ca803a795673ff908e3f9f8ac2390 100644
--- a/src/jl/optimisation_models.jl
+++ b/src/jl/optimisation_models.jl
@@ -50,7 +50,7 @@ function solve_MILP_partitioning(D::Array{Float64, 2}, c::Float64, n::Array{Int6
 
     # Selects solver
     if solver == "Gurobi"
-        MILP_model = Model(optimizer_with_attributes(Gurobi.Optimizer, "TimeLimit" => 7200., "MIPGap" => 0.01))
+        MILP_model = Model(optimizer_with_attributes(Gurobi.Optimizer, "TimeLimit" => 7200., "MIPGap" => 0.01, "LogToConsole" => 0))
     else
         println("Please use Cbc or Gurobi")
         throw(ArgumentError)
diff --git a/src/main.py b/src/main.py
index 109c98665fd2868a459ecb90676677c9a84dd893..a02a5559ed553d014a950ba5e3513f082b83c57a 100644
--- a/src/main.py
+++ b/src/main.py
@@ -20,11 +20,12 @@ def parse_args():
     parser.add_argument('--p', type=int, default=None)
     parser.add_argument('--run_BB', type=bool, default=False)
     parser.add_argument('--run_MIR', type=bool, default=False)
-    parser.add_argument('--run_MIRSA', type=bool, default=False)
+    parser.add_argument('--run_LS', type=bool, default=False)
     parser.add_argument('--run_GRED_DET', type=bool, default=False)
     parser.add_argument('--run_GRED_STO', type=bool, default=False)
     parser.add_argument('--run_RAND', type=bool, default=False)
-    parser.add_argument('--run_SGHLS', type=bool, default=False)
+    parser.add_argument('--LS_init_algorithm', type=str, default=None)
+    parser.add_argument('--init_sol_folder', type=str, default=None)
 
     parsed_args = vars(parser.parse_args())
 
@@ -88,18 +89,15 @@ if __name__ == '__main__':
 
     siting_parameters['solution_method']['BB']['set'] = args['run_BB']
     siting_parameters['solution_method']['BB']['mir'] = args['run_MIR']
-    siting_parameters['solution_method']['MIRSA']['set'] = args['run_MIRSA']
+    siting_parameters['solution_method']['LS']['set'] = args['run_LS']
     siting_parameters['solution_method']['GRED_DET']['set'] = args['run_GRED_DET']
     siting_parameters['solution_method']['GRED_STO']['set'] = args['run_GRED_STO']
     siting_parameters['solution_method']['GRED_STO']['p'] = args['p']
     siting_parameters['solution_method']['RAND']['set'] = args['run_RAND']
-    siting_parameters['solution_method']['SGHLS']['set'] = args['run_SGHLS']
-    siting_parameters['solution_method']['SGHLS']['p'] = args['p']
 
     c = args['c']
 
-    if not single_true([args['run_BB'], args['run_MIRSA'], args['run_GRED_DET'], args['run_GRED_STO'],
-                        args['run_RAND'], args['run_SGHLS']]):
+    if not single_true([args['run_BB'], args['run_LS'], args['run_GRED_DET'], args['run_GRED_STO'], args['run_RAND']]):
         raise ValueError(' More than one run selected in the argparser.')
 
     if siting_parameters['solution_method']['BB']['set']:
@@ -132,12 +130,14 @@ if __name__ == '__main__':
         pickle.dump(x_values, open(join(output_folder, 'solution_matrix.p'), 'wb'))
         pickle.dump(objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
 
-    elif siting_parameters['solution_method']['MIRSA']['set']:
+    elif siting_parameters['solution_method']['LS']['set']:
 
-        custom_log(' MIRSA chosen to solve the IP. Opening a Julia instance.')
-        params = siting_parameters['solution_method']['MIRSA']
+        custom_log(f" LS_{args['LS_init_algorithm']} chosen to solve the IP. Opening a Julia instance.")
+        params = siting_parameters['solution_method']['LS']
 
         jl_dict = generate_jl_input(deployment_dict, site_coordinates)
+        path_to_sol = args['init_sol_folder'] + str(args['c']) + '_GRED_STGH_p' + str(args['p'])
+        path_to_init_sol_folder = join(data_path, 'output', path_to_sol)
 
         import julia
         j = julia.Julia(compiled_modules=False)
@@ -149,11 +149,12 @@ if __name__ == '__main__':
                                                            criticality_data, c, params['neighborhood'],
                                                            params['no_iterations'], params['no_epochs'],
                                                            params['initial_temp'], params['no_runs'],
-                                                           params['algorithm'])
+                                                           args['LS_init_algorithm'],
+                                                           args['p'], path_to_init_sol_folder)
         end = time.time()
         print(f"Average CPU time for c={c}: {round((end-start)/params['no_runs'], 1)} s")
 
-        output_folder = init_folder(model_parameters, c, suffix=f"_MIRSA_{params['algorithm']}")
+        output_folder = init_folder(model_parameters, c, suffix=f"_LS_{args['LS_init_algorithm']}")
 
         with open(join(output_folder, 'config_model.yaml'), 'w') as outfile:
             yaml.dump(model_parameters, outfile, default_flow_style=False, sort_keys=False)
@@ -231,33 +232,5 @@ if __name__ == '__main__':
         pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb'))
         pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
 
-    elif siting_parameters['solution_method']['SGHLS']['set']:
-
-        params = siting_parameters['solution_method']['SGHLS']
-        custom_log(f" SHGLS_{params['algorithm']} chosen to solve the IP. Opening a Julia instance.")
-
-        jl_dict = generate_jl_input(deployment_dict, site_coordinates)
-
-        import julia
-        j = julia.Julia(compiled_modules=False)
-        from julia import Main
-        Main.include("jl/SitingHeuristics.jl")
-
-        start = time.time()
-        jl_selected, jl_objective, jl_traj = Main.main_SGHLS(jl_dict['index_dict'], jl_dict['deployment_dict'],
-                                                             criticality_data, c, params['neighborhood'],
-                                                             params['no_iterations'], params['no_epochs'],
-                                                             params['initial_temp'], params['p'],
-                                                             params['no_runs_SGH'], params['no_runs_LS'],
-                                                             params['algorithm'])
-        end = time.time()
-        print(f"Average CPU time for c={c}: {round((end-start)/params['no_runs_LS'], 1)} s")
-
-        output_folder = init_folder(model_parameters, c, suffix=f"_SGHLS_{params['algorithm']}_p{params['p']}")
-
-        pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb'))
-        pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
-        pickle.dump(jl_traj, open(join(output_folder, 'trajectory_matrix.p'), 'wb'))
-
     else:
         raise ValueError(' This solution method is not available. ')
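
Note on the new hand-off introduced by this diff: the "SGH" branch of main_MIRSA no longer generates its own greedy starting point; it reads the solution_matrix.p and objective_vector.p files written by a prior GRED_STO run, via the myunpickle() helper and PyCall's pickle module. The snippet below is a minimal, illustrative Python sketch of that round trip (not part of the diff); the temporary folder and array shapes are hypothetical stand-ins for an output folder such as the '<c>_GRED_STGH_p<p>' directory assembled in main.py.

# Illustrative sketch of the pickle hand-off behind the new LS/"SGH" warm start.
import pickle
import tempfile
from os.path import join

import numpy as np

output_folder = tempfile.mkdtemp()  # stand-in for the GRED_STO output folder

# Write side: what main.py already does at the end of a GRED_STO run.
x_values = np.random.randint(0, 2, size=(100, 10150))   # hypothetical (no_runs, no_locations)
objective = np.random.rand(100)                          # one objective value per run
pickle.dump(x_values, open(join(output_folder, 'solution_matrix.p'), 'wb'))
pickle.dump(objective, open(join(output_folder, 'objective_vector.p'), 'wb'))

# Read side: Python equivalent of myunpickle() in SitingHeuristics.jl,
# which loads the same files through PyCall before the local search starts.
x_init = pickle.load(open(join(output_folder, 'solution_matrix.p'), 'rb'))
LB_init = pickle.load(open(join(output_folder, 'objective_vector.p'), 'rb'))

best = int(np.argmax(LB_init))   # mirrors LB_init_best = argmax(LB_init)
x_init_best = x_init[best, :]    # warm start handed to the simulated annealing local search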