From c5b77d33274c2f0c5837402386dc4589327cf7ce Mon Sep 17 00:00:00 2001 From: dcradu <dcradu@uliege.be> Date: Tue, 30 Mar 2021 14:16:25 +0200 Subject: [PATCH] updated greedy algorithms, minor changes for testing on zeus and pan --- config_model.yml | 38 +-- src/helpers.py | 4 +- src/jl/MCP_heuristics.jl | 593 ++++++++----------------------------- src/jl/SitingHeuristics.jl | 35 +-- src/main.py | 63 ++-- src/models.py | 9 +- src/tools.py | 4 +- 7 files changed, 194 insertions(+), 552 deletions(-) diff --git a/config_model.yml b/config_model.yml index fb86ffd..90608e9 100644 --- a/config_model.yml +++ b/config_model.yml @@ -1,15 +1,16 @@ # Path to data folder data_path: 'D:/ULg_PhD_work/datasets/resite_ip/' #data_path: '/data/dcradu/resite_ip/' +#data_path: '../../EVO/dcradu/resite_ip/' # Spatial resolution (in degrees) of the potential sites. -spatial_resolution: 0.28 +spatial_resolution: 0.25 # Start time and end time of the analysis. -time_slice: ['2014-01-01T00:00', '2014-01-07T23:00'] +time_slice: ['2017-01-01T00:00', '2019-12-31T23:00'] # Technologies to deploy. -regions: ['FR', 'ES'] +regions: ['EU'] technologies: ['wind_onshore'] -deployments: [[19], [12]] +deployments: [[560]] siting_params: smooth_measure: 'mean' @@ -23,32 +24,33 @@ siting_params: solution_method: BB: # Branch & Bound - set: False - c: 1 + set: True + mir: True + c: [1, 112, 224, 336, 448, 560] solver: 'gurobi' - mipgap: 0.05 - timelimit: 1800 - threads: 4 + mipgap: 0.01 + timelimit: 3600 + threads: 1 MIRSA: # Simulated Annealing with Local Search - set: True - c: [1, 2, 4, 10, 19] + set: False + c: [1, 112, 224, 336, 448, 560] neighborhood: 1 - no_iterations: 1000 - no_epochs: 1000 + no_iterations: 2000 + no_epochs: 500 initial_temp: 200. no_runs: 1 - algorithm: 'SALS' #'GLS' + algorithm: 'SALS' # 'GLS' (Greedy Local Search) GRED: set: False - epsilon: 0.001 - c: [1, 106, 212, 318, 424, 530] + p: 0.10 # [0.05, 0.10, 0.15, 0.20] + c: [1, 112, 224, 336, 448, 560] no_runs: 1 - algorithm: 'RGH' # SGA + algorithm: 'STGH' # TGH, STGH RAND: # Random Search set: False - c: [318] + c: [1, 112, 224, 336, 448, 560] no_iterations: 50 no_epochs: 500 no_runs: 1 diff --git a/src/helpers.py b/src/helpers.py index ed4b8a6..5069de5 100644 --- a/src/helpers.py +++ b/src/helpers.py @@ -339,9 +339,9 @@ def return_coordinates_from_shapefiles(resource_dataset, shapefiles_region): def retrieve_load_data_partitions(data_path, date_slice, alpha, delta, regions, norm_type): - load_data_fn = join(data_path, 'input/load_data', 'load_2009_2018.csv') + load_data_fn = join(data_path, 'input/load_data', 'load_2000_2019.csv') load_data = read_csv(load_data_fn, index_col=0) - load_data.index = date_range('2009-01-01T00:00', '2018-12-31T23:00', freq='H') + load_data.index = date_range('2000-01-02T00:00', '2019-12-31T23:00', freq='H') load_data_sliced = load_data.loc[date_slice[0]:date_slice[1]] regions_list = return_region_divisions(regions, data_path) diff --git a/src/jl/MCP_heuristics.jl b/src/jl/MCP_heuristics.jl index ef3f516..eedd0fb 100644 --- a/src/jl/MCP_heuristics.jl +++ b/src/jl/MCP_heuristics.jl @@ -1,189 +1,6 @@ using StatsBase using Distributions -#################### Randomised Greedy Heuristic ####################### - -# Description: function implementing a randomised greedy heuristic for unpartitioned geographical regions -# -# Comments: 1) types of inputs should match those declared in argument list -# 2) at every iteration, randomisation is used to select the location that should be removed from the locations set when several 
locations are "tied", i.e., removing each of them leads to the same decrease in objective value -# -# Inputs: D - criticality matrix with entries in {0, 1}, where rows represent time windows and columns represent locations -# c - global criticality threshold -# n - number of sites to deploy -# -# Outputs: x_incumbent - vector with entries in {0, 1} and cardinality n representing the incumbent solution at the last iteration -# LB_incumbent - objective value of incumbent solution, provides a lower bound on optimal objective -# - -function randomised_greedy_heuristic(D::Array{Float64,2}, c::Float64, n::Float64) - - W, L = size(D) - x_incumbent = zeros(Float64, L) - ind_incumbent = [i for i in 1:L] - Dx_incumbent = zeros(Float64, W) - @inbounds for ind in ind_incumbent - Dx_incumbent .+= view(D, :, ind) - end - y_incumbent = Dx_incumbent .>= c - LB_incumbent = sum(y_incumbent) - Dx_tmp = Vector{Float64}(undef, W) - y_tmp = Vector{Float64}(undef, W) - locations_removed = 0 - @inbounds while locations_removed < L - n - LB_diff_candidate = -LB_incumbent - ind_candidate_list = Vector{Int64}(undef, 0) - @inbounds for ind in ind_incumbent - Dx_tmp .= Dx_incumbent .- view(D, :, ind) - y_tmp .= Dx_tmp .>= c - LB_diff_tmp = sum(y_tmp) - LB_incumbent - if LB_diff_tmp >= LB_diff_candidate - if LB_diff_tmp > LB_diff_candidate - ind_candidate_list = [ind] - LB_diff_candidate = LB_diff_tmp - else - ind_candidate_list = union(ind, ind_candidate_list) - end - end - end - ind_candidate = sample(ind_candidate_list) - ind_incumbent = setdiff(ind_incumbent, ind_candidate) - Dx_incumbent .= Dx_incumbent .- view(D, :, ind_candidate) - y_incumbent .= Dx_incumbent .>= c - LB_incumbent = sum(y_incumbent) - locations_removed += 1 - end - x_incumbent[ind_incumbent] .= 1. - return x_incumbent, LB_incumbent - -end - -function time_randomised_greedy_heuristic(D::Array{Float64,2}, c::Float64, n::Float64) - @time randomised_greedy_heuristic(D, c, n) -end - - -#################### Classic Greedy Heuristic ####################### - -# Description: function implementing a classic greedy algorithm (Nemhauser et al, 1978) with random selection of tied locations for unpartitioned geographical regions -# -# Comments: 1) types of inputs should match those declared in argument list -# 2) at every iteration, randomisation is used to select the location that should be removed from the locations set when several locations are "tied", i.e., removing each of them leads to the same decrease in objective value -# -# Inputs: D - criticality matrix with entries in {0, 1}, where rows represent time windows and columns represent locations -# c - global criticality threshold -# n - number of sites to deploy -# -# Outputs: x_incumbent - vector with entries in {0, 1} and cardinality n representing the incumbent solution at the last iteration -# LB_incumbent - objective value of incumbent solution, provides a lower bound on optimal objective -# - -function classic_greedy_algorithm(D::Array{Float64,2}, c::Float64, n::Float64) - - W, L = size(D) - x_incumbent = zeros(Float64, L) - ind_compl_incumbent = [i for i in 1:L] - ind_incumbent = [] - Dx_incumbent = zeros(Float64, W) - y_incumbent = zeros(Float64, W) - LB_incumbent = 0 - Dx_tmp = Vector{Float64}(undef, W) - y_tmp = Vector{Float64}(undef, W) - locations_added = 0 - @inbounds while locations_added < n - println("Locations added ", locations_added, "/", n) - LB_diff_candidate = 0 - ind_candidate_list = Vector{Int64}(undef, 0) - @inbounds for ind in setdiff(ind_compl_incumbent, ind_incumbent) - 
Dx_tmp .= Dx_incumbent .+ view(D, :, ind) - y_tmp .= Dx_tmp .>= c - LB_diff_tmp = sum(y_tmp) - LB_incumbent - if LB_diff_tmp > LB_diff_candidate - ind_candidate_list = [ind] - LB_diff_candidate = LB_diff_tmp - elseif LB_diff_tmp == LB_diff_candidate - ind_candidate_list = union(ind, ind_candidate_list) - end - end - ind_candidate = sample(ind_candidate_list) - ind_incumbent = union(ind_incumbent, ind_candidate) - Dx_incumbent .= Dx_incumbent .+ view(D, :, ind_candidate) - y_incumbent .= Dx_incumbent .>= c - LB_incumbent = sum(y_incumbent) - locations_added += 1 - end - x_incumbent[ind_incumbent] .= 1. - return x_incumbent, LB_incumbent - -end - -function time_classic_greedy_algorithm(D::Array{Float64,2}, c::Float64, n::Float64) - @time classic_greedy_algorithm(D, c, n) -end - -#################### Stochastic Greedy Algorithm for Submodular Maximization ####################### - -# Description: function implementing the stochastic greedy algorithm for submodular maximization (Mirzasoleiman et al, 2015) for unpartitioned geographical regions -# -# Comments: 1) types of inputs should match those declared in argument list -# 2) at every iteration, randomisation is used to select the location that should be removed from the locations set when several locations are "tied", i.e., removing each of them leads to the same decrease in objective value -# -# Inputs: D - criticality matrix with entries in {0, 1}, where rows represent time windows and columns represent locations -# c - global criticality threshold -# n - number of sites to deploy -# -# Outputs: x_incumbent - vector with entries in {0, 1} and cardinality n representing the incumbent solution at the last iteration -# LB_incumbent - objective value of incumbent solution, provides a lower bound on optimal objective -# - -function stochastic_greedy_algorithm(D::Array{Float64,2}, c::Float64, n::Float64, epsilon::Float64) - - W, L = size(D) - s = convert(Int64, round((L/n)*log(1/epsilon))) - x_incumbent = zeros(Float64, L) - random_ind_set = Vector{Int64}(undef, s) - ind_compl_incumbent = [i for i in 1:L] - ind_incumbent = [] - Dx_incumbent = zeros(Float64, W) - y_incumbent = zeros(Float64, W) - LB_incumbent = 0 - Dx_tmp = Vector{Float64}(undef, W) - y_tmp = Vector{Float64}(undef, W) - locations_added = 0 - @inbounds while locations_added < n - LB_diff_candidate = 0 - ind_candidate_list = Vector{Int64}(undef, 0) - random_ind_set .= sample(ind_compl_incumbent, s, replace=false) - @inbounds for ind in random_ind_set - Dx_tmp .= Dx_incumbent .+ view(D, :, ind) - y_tmp .= Dx_tmp .>= c - LB_diff_tmp = sum(y_tmp) - LB_incumbent - if LB_diff_tmp > LB_diff_candidate - ind_candidate_list = [ind] - LB_diff_candidate = LB_diff_tmp - elseif LB_diff_tmp == LB_diff_candidate - ind_candidate_list = union(ind, ind_candidate_list) - end - end - ind_candidate = sample(ind_candidate_list) - ind_incumbent = union(ind_incumbent, ind_candidate) - ind_compl_incumbent = setdiff(ind_compl_incumbent, ind_candidate) - Dx_incumbent .= Dx_incumbent .+ view(D, :, ind_candidate) - y_incumbent .= Dx_incumbent .>= c - LB_incumbent = sum(y_incumbent) - locations_added += 1 - end - x_incumbent[ind_incumbent] .= 1. 
- return x_incumbent, LB_incumbent - -end - -function time_stochastic_greedy_algorithm(D::Array{Float64,2}, c::Float64, n::Float64, epsilon::Float64) - @time stochastic_greedy_algorithm(D, c, n, epsilon) -end - - - #################### Random Search Algorithm ####################### function random_search(D::Array{Float64, 2}, c::Float64, n::Float64, R::Int64) @@ -211,287 +28,6 @@ function random_search(D::Array{Float64, 2}, c::Float64, n::Float64, R::Int64) return x_incumbent, LB_incumbent end -function time_random_search(D::Array{Float64,2}, c::Float64, n::Float64, R::Int64) - @time random_search(D, c, n, R) -end - - - -#################### Randomised Greedy Heuristic with Partitioning Constraints (Dict Implementation) ####################### - -# Description: function implementing a randomised greedy heuristic for geographical regions partitioned into a set of subregions -# -# Comments: 1) types of inputs should match those declared in argument list -# 2) the implementation relies both on dict and array data structures (as opposed to an array-only implementation) -# 3) at every iteration, randomisation is used to select the location that should be removed from the locations set when several locations are "tied", i.e., removing each of them leads to the same decrease in objective value -# -# Inputs: D - criticality matrix with entries in {0, 1}, where rows represent time windows and columns represent locations -# c - global criticality threshold -# n - number of sites to deploy -# locations_regions_mapping - dictionary associating its subregion (value) to each location (key) -# -# Outputs: x_incumbent - vector with entries in {0, 1} and cardinality n representing the incumbent solution at the last iteration -# LB_incumbent - objective value of incumbent solution, provides a lower bound on optimal objective -# - -function randomised_greedy_heuristic_partition(D::Array{Float64,2}, c::Float64, n::Vector{Int64}, locations_regions_mapping::Dict{Int64, Int64}) - - W, L = size(D) - P = length(n) - regions = [i for i = 1:P] - locations_count_per_region = zeros(Int64, P) - Dx_incumbent = zeros(Float64, W) - @inbounds for ind = 1:L - locations_count_per_region[locations_regions_mapping[ind]] += 1 - Dx_incumbent .+= view(D, :, ind) - end - - x_incumbent = zeros(Float64, L) - ind_ones = [i for i in 1:L] - ind_incumbent = Dict([(r, Vector{Int64}(undef, locations_count_per_region[r])) for r in regions]) - regions_start_pointer = 1 - @inbounds for r in regions - regions_end_pointer = regions_start_pointer + locations_count_per_region[r] - ind_incumbent[r] = ind_ones[regions_start_pointer:(regions_end_pointer-1)] - regions_start_pointer = regions_end_pointer - end - y_incumbent = Dx_incumbent .>= c - LB_incumbent = sum(y_incumbent) - Dx_tmp = Vector{Float64}(undef, W) - y_tmp = Vector{Float64}(undef, W) - - locations_removed = 0 - @inbounds while locations_removed < L - sum(n) - LB_diff_candidate = -LB_incumbent - ind_candidate_list = Vector{Int64}(undef, 0) - @inbounds for r in regions - if locations_count_per_region[r] > n[r] - @inbounds for ind in ind_incumbent[r] - Dx_tmp .= Dx_incumbent .- view(D, :, ind) - y_tmp .= Dx_tmp .>= c - LB_diff_tmp = sum(y_tmp) - LB_incumbent - if LB_diff_tmp >= LB_diff_candidate - if LB_diff_tmp > LB_diff_candidate - ind_candidate_list = [ind] - LB_diff_candidate = LB_diff_tmp - else - ind_candidate_list = union(ind_candidate_list, ind) - end - end - end - end - end - ind_candidate = sample(ind_candidate_list) - ind_incumbent[locations_regions_mapping[ind_candidate]] = 
setdiff(ind_incumbent[locations_regions_mapping[ind_candidate]], ind_candidate) - locations_count_per_region[locations_regions_mapping[ind_candidate]] -= 1 - Dx_incumbent .= Dx_incumbent .- view(D, :, ind_candidate) - y_incumbent .= Dx_incumbent .>= c - LB_incumbent = sum(y_incumbent) - locations_removed += 1 - end - @inbounds for r in regions - x_incumbent[ind_incumbent[r]] .= 1. - end - return x_incumbent, LB_incumbent - -end - -function time_randomised_greedy_heuristic_partition(D::Array{Float64,2}, c::Float64, n::Vector{Int64}, locations_regions_mapping::Dict{Int64, Int64}) - @time randomised_greedy_heuristic_partition(D, c, n, locations_regions_mapping) -end - -#################### Greedy Local Search ####################### - -# Description: function implementing a greedy local search for unpartitioned geographical regions -# -# Comments: 1) types of inputs should match those declared in argument list -# -# Inputs: D - criticality matrix with entries in {0, 1}, where rows represent time windows and columns represent locations -# c - global criticality threshold -# n - number of sites to deploy -# N - number of locations to swap in order to obtain a neighbour of the incumbent solution -# I - number of iterations (outer loop), defines the number of times the incumbent solution may be updated -# E - number of epochs (inner loop), defines the number of neighbours of the incumbent solution sampled at each iteration -# x_init - initial solution, vector with entries in {0, 1}, with cardinality n and whose dimension is compatible with D -# -# Outputs: x_incumbent - vector with entries in {0, 1} and cardinality n representing the incumbent solution at the last iteration -# LB_incumbent - objective value of incumbent solution, provides a lower bound on optimal objective -# obj - vector storing the incumbent objective value at each iteration -# - -function greedy_local_search(D::Array{Float64, 2}, c::Float64, n::Float64, N::Int64, I::Int64, E::Int64, x_init::Array{Float64, 1}) - - W, L = size(D) - - # Pre-allocate lower bound vector - obj = Vector{Int64}(undef, I) - - # Pre-allocate x-related arrays - x_incumbent = Vector{Float64}(undef, L) - ind_ones_incumbent = Vector{Int64}(undef, convert(Int64, n)) - ind_zeros_incumbent = Vector{Int64}(undef, L-convert(Int64, n)) - ind_ones2zeros_candidate = Vector{Int64}(undef, N) - ind_zeros2ones_candidate = Vector{Int64}(undef, N) - ind_ones2zeros_tmp = Vector{Int64}(undef, N) - ind_zeros2ones_tmp = Vector{Int64}(undef, N) - - # Pre-allocate y-related arrays - y_incumbent = Array{Bool}(undef, W, 1) - y_tmp = Array{Bool}(undef, W, 1) - c_threshold = c .* ones(Float64, W, 1) - - Dx_incumbent = Array{Float64}(undef, W, 1) - Dx_tmp = Array{Float64}(undef, W, 1) - - # Initialise - ind_ones_incumbent .= findall(x_init .== 1.) - ind_zeros_incumbent .= findall(x_init .== 0.) 
- Dx_incumbent .= sum(view(D, :, ind_ones_incumbent), dims = 2) - y_incumbent .= Dx_incumbent .>= c_threshold - - # Iterate - for i = 1:I - obj[i] = sum(y_incumbent) - delta_candidate = -1000000 - for e = 1:E - # Sample from neighbourhood - ind_ones2zeros_tmp .= sample(ind_ones_incumbent, N, replace=false) - ind_zeros2ones_tmp .= sample(ind_zeros_incumbent, N, replace=false) - - # Compute y and associated objective value - Dx_tmp .= Dx_incumbent .+ sum(view(D, :, ind_zeros2ones_tmp), dims = 2) .- sum(view(D, :, ind_ones2zeros_tmp), dims = 2) - y_tmp .= Dx_tmp .>= c_threshold - - # Update objective difference - delta_tmp = sum(y_tmp) - obj[i] - - # Update candidate solution - if delta_tmp > delta_candidate - ind_ones2zeros_candidate .= ind_ones2zeros_tmp - ind_zeros2ones_candidate .= ind_zeros2ones_tmp - delta_candidate = delta_tmp - end - end - if delta_candidate > 0 - ind_ones_incumbent .= union(setdiff(ind_ones_incumbent, ind_ones2zeros_candidate), ind_zeros2ones_candidate) - ind_zeros_incumbent .= union(setdiff(ind_zeros_incumbent, ind_zeros2ones_candidate), ind_ones2zeros_candidate) - Dx_incumbent .= Dx_incumbent .+ sum(view(D, :, ind_zeros2ones_candidate), dims = 2) .- sum(view(D, :, ind_ones2zeros_candidate), dims = 2) - y_incumbent .= Dx_incumbent .>= c_threshold - end - end - x_incumbent[ind_ones_incumbent] .= 1. - x_incumbent[ind_zeros_incumbent] .= 0. - LB = sum(y_incumbent) - return x_incumbent, LB, obj - -end - -function time_greedy_local_search(D::Array{Float64, 2}, c::Float64, n::Float64, N::Int64, I::Int64, E::Int64, x_init::Array{Float64, 1}) - @time greedy_local_search(D, c, n, N, I, E, x_init) -end - -#################### Simulated Annealing Local Search ####################### - -# Description: function implementing a simulated annealing-inspired local search for unpartitioned geographical regions -# -# Comments: 1) types of inputs should match those declared in argument list -# -# Inputs: D - criticality matrix with entries in {0, 1}, where rows represent time windows and columns represent locations -# c - global criticality threshold -# n - number of sites to deploy -# N - number of locations to swap in order to obtain a neighbour of the incumbent solution -# I - number of iterations (outer loop), defines the number of times the incumbent solution may be updated -# E - number of epochs (inner loop), defines the number of neighbours of the incumbent solution sampled at each iteration -# x_init - initial solution, vector with entries in {0, 1}, with cardinality n and whose dimension is compatible with D -# T_init - initial temperature from which the (exponentially-decreasing) temperature schedule is constructed -# -# Outputs: x_incumbent - vector with entries in {0, 1} and cardinality n representing the incumbent solution at the last iteration -# LB_incumbent - objective value of incumbent solution, provides a lower bound on optimal objective -# obj - vector storing the incumbent objective value at each iteration -# - -function simulated_annealing_local_search(D::Array{Float64, 2}, c::Float64, n::Float64, N::Int64, I::Int64, E::Int64, x_init::Array{Float64, 1}, T_init::Float64) - - W, L = size(D) - - # Pre-allocate lower bound vector - obj = Vector{Int64}(undef, I) - - # Pre-allocate x-related arrays - x_incumbent = Vector{Float64}(undef, L) - ind_ones_incumbent = Vector{Int64}(undef, convert(Int64, n)) - ind_zeros_incumbent = Vector{Int64}(undef, L-convert(Int64, n)) - ind_ones2zeros_candidate = Vector{Int64}(undef, N) - ind_zeros2ones_candidate = Vector{Int64}(undef, 
N) - ind_ones2zeros_tmp = Vector{Int64}(undef, N) - ind_zeros2ones_tmp = Vector{Int64}(undef, N) - - # Pre-allocate y-related arrays - y_incumbent = Array{Bool}(undef, W, 1) - y_tmp = Array{Bool}(undef, W, 1) - c_threshold = c .* ones(Float64, W, 1) - - Dx_incumbent = Array{Float64}(undef, W, 1) - Dx_tmp = Array{Float64}(undef, W, 1) - - # Initialise - ind_ones_incumbent = findall(x_init .== 1.) - ind_zeros_incumbent = findall(x_init .== 0.) - Dx_incumbent .= sum(view(D, :, ind_ones_incumbent), dims = 2) - y_incumbent .= Dx_incumbent .>= c_threshold - - # Iterate - for i = 1:I - obj[i] = sum(y_incumbent) - delta_candidate = -1000000 - for e = 1:E - # Sample from neighbourhood - ind_ones2zeros_tmp .= sample(ind_ones_incumbent, N, replace=false) - ind_zeros2ones_tmp .= sample(ind_zeros_incumbent, N, replace=false) - - # Compute y and associated objective value - Dx_tmp .= Dx_incumbent .+ sum(view(D, :, ind_zeros2ones_tmp), dims = 2) .- sum(view(D, :, ind_ones2zeros_tmp), dims = 2) - y_tmp .= Dx_tmp .>= c_threshold - - # Update objective difference - delta_tmp = sum(y_tmp) - obj[i] - - # Update candidate solution - if delta_tmp > delta_candidate - ind_ones2zeros_candidate .= ind_ones2zeros_tmp - ind_zeros2ones_candidate .= ind_zeros2ones_tmp - delta_candidate = delta_tmp - end - end - if delta_candidate > 0 - ind_ones_incumbent .= union(setdiff(ind_ones_incumbent, ind_ones2zeros_candidate), ind_zeros2ones_candidate) - ind_zeros_incumbent .= union(setdiff(ind_zeros_incumbent, ind_zeros2ones_candidate), ind_ones2zeros_candidate) - Dx_incumbent .= Dx_incumbent .+ sum(view(D, :, ind_zeros2ones_candidate), dims = 2) .- sum(view(D, :, ind_ones2zeros_candidate), dims = 2) - y_incumbent .= Dx_incumbent .>= c_threshold - else - T = T_init * exp(-10*i/I) - p = exp(delta_candidate / T) - d = Binomial(1, p) - b = rand(d) - if b == 1 - ind_ones_incumbent .= union(setdiff(ind_ones_incumbent, ind_ones2zeros_candidate), ind_zeros2ones_candidate) - ind_zeros_incumbent .= union(setdiff(ind_zeros_incumbent, ind_zeros2ones_candidate), ind_ones2zeros_candidate) - Dx_incumbent .= Dx_incumbent .+ sum(view(D, :, ind_zeros2ones_candidate), dims = 2) .- sum(view(D, :, ind_ones2zeros_candidate), dims = 2) - y_incumbent .= Dx_incumbent .>= c_threshold - end - end - end - x_incumbent[ind_ones_incumbent] .= 1. - x_incumbent[ind_zeros_incumbent] .= 0. 
- LB = sum(y_incumbent) - return x_incumbent, LB, obj - -end - -function time_simulated_annealing_local_search(D::Array{Float64, 2}, c::Float64, n::Float64, N::Int64, I::Int64, E::Int64, x_init::Array{Float64, 1}, T_init::Float64) - @time simulated_annealing_local_search(D, c, n, N, I, E, x_init, T_init) -end - #################### Greedy Local Search w/ Partitioning Constraints (Dict Implementation) ####################### # Description: function implementing a greedy local search for geographical regions partitioned into a set of subregions @@ -629,10 +165,6 @@ function greedy_local_search_partition(D::Array{Float64, 2}, c::Float64, n::Vect end -function time_greedy_local_search_partition(D::Array{Float64, 2}, c::Float64, n::Vector{Int64}, N::Int64, I::Int64, E::Int64, x_init::Array{Float64, 1}, T_init::Float64, locations_regions_mapping::Dict{Int64, Int64}) - @time greedy_local_search_partition(D, c, n, N, I, E, x_init, T_init, locations_regions_mapping) -end - #################### Simulated Annealing Local Search w/ Partitioning Constraints (Dict Implementation) ####################### # Description: function implementing a simulated annealing-inspired local search for geographical regions partitioned into a set of subregions @@ -784,6 +316,127 @@ function simulated_annealing_local_search_partition(D::Array{Float64, 2}, c::Flo end -function time_simulated_annealing_local_search_partition(D::Array{Float64, 2}, c::Float64, n::Vector{Int64}, N::Int64, I::Int64, E::Int64, x_init::Array{Float64, 1}, T_init::Float64, locations_regions_mapping::Dict{Int64, Int64}) - @time simulated_annealing_local_search_partition(D, c, n, N, I, E, x_init, T_init, locations_regions_mapping) +################### Multi-Threshold Greedy Heuristic ####################### + +# Description: function implementing a multi-threshold greedy algorithm with random selection of locations leading to same objective gain for unpartitioned geographical regions (single cardinality constraint) +# +# Comments: 1) types of inputs should match those declared in argument list +# 2) at every iteration, randomisation is used to select the location that should be removed from the locations set when several locations are "tied", i.e., removing each of them leads to the same decrease in objective value +# +# Inputs: D - criticality matrix with entries in {0, 1}, where rows represent time windows and columns represent locations +# c - global criticality threshold +# n - number of sites to deploy +# +# Outputs: x_incumbent - vector with entries in {0, 1} and cardinality n representing the incumbent solution at the last iteration +# LB_incumbent - objective value of incumbent solution, provides a lower bound on optimal objective +# + +function threshold_greedy_algorithm(D::Array{Float64,2}, c::Float64, n::Float64) + + W, L = size(D) + x_incumbent = zeros(Float64, L) + ind_compl_incumbent = [i for i in 1:L] + ind_incumbent = [] + Dx_incumbent = zeros(Float64, W) + y_incumbent = zeros(Float64, W) + obj_incumbent = 0 + Dx_tmp = Vector{Float64}(undef, W) + y_tmp = Vector{Float64}(undef, W) + locations_added, threshold = 0, 0 + @inbounds while locations_added < n + if locations_added < c + threshold = locations_added + 1 + obj_candidate = 0 + else + obj_candidate = obj_incumbent + end + ind_candidate_list = Vector{Int64}(undef, 0) + @inbounds for ind in setdiff(ind_compl_incumbent, ind_incumbent) + Dx_tmp .= Dx_incumbent .+ view(D, :, ind) + y_tmp .= Dx_tmp .>= threshold + obj_tmp = sum(y_tmp) + if obj_tmp > obj_candidate + ind_candidate_list = 
[ind] + obj_candidate = obj_tmp + elseif obj_tmp == obj_candidate + ind_candidate_list = union(ind, ind_candidate_list) + end + end + ind_candidate = sample(ind_candidate_list) + ind_incumbent = union(ind_incumbent, ind_candidate) + Dx_incumbent .= Dx_incumbent .+ view(D, :, ind_candidate) + y_incumbent .= Dx_incumbent .>= c + obj_incumbent = sum(y_incumbent) + locations_added += 1 + end + x_incumbent[ind_incumbent] .= 1. + return x_incumbent, obj_incumbent + +end + +function time_threshold_greedy_algorithm(D::Array{Float64,2}, c::Float64, n::Float64) + @time threshold_greedy_algorithm(D, c, n) end + +#################### Stochastic Multi-Threshold Greedy Algorithm ####################### + +# Description: function implementing the stochastic multi-threshold greedy algorithm (which generalises the stochastic greedy algorithm for submodular maximization (Mirzasoleiman et al, 2015)) +# +# Comments: 1) types of inputs should match those declared in argument list +# 2) at every iteration, randomisation is used to select the location that should be removed from the locations set when several locations are "tied", i.e., removing each of them leads to the same decrease in objective value +# +# Inputs: D - criticality matrix with entries in {0, 1}, where rows represent time windows and columns represent locations +# c - global criticality threshold +# n - number of sites to deploy +# p - factor (between 0 and 1) defines the size of the random subset s = (L/n) x p +# +# Outputs: x_incumbent - vector with entries in {0, 1} and cardinality n representing the incumbent solution at the last iteration +# LB_incumbent - objective value of incumbent solution, provides a lower bound on optimal objective +# + +function stochastic_threshold_greedy_algorithm(D::Array{Float64,2}, c::Float64, n::Float64, p::Float64) + + W, L = size(D) + s = convert(Int64, round((L/n)*p)) + x_incumbent = zeros(Float64, L) + random_ind_set = Vector{Int64}(undef, s) + ind_compl_incumbent = [i for i in 1:L] + ind_incumbent = [] + Dx_incumbent = zeros(Float64, W) + y_incumbent = zeros(Float64, W) + obj_incumbent = 0 + Dx_tmp = Vector{Float64}(undef, W) + y_tmp = Vector{Float64}(undef, W) + locations_added, threshold = 0, 0 + @inbounds while locations_added < n + if locations_added < c + threshold = locations_added + 1 + obj_candidate = 0 + else + obj_candidate = obj_incumbent + end + ind_candidate_list = Vector{Int64}(undef, 0) + random_ind_set .= sample(ind_compl_incumbent, s, replace=false) + @inbounds for ind in random_ind_set + Dx_tmp .= Dx_incumbent .+ view(D, :, ind) + y_tmp .= Dx_tmp .>= threshold + obj_tmp = sum(y_tmp) + if obj_tmp > obj_candidate + ind_candidate_list = [ind] + obj_candidate = obj_tmp + elseif obj_tmp == obj_candidate + ind_candidate_list = union(ind, ind_candidate_list) + end + end + ind_candidate = sample(ind_candidate_list) + ind_incumbent = union(ind_incumbent, ind_candidate) + ind_compl_incumbent = setdiff(ind_compl_incumbent, ind_candidate) + Dx_incumbent .= Dx_incumbent .+ view(D, :, ind_candidate) + y_incumbent .= Dx_incumbent .>= c + obj_incumbent = sum(y_incumbent) + locations_added += 1 + end + x_incumbent[ind_incumbent] .= 1. 
+ return x_incumbent, obj_incumbent + +end \ No newline at end of file diff --git a/src/jl/SitingHeuristics.jl b/src/jl/SitingHeuristics.jl index 3fa1201..d18958f 100644 --- a/src/jl/SitingHeuristics.jl +++ b/src/jl/SitingHeuristics.jl @@ -37,20 +37,6 @@ function main_MIRSA(index_dict, deployment_dict, D, c, N, I, E, T_init, R, run) dt = (t2 - t1)/R println(dt) - elseif run == "SALSR" - - x_sol, LB_sol, obj_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R), Array{Float64, 2}(undef, R, I) - x_init = zeros(Float64, L) - ind = collect(1:L) - x_ind = sample(ind, convert(Int64, deployment_dict[1])) - x_init[x_ind] .= 1. - n = convert(Float64, deployment_dict[1]) - - for r = 1:R - println("Run ", r, "/", R) - x_sol[r, :], LB_sol[r], obj_sol[r, :] = simulated_annealing_local_search(D, c, n, N, I, E, x_init, T_init) - end - else println("No such run available.") throw(ArgumentError) @@ -60,7 +46,7 @@ function main_MIRSA(index_dict, deployment_dict, D, c, N, I, E, T_init, R, run) end -function main_GRED(deployment_dict, D, c, R, eps, run) +function main_GRED(deployment_dict, D, c, R, p, run) deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)]) D = convert.(Float64, D) @@ -69,27 +55,20 @@ function main_GRED(deployment_dict, D, c, R, eps, run) W, L = size(D) - if run == "RGH" + if run == "TGH" x_sol, LB_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R) n = convert(Float64, deployment_dict[1]) for r = 1:R println("Run ", r, "/", R) - x_sol[r, :], LB_sol[r] = randomised_greedy_heuristic(D, c, n) + x_sol[r, :], LB_sol[r] = threshold_greedy_algorithm(D, c, n) end - elseif run == "CGA" + elseif run == "STGH" x_sol, LB_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R) n = convert(Float64, deployment_dict[1]) + p = convert(Float64, p) for r = 1:R println("Run ", r, "/", R) - x_sol[r, :], LB_sol[r] = classic_greedy_algorithm(D, c, n) - end - elseif run == "SGA" - x_sol, LB_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R) - n = convert(Float64, deployment_dict[1]) - eps = convert(Float64, 0.001) - for r = 1:R - println("Run ", r, "/", R) - x_sol[r, :], LB_sol[r] = stochastic_greedy_algorithm(D, c, n, eps) + x_sol[r, :], LB_sol[r] = stochastic_threshold_greedy_algorithm(D, c, n, p) end else println("No such run available.") @@ -111,10 +90,8 @@ function main_RAND(deployment_dict, D, c, I, R, run) W, L = size(D) if run == "RS" - x_sol, LB_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R) n = convert(Float64, deployment_dict[1]) - for r = 1:R println("Run ", r, "/", R) x_sol[r, :], LB_sol[r] = random_search(D, c, n, I) diff --git a/src/main.py b/src/main.py index 1c5730e..1cbc092 100644 --- a/src/main.py +++ b/src/main.py @@ -63,32 +63,36 @@ if __name__ == '__main__': custom_log(' BB chosen to solve the IP.') params = siting_parameters['solution_method']['BB'] - if not isinstance(params['c'], int): - raise ValueError(' Values of c have to be integers for the Branch & Bound set-up.') + if not isinstance(params['c'], list): + raise ValueError(' Values of c have to be elements of a list for the Branch & Bound set-up.') - output_folder = init_folder(model_parameters, params['c'], '_BB') - with open(join(output_folder, 'config_model.yaml'), 'w') as outfile: - yaml.dump(model_parameters, outfile, default_flow_style=False, sort_keys=False) - with open(join(output_folder, 'config_techs.yaml'), 'w') as outfile: - yaml.dump(tech_parameters, outfile, default_flow_style=False, 
sort_keys=False) + for c in params['c']: - # Solver options for the MIP problem - opt = SolverFactory(params['solver']) - opt.options['MIPGap'] = params['mipgap'] - opt.options['Threads'] = params['threads'] - opt.options['TimeLimit'] = params['timelimit'] + output_folder = init_folder(model_parameters, c, f"_BB_c{c}") + with open(join(output_folder, 'config_model.yaml'), 'w') as outfile: + yaml.dump(model_parameters, outfile, default_flow_style=False, sort_keys=False) + with open(join(output_folder, 'config_techs.yaml'), 'w') as outfile: + yaml.dump(tech_parameters, outfile, default_flow_style=False, sort_keys=False) + + # Solver options for the MIP problem + opt = SolverFactory(params['solver']) + opt.options['MIPGap'] = params['mipgap'] + opt.options['Threads'] = params['threads'] + opt.options['TimeLimit'] = params['timelimit'] - instance = build_ip_model(deployment_dict, site_coordinates, criticality_data, params['c'], output_folder) - custom_log(' Sending model to solver.') + instance = build_ip_model(deployment_dict, site_coordinates, criticality_data, + c, output_folder, params['mir']) + custom_log(' Sending model to solver.') - results = opt.solve(instance, tee=True, keepfiles=False, - report_timing=False, logfile=join(output_folder, 'solver_log.log')) + results = opt.solve(instance, tee=False, keepfiles=False, + report_timing=False, logfile=join(output_folder, 'solver_log.log')) - objective = instance.objective() - x_values = array(list(instance.x.extract_values().values())) - comp_location_dict = retrieve_location_dict(x_values, model_parameters, site_positions) - retrieve_site_data(model_parameters, deployment_dict, site_coordinates, capacity_factors_data, criticality_data, - site_positions, params['c'], comp_location_dict, objective, output_folder) + objective = instance.objective() + x_values = array(list(instance.x.extract_values().values())) + comp_location_dict = retrieve_location_dict(x_values, model_parameters, site_positions) + retrieve_site_data(model_parameters, deployment_dict, site_coordinates, capacity_factors_data, + criticality_data, site_positions, c, comp_location_dict, objective, + output_folder, benchmarks=False) elif siting_parameters['solution_method']['MIRSA']['set']: @@ -96,7 +100,7 @@ if __name__ == '__main__': params = siting_parameters['solution_method']['MIRSA'] if not isinstance(params['c'], list): - raise ValueError(' Values of c have to elements of a list for the heuristic set-up.') + raise ValueError(' Values of c have to be elements of a list for the heuristic set-up.') jl_dict = generate_jl_input(deployment_dict, site_coordinates) @@ -130,7 +134,7 @@ if __name__ == '__main__': jl_locations = retrieve_location_dict(jl_selected_seed, model_parameters, site_positions) retrieve_site_data(model_parameters, deployment_dict, site_coordinates, capacity_factors_data, criticality_data, site_positions, c, jl_locations, jl_objective_seed, - output_folder, benchmarks=True) + output_folder, benchmarks=False) elif siting_parameters['solution_method']['RAND']['set']: @@ -168,12 +172,12 @@ if __name__ == '__main__': jl_locations = retrieve_location_dict(jl_selected_seed, model_parameters, site_positions) retrieve_site_data(model_parameters, deployment_dict, site_coordinates, capacity_factors_data, criticality_data, site_positions, c, jl_locations, jl_objective_seed, - output_folder, benchmarks=True) + output_folder, benchmarks=False) elif siting_parameters['solution_method']['GRED']['set']: - custom_log(' GRED chosen to solve the IP. 
Opening a Julia instance.') params = siting_parameters['solution_method']['GRED'] + custom_log(f" GRED_{params['algorithm']} chosen to solve the IP. Opening a Julia instance.") if not isinstance(params['c'], list): raise ValueError(' Values of c have to elements of a list for the heuristic set-up.') @@ -187,10 +191,13 @@ if __name__ == '__main__': for c in params['c']: print('Running heuristic for c value of', c) + start = time.time() jl_selected, jl_objective = Main.main_GRED(jl_dict['deployment_dict'], criticality_data, c, - params['no_runs'], params['epsilon'], params['algorithm']) + params['no_runs'], params['p'], params['algorithm']) + end = time.time() + print(f"Average CPU time: {round((end-start)/params['no_runs'], 1)} s") - output_folder = init_folder(model_parameters, c, suffix='_GRED') + output_folder = init_folder(model_parameters, c, suffix=f"_GRED_{params['algorithm']}_p{params['p']}") pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb')) pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb')) @@ -202,7 +209,7 @@ if __name__ == '__main__': jl_locations = retrieve_location_dict(jl_selected_seed, model_parameters, site_positions) retrieve_site_data(model_parameters, deployment_dict, site_coordinates, capacity_factors_data, criticality_data, site_positions, c, jl_locations, jl_objective_seed, - output_folder, benchmarks=True) + output_folder, benchmarks=False) else: raise ValueError(' This solution method is not available. ') diff --git a/src/models.py b/src/models.py index 7c311f4..60bb9e5 100644 --- a/src/models.py +++ b/src/models.py @@ -1,14 +1,14 @@ from os.path import join from numpy import arange -from pyomo.environ import ConcreteModel, Var, Binary, maximize +from pyomo.environ import ConcreteModel, Var, Binary, maximize, NonNegativeReals from pyomo.opt import ProblemFormat from pypsa.opt import l_constraint, LConstraint, l_objective, LExpression from tools import retrieve_index_dict -def build_ip_model(deployment_dict, coordinate_dict, critical_matrix, c, output_folder, write_lp=False): +def build_ip_model(deployment_dict, coordinate_dict, critical_matrix, c, output_folder, mir=False, write_lp=False): """Model build-up. Parameters: @@ -41,7 +41,10 @@ def build_ip_model(deployment_dict, coordinate_dict, critical_matrix, c, output_ model.L = arange(1, no_locations + 1) model.x = Var(model.L, within=Binary) - model.y = Var(model.W, within=Binary) + if not mir: + model.y = Var(model.W, within=Binary) + else: + model.y = Var(model.W, within=NonNegativeReals, bounds=(0, 1)) activation_constraint = {} diff --git a/src/tools.py b/src/tools.py index 2d45a0c..4905edc 100644 --- a/src/tools.py +++ b/src/tools.py @@ -673,7 +673,7 @@ def retrieve_index_dict(deployment_vector, coordinate_dict): def retrieve_site_data(model_parameters, deployment_dict, coordinates_dict, output_data, criticality_data, - location_mapping, c, site_coordinates, objective, output_folder, benchmarks=True): + location_mapping, c, site_coordinates, objective, output_folder, benchmarks=False): output_by_tech = collapse_dict_region_level(output_data) time_slice = model_parameters['time_slice'] @@ -752,7 +752,7 @@ def retrieve_site_data(model_parameters, deployment_dict, coordinates_dict, outp # Capacity credit sites. 
- load_data_fn = join(model_parameters['data_path'], 'input/load_data', 'load_2009_2018.csv') + load_data_fn = join(model_parameters['data_path'], 'input/load_data', 'load_2000_2019.csv') load_data = read_csv(load_data_fn, index_col=0) load_data.index = to_datetime(load_data.index) load_data = load_data[(load_data.index > time_slice[0]) & (load_data.index < time_slice[1])] -- GitLab
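
For reference, the selection rule implemented by the new threshold_greedy_algorithm in src/jl/MCP_heuristics.jl can be summarised by the minimal sketch below. It is an illustrative reimplementation under toy data, not part of the patch: the function name toy_threshold_greedy, the random 20x10 criticality matrix and the seed are assumptions made here for demonstration only. The idea it shows is the one in the patch: while fewer than c sites are selected the final objective sum(Dx .>= c) is identically zero, so candidates are scored against a growing threshold min(k, c), with ties between equally good sites broken uniformly at random; the incumbent objective is always re-evaluated at the true threshold c.

using Random, StatsBase

function toy_threshold_greedy(D::Matrix{Float64}, c::Float64, n::Int)
    L = size(D, 2)
    Dx = zeros(size(D, 1))              # running column sum of the selected sites
    selected = Int[]
    for k in 1:n
        # Score candidates against a growing threshold min(k, c) instead of c itself,
        # so the greedy gain is informative even before c sites have been selected.
        threshold = min(Float64(k), c)
        best_gain, ties = -1, Int[]
        for ind in setdiff(1:L, selected)
            gain = sum((Dx .+ view(D, :, ind)) .>= threshold)
            if gain > best_gain
                best_gain, ties = gain, [ind]
            elseif gain == best_gain
                push!(ties, ind)        # keep tied sites, broken at random below
            end
        end
        ind = sample(ties)              # random tie-breaking, as in the patch
        push!(selected, ind)
        Dx .+= view(D, :, ind)
    end
    return selected, sum(Dx .>= c)      # incumbent objective at the true threshold c
end

Random.seed!(1)
D = Float64.(rand(20, 10) .> 0.5)       # toy criticality matrix: 20 time windows x 10 sites
println(toy_threshold_greedy(D, 3.0, 4))

The stochastic variant added alongside it (stochastic_threshold_greedy_algorithm, selected via algorithm: 'STGH' in config_model.yml) uses the same scoring rule but, at each iteration, evaluates only a random subset of the unselected sites of size s = round((L/n) * p), with p taken from the new GRED p parameter.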