diff --git a/README.md b/README.md
index 3f177f78ef1b4ba4e53d05f9fbdab43221ed04ec..be99519a3de3730366c219ab6e10dcbef0d161f4 100644
--- a/README.md
+++ b/README.md
@@ -1,29 +1,33 @@
-# resite
+# resite_ip
 
-This file provides the information required to set up the __resite__ tool. Also, it provides a minimal example for siting
-RES assets.  
+This branch of the `resite_ip` repository contains Python scripts used to pre- and post-process application-related data
+for the max k-multicover problem introduced in the following publication: "Siting Renewable Power Generation Assets with Combinatorial Optimisation", Mathias Berger et al., Optimization Letters, 2021. DOI: https://doi.org/10.1007/s11590-021-01795-0
 
-# Setup
+## Setup
 
 This module runs in any 3.x version of Python. The Python packages required to run this module are listed 
 in the `requirements.yml` file included in the repository. Their installation can be easily achieved via the following
 command that builds a separate environment for this module from the `yml` file:
    
     conda env create -f requirements.yml
-
-The tool requires to installation of an MILP solver. At the time of writing, `gurobi` is the only solver this 
-model supports (see installation details [here](https://www.gurobi.com/documentation/8.1/remoteservices/installation.html).
+    
+In addition, this repository requires cloning the `maxk_multicover` repository, which contains a set of `Julia` scripts and is
+available [here](https://gitlab.uliege.be/smart_grids/public/maxk_multicover).
+The latter repository should be cloned in the same root folder as the `resite_ip` repository. Once this is done,
+the `maxk_multicover` folder has to be added to the PYTHONPATH, e.g. as sketched below.
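+
+One possible way of doing so on Linux (the path below being a placeholder for the actual location of the cloned repository):
+
+    export PYTHONPATH="${PYTHONPATH}:/path/to/maxk_multicover"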
+
+The tool also requires the installation of the `gurobi` solver. The current version of the repository is tested with
+Gurobi 9.1, against which the outcomes of the different algorithms are benchmarked.
    
-# Example run
-
-To be added.
-
-# Citing
-Please cite the following paper if you use this software in your research.
+## Repository content
 
-To be added.
+The repository contains, besides three `yaml` files used for model configuration and environment installation, the following four scripts within the `src` folder:
+1. The `era5data.py` script provides the code to download reanalysis data via the CDS API of ECMWF.
+2. The `helpers.py` script contains various methods for data manipulation and pre-processing.
+3. The `tools.py` script gathers the main functions converting raw reanalysis data into a structured form, ready to be fed to the various siting routines.
+4. The `main.py` script is mainly used to call the different siting routines embedded in this repository.
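+
+Assuming the `yaml` configuration files have been set up and that `main.py` requires no additional command-line arguments, a run could then be launched from the repository root via:
+
+    python src/main.py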
 
-# License
+## License
 This software is released as free software under the [GPLv3](http://www.gnu.org/licenses/gpl-3.0.en.html).
 
 
diff --git a/config_model.yml b/config_model.yml
index cd35411bf5d3c4ac897be257bdc76d382ecfba85..65294b6b084e57c39882e286c21457f719872a6a 100644
--- a/config_model.yml
+++ b/config_model.yml
@@ -1,70 +1,70 @@
-# Path to data folder
-#data_path: 'D:/ULg_PhD_work/datasets/resite_ip/'
-data_path: '/home/dcradu/data/resite_ip/'
+# Path to data folder (where input/output data is stored - not necessarily the src folder)
+data_path: 'D:/ULg_PhD_work/datasets/resite_ip/'
+# Path to the maxk_multicover algorithms.jl file (might need an updated PYTHONPATH for this one)
+julia_models_path: '../../maxk_multicover/src/algorithms.jl'
 
-# Spatial resolution (in degrees) of the potential sites.
+# Spatial resolution (in degrees) of the ERA5 data (depends on what data was downloaded via the era5data.py script)
 spatial_resolution: 0.25
-# Start time and end time of the analysis.
-time_slice: ['2011-01-01T00:00', '2020-12-31T23:00']
-# Technologies to deploy.
+# Start time and end time of the analysis (again, depends on the temporal coverage of the downloaded data)
+time_slice: ['2018-01-01T00:00', '2018-01-31T23:00']
+# Region (ISO2) to cover within the ERA5 dataset. This key retrieves the shape of the underlying region
+# (e.g., if "DE" is given, the shapefile of the German territory is used to identify which ERA5 points fall within it).
 regions: ['EU']
+# Technology to consider (for now limited to one technology only, available choices: 'wind_onshore', 'wind_offshore',
+# 'pv_utility', 'pv_residential').
 technologies: ['wind_onshore']
+# Number of sites to deploy (one integer per region and technology)
 deployments: [[560]]
 
 siting_params:
+  # Time-window length (any integer value smaller than the length of the time horizon)
+  delta: 1
+  # Defines the smoothing across time windows of length \delta (available options include 'mean', 'median')
   smooth_measure: 'mean'
-  # Defines how \alpha is considered in space and time.
+  # Defines how criticality is computed with respect to load time series (e.g., 'load_central' computes criticality
+  # against the aggregate load of all countries included in the study, whereas 'load_partition' computes it in each
+  # country based on the corresponding load time series)
   alpha: 'load_central'
-  # Normalization procedures (detailed in tools.py). (min, max)
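+  # Normalization procedure ('min' or 'max'), detailed in tools.py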
   norm_type: 'max'
-  # Time-window length used to compute the criticality indicator. Integer value.
-  delta: 1
-  # Solution method: BB or HEU or RAND or GRED.
+
+  # Covering parameter (global criticality threshold)
   c: 1
+  # Solution method (options: MIP, MIR, RG, RGP, RSSA, MIRSA, RGPSA, RS)
+  algorithm: 'RGPSA'
 
-  solution_method:
-    BB:
-      # Branch & Bound
-      set: True
-      mir: False
+  method_params:
+    MIP:
       solver: 'gurobi'
       mipgap: 0.01
       timelimit: 43200
       threads: 0
-    LS:
-      set: False
-      neighborhood: 1
+    MIR:
+      solver: 'gurobi'
+      mipgap: 0.01
+      timelimit: 1800
+      threads: 0
+    MIRSA:
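+      # radius: number of locations swapped to obtain a neighbour of the incumbent solution
+      # no_iterations / no_epochs: lengths of the outer / inner search loops
+      # initial_temp: initial temperature of the exponentially-decreasing annealing schedule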
+      radius: 1
+      no_iterations: 2000
+      no_epochs: 500
+      initial_temp: 100.
+    RGPSA:
+      radius: 1
       no_iterations: 2000
       no_epochs: 500
       initial_temp: 100.
-      no_runs: 100
-      algorithm: 'MIR' # 'SGH', 'RS'
-    GRED_DET:
-      set: False
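+      # p: fraction (between 0 and 1) defining the size of the random subset sampled at each greedy step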
+      p: 0.05
+      init_runs: 20
+    RSSA:
+      radius: 1
+      samples: 10000
+      no_iterations: 2000
+      no_epochs: 500
+      initial_temp: 100.
+    RG:
       no_runs: 10
-      p: 0
-      algorithm: 'TGH'
-    GRED_STO:
-      set: False
+    RGP:
       p: 0.05
       no_runs: 300
-      algorithm: 'STGH'
-    RAND:
-      # Random Search
-      set: False
-      no_iterations: 1000000
-      no_runs: 1
-      algorithm: 'RS'
-    CROSS:
-      # Cross validation
-      set: False
-      k: 2
-      no_years: 10
-      no_years_train: 5
-      no_years_test: 5
-      no_experiments: 50
-      no_runs_per_experiment: 1
-      criterion: 'max'
-      method: 'custom' # 'k_fold'
-      algorithm: 'mirsa'
-
+    RS:
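+      # samples: number of randomly drawn candidate solutions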
+      samples: 1000000
diff --git a/config_techs.yml b/config_techs.yml
index a81a0226857c1b4d17aad59ac73af736b989f9b7..e0458f7ec4cd4b1a8c07a2336b68b48c4de0110b 100644
--- a/config_techs.yml
+++ b/config_techs.yml
@@ -1,4 +1,26 @@
 # Config file for various conversion technologies.
+# technology:
+#  where: 'onshore'/'offshore' - NOT TO BE ALTERED
+#  filters: filters used to remove candidate locations based on land utilization criteria -
+#            to choose from ['water_mask', 'bathymetry', 'distance', 'resource_quality', 'forestry',
+#            'population_density', 'orography', 'latitude']
+#  converter_IV: converter to be used for class IV winds - the set of available converters is listed in
+#                data/transfer_functions/data_wind_turbines.csv
+#  converter_III: converter to be used for class III winds
+#  converter_II: converter to be used for class II winds
+#  converter_I: converter to be used for class I winds
+#  resource: NOT TO BE ALTERED
+#  deployment: NOT TO BE ALTERED
+#  resource_threshold: average wind speed (m/s) under which candidate sites are discarded
+#  population_density_threshold_low: lower population density threshold used to discard candidate sites
+#  population_density_threshold_high: upper population density threshold used to discard candidate sites
+#  depth_threshold_low: lower water depth threshold used to discard candidate sites (for offshore)
+#  depth_threshold_high: upper water depth threshold used to discard candidate sites (for offshore)
+#  altitude_threshold: altitude threshold (in m) used to discard candidate sites
+#  terrain_slope_threshold: terrain slope threshold (in %) used to discard candidate sites
+#  forestry_ratio_threshold: forestry threshold (in % of cell area) used to discard candidate sites
+#  latitude_threshold: latitude threshold (in degrees) used to discard candidate sites
+
 wind_onshore:
   where: 'onshore'
   filters: ['water_mask']
@@ -84,7 +106,6 @@ pv_utility:
   forestry_ratio_threshold: 0.8
   latitude_threshold: 65.
 
-#TODO: fix pv_residential filters
 pv_residential:
   where: 'onshore'
   filters: ['population_density', 'water_mask']
diff --git a/requirements.yml b/requirements.yml
index af87dde193108af6d80166461b2519a0d86032fe..cc01d8efa4625f07132efa0dd2e7bc4af5a6c727 100644
--- a/requirements.yml
+++ b/requirements.yml
@@ -6,8 +6,6 @@ dependencies:
   - bottleneck
   - cdsapi
   - dask
-  - matplotlib
-  - pyomo
   - scipy
   - toolz
   - xarray
@@ -15,5 +13,5 @@ dependencies:
   - pyyaml
   - xlrd
   - geopy
-  - pypsa
-  - shapely
\ No newline at end of file
+  - shapely
+  - julia
\ No newline at end of file
diff --git a/src/helpers.py b/src/helpers.py
index c2e049540417462be7ae983d2d942f90b2a3fbe9..3d82a1eb654206908d52078b42083d8397f6630f 100644
--- a/src/helpers.py
+++ b/src/helpers.py
@@ -452,40 +452,6 @@ def get_partition_index(input_dict):
 
     return index_dict
 
-
-def init_folder(parameters, c, suffix=None):
-    """Initilize an output folder.
-
-    Parameters:
-    ------------
-    parameters : dict
-        Parameters dictionary.
-
-    Returns:
-    ------------
-    path : str
-        Relative path of the folder.
-
-    """
-    output_data_path = join(parameters['data_path'], 'output')
-
-    no_locs = str(sum(parameters['deployments']))
-    no_part = str(len(parameters['regions']))
-    no_yrs = str(int(round((to_datetime(parameters['time_slice'][1]) -
-                            to_datetime(parameters['time_slice'][0])) / timedelta64(1, 'Y'), 0)))
-    c = str(c)
-
-    if not isdir(output_data_path):
-        makedirs(abspath(output_data_path))
-
-    path = abspath(output_data_path + '/' + no_yrs + 'y_n' + no_locs + '_k' + no_part + '_c' + c + suffix)
-    makedirs(path)
-
-    custom_log(f"Folder path is: {path}")
-
-    return path
-
-
 def generate_jl_input(deployment_dict, filtered_coordinates):
 
     concat_deployment_dict = concatenate_dict_keys(deployment_dict)
@@ -495,14 +461,14 @@ def generate_jl_input(deployment_dict, filtered_coordinates):
     for idx, region in enumerate(region_list):
         int_to_region_map[region] = idx + 1
 
-    deployment_dict_int = dict(zip(int_to_region_map.values(), concat_deployment_dict.values()))
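+    # Total number of sites to deploy, aggregated over all regions and technologies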
+    k = sum(deployment_dict[region][tech] for region in deployment_dict for tech in deployment_dict[region])
 
     index_dict = concatenate_dict_keys(get_partition_index(filtered_coordinates))
     index_dict_swap = {k: oldk for oldk, oldv in index_dict.items() for k in oldv}
     for key, value in index_dict_swap.items():
         index_dict_swap[key] = int_to_region_map[value]
 
-    output_dict = {'deployment_dict': deployment_dict_int,
+    output_dict = {'k': k,
                    'index_dict': index_dict_swap}
 
     return output_dict
diff --git a/src/jl/MCP_heuristics.jl b/src/jl/MCP_heuristics.jl
deleted file mode 100644
index f156feabc2e1ea0f1ec53b7a85ff7208aaaa7a4c..0000000000000000000000000000000000000000
--- a/src/jl/MCP_heuristics.jl
+++ /dev/null
@@ -1,611 +0,0 @@
-using StatsBase
-using Distributions
-
-#################### Random Search Algorithm #######################
-
-function random_search(D::Array{Float64, 2}, c::Float64, n::Float64, R::Int64)
-  W, L = size(D)
-  n = convert(Int64, n)
-  x_incumbent = zeros(Float64, L)
-  ind_set = [l for l in 1:L]
-  ind_incumbent = Vector{Int64}(undef, n)
-  ind_candidate = Vector{Int64}(undef, n)
-  Dx_candidate = Vector{Float64}(undef, W)
-  Dx_init = zeros(Float64, W)
-  y_candidate = Vector{Float64}(undef, W)
-  obj_incumbent = 0
-  @inbounds for r in 1:R
-    sample!(ind_set, ind_candidate, replace=false)
-    Dx_candidate .= Dx_init
-    @inbounds for ind in ind_candidate
-      Dx_candidate .= Dx_candidate .+ view(D, :, ind)
-    end
-    y_candidate .= Dx_candidate .>= c
-    obj_candidate = sum(y_candidate)
-    if obj_candidate >= obj_incumbent
-      ind_incumbent .= ind_candidate
-      obj_incumbent = obj_candidate
-    end
-  end
-  x_incumbent[ind_incumbent] .= 1.
-  return x_incumbent, obj_incumbent
-end
-
-#################### Greedy Local Search w/ Partitioning Constraints (Dict Implementation) #######################
-
-# Description: function implementing a greedy local search for geographical regions partitioned into a set of subregions
-#
-# Comments: 1) types of inputs should match those declared in argument list
-#           2) implementation uses both dict and array data structures
-#
-# Inputs: D - criticality matrix with entries in {0, 1}, where rows represent time windows and columns represent locations
-#         c - global criticality threshold
-#         n - number of sites to deploy
-#         N - number of locations to swap in order to obtain a neighbour of the incumbent solution
-#         I - number of iterations (outer loop), defines the number of times the incumbent solution may be updated
-#         E - number of epochs (inner loop), defines the number of neighbours of the incumbent solution sampled at each iteration
-#         x_init - initial solution, vector with entries in {0, 1}, with cardinality n and whose dimension is compatible with D
-#         locations_regions_mapping - dictionary associating its subregion (value) to each location (key)
-#
-# Outputs: x_incumbent - vector with entries in {0, 1} and cardinality n representing the incumbent solution at the last iteration
-#          LB_incumbent - objective value of incumbent solution, provides a lower bound on optimal objective
-#          obj - vector storing the incumbent objective value at each iteration
-#
-
-function greedy_local_search_partition(D::Array{Float64, 2}, c::Float64, n::Vector{Int64}, N::Int64, I::Int64, E::Int64, x_init::Array{Float64, 1}, locations_regions_mapping::Dict{Int64, Int64})
-
-  W, L = size(D)
-  P = maximum(values(locations_regions_mapping))
-
-  # Pre-allocate lower bound vector
-  obj = Vector{Int64}(undef, I)
-
-  # Pre-allocate x-related containers
-  x_incumbent = Vector{Float64}(undef, L)
-  ind_ones2zeros_candidate = Vector{Int64}(undef, N)
-  ind_zeros2ones_candidate = Vector{Int64}(undef, N)
-  ind_ones2zeros_tmp = Vector{Int64}(undef, N)
-  ind_zeros2ones_tmp = Vector{Int64}(undef, N)
-
-  regions = [i for i in 1:P]
-  sample_count_per_region = Vector{Int64}(undef, P)
-  init_sample_count_per_region = zeros(Int64, P)
-  ind_samples_per_region_tmp = Vector{Int64}(undef, P+1)
-  ind_samples_per_region_candidate = Vector{Int64}(undef, P+1)
-  locations_count_per_region = zeros(Int64, P)
-  index_range_per_region = Vector{Int64}(undef, P+1)
-
-  @inbounds for i = 1:L
-    locations_count_per_region[locations_regions_mapping[i]] += 1
-  end
-
-  ind_ones_incumbent = Dict([(r, Vector{Int64}(undef, n[r])) for r in regions])
-  ind_zeros_incumbent = Dict([(r, Vector{Int64}(undef, locations_count_per_region[r]-n[r])) for r in regions])
-
-  index_range_per_region[1] = 1
-  @inbounds for j = 1:P
-    index_range_per_region[j+1] = index_range_per_region[j] + locations_count_per_region[j]
-  end
-
-  # Pre-allocate y-related arrays
-  y_incumbent = Array{Bool}(undef, W, 1)
-  y_tmp = Array{Bool}(undef, W, 1)
-  c_threshold = c .* ones(Float64, W, 1)
-
-  Dx_incumbent = zeros(Float64, W, 1)
-  Dx_tmp = Array{Float64}(undef, W, 1)
-
-  # Initialise
-  ind_ones, counter_ones = findall(x_init .== 1.), zeros(Int64, P)
-  @inbounds for ind in ind_ones
-    p = locations_regions_mapping[ind]
-    counter_ones[p] += 1
-    ind_ones_incumbent[p][counter_ones[p]] = ind
-    Dx_incumbent .+= view(D, :, ind)
-  end
-  y_incumbent .= Dx_incumbent .>= c_threshold
-
-  ind_zeros, counter_zeros = findall(x_init .== 0.), zeros(Int64, P)
-  for ind in ind_zeros
-    p = locations_regions_mapping[ind]
-    counter_zeros[p] += 1
-    ind_zeros_incumbent[p][counter_zeros[p]] = ind
-  end
-  ind_samples_per_region_tmp[1] = 1
-
-  # Iterate
-  @inbounds for i = 1:I
-    obj[i] = sum(y_incumbent)
-    delta_candidate = -1000000
-    @inbounds for e = 1:E
-      # Sample from neighbourhood
-      sample_count_per_region .= init_sample_count_per_region
-      @inbounds while sum(sample_count_per_region) < N
-        p = sample(regions)
-        if (sample_count_per_region[p] < n[p]) && (sample_count_per_region[p] < locations_count_per_region[p] - n[p])
-          sample_count_per_region[p] += 1
-        end
-      end
-
-      @inbounds for i = 1:P
-        ind_samples_per_region_tmp[i+1] = ind_samples_per_region_tmp[i] + sample_count_per_region[i]
-        if sample_count_per_region[i] != 0
-          view(ind_ones2zeros_tmp, ind_samples_per_region_tmp[i]:(ind_samples_per_region_tmp[i+1]-1)) .= sample(ind_ones_incumbent[i], sample_count_per_region[i], replace=false)
-          view(ind_zeros2ones_tmp, ind_samples_per_region_tmp[i]:(ind_samples_per_region_tmp[i+1]-1)) .= sample(ind_zeros_incumbent[i], sample_count_per_region[i], replace=false)
-        end
-      end
-
-      # Compute y and associated objective value
-      Dx_tmp .= Dx_incumbent .+ sum(view(D, :, ind_zeros2ones_tmp), dims = 2) .- sum(view(D, :, ind_ones2zeros_tmp), dims = 2)
-      y_tmp .= Dx_tmp .>= c_threshold
-
-      # Update objective difference
-      delta_tmp = sum(y_tmp) - obj[i]
-
-      # Update candidate solution
-      if delta_tmp > delta_candidate
-        ind_ones2zeros_candidate .= ind_ones2zeros_tmp
-        ind_zeros2ones_candidate .= ind_zeros2ones_tmp
-        ind_samples_per_region_candidate .= ind_samples_per_region_tmp
-        delta_candidate = delta_tmp
-      end
-    end
-    if delta_candidate > 0
-      @inbounds for i = 1:P
-        ind_ones_incumbent[i] .= union(setdiff(ind_ones_incumbent[i], view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1))), view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1)))
-        ind_zeros_incumbent[i] .= union(setdiff(ind_zeros_incumbent[i], view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1))), view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1)))
-      end
-      Dx_incumbent .= Dx_incumbent .+ sum(view(D, :, ind_zeros2ones_candidate), dims = 2) .- sum(view(D, :, ind_ones2zeros_candidate), dims = 2)
-      y_incumbent .= Dx_incumbent .>= c_threshold
-    end
-  end
-  @inbounds for i in 1:P
-    x_incumbent[ind_ones_incumbent[i]] .= 1.
-    x_incumbent[ind_zeros_incumbent[i]] .= 0.
-  end
-  LB = sum(y_incumbent)
-  return x_incumbent, LB, obj
-
-end
-
-#################### Simulated Annealing Local Search w/ Partitioning Constraints (Dict Implementation) #######################
-
-# Description: function implementing a simulated annealing-inspired local search for geographical regions partitioned into a set of subregions
-#
-# Comments: 1) types of inputs should match those declared in argument list
-#           2) implementation uses both dict and array data structures
-#
-# Inputs: D - criticality matrix with entries in {0, 1}, where rows represent time windows and columns represent locations
-#         c - global criticality threshold
-#         n - number of sites to deploy
-#         N - number of locations to swap in order to obtain a neighbour of the incumbent solution
-#         I - number of iterations (outer loop), defines the number of times the incumbent solution may be updated
-#         E - number of epochs (inner loop), defines the number of neighbours of the incumbent solution sampled at each iteration
-#         x_init - initial solution, vector with entries in {0, 1}, with cardinality n and whose dimension is compatible with D
-#         T_init - initial temperature from which the (exponentially-decreasing) temperature schedule is constructed
-#         locations_regions_mapping - dictionary associating its subregion (value) to each location (key)
-#
-# Outputs: x_incumbent - vector with entries in {0, 1} and cardinality n representing the incumbent solution at the last iteration
-#          LB_incumbent - objective value of incumbent solution, provides a lower bound on optimal objective
-#          obj - vector storing the incumbent objective value at each iteration
-#
-
-function simulated_annealing_local_search_partition(D::Array{Float64, 2}, c::Float64, n::Vector{Int64}, N::Int64, I::Int64, E::Int64, x_init::Array{Float64, 1}, T_init::Float64, locations_regions_mapping::Dict{Int64, Int64})
-
-  W, L = size(D)
-  P = maximum(values(locations_regions_mapping))
-
-  # Pre-allocate lower bound vector
-  obj = Vector{Int64}(undef, I)
-
-  # Pre-allocate x-related containers
-  x_incumbent = Vector{Float64}(undef, L)
-  ind_ones2zeros_candidate = Vector{Int64}(undef, N)
-  ind_zeros2ones_candidate = Vector{Int64}(undef, N)
-  ind_ones2zeros_tmp = Vector{Int64}(undef, N)
-  ind_zeros2ones_tmp = Vector{Int64}(undef, N)
-
-  regions = [i for i in 1:P]
-  sample_count_per_region = Vector{Int64}(undef, P)
-  init_sample_count_per_region = zeros(Int64, P)
-  ind_samples_per_region_tmp = Vector{Int64}(undef, P+1)
-  ind_samples_per_region_candidate = Vector{Int64}(undef, P+1)
-  locations_count_per_region = zeros(Int64, P)
-  index_range_per_region = Vector{Int64}(undef, P+1)
-
-  @inbounds for i = 1:L
-    locations_count_per_region[locations_regions_mapping[i]] += 1
-  end
-
-  ind_ones_incumbent = Dict([(r, Vector{Int64}(undef, n[r])) for r in regions])
-  ind_zeros_incumbent = Dict([(r, Vector{Int64}(undef, locations_count_per_region[r]-n[r])) for r in regions])
-
-  index_range_per_region[1] = 1
-  @inbounds for j = 1:P
-    index_range_per_region[j+1] = index_range_per_region[j] + locations_count_per_region[j]
-  end
-
-  # Pre-allocate y-related arrays
-  y_incumbent = Array{Bool}(undef, W, 1)
-  y_tmp = Array{Bool}(undef, W, 1)
-  c_threshold = c .* ones(Float64, W, 1)
-
-  Dx_incumbent = zeros(Float64, W, 1)
-  Dx_tmp = Array{Float64}(undef, W, 1)
-
-  # Initialise
-  ind_ones, counter_ones = findall(x_init .== 1.), zeros(Int64, P)
-  @inbounds for ind in ind_ones
-    p = locations_regions_mapping[ind]
-    counter_ones[p] += 1
-    ind_ones_incumbent[p][counter_ones[p]] = ind
-    Dx_incumbent .+= view(D, :, ind)
-  end
-  y_incumbent .= Dx_incumbent .>= c_threshold
-
-  ind_zeros, counter_zeros = findall(x_init .== 0.), zeros(Int64, P)
-  for ind in ind_zeros
-    p = locations_regions_mapping[ind]
-    counter_zeros[p] += 1
-    ind_zeros_incumbent[p][counter_zeros[p]] = ind
-  end
-  ind_samples_per_region_tmp[1] = 1
-
-  # Iterate
-  @inbounds for i = 1:I
-    obj[i] = sum(y_incumbent)
-    delta_candidate = -1000000
-    @inbounds for e = 1:E
-      # Sample from neighbourhood
-      sample_count_per_region .= init_sample_count_per_region
-      @inbounds while sum(sample_count_per_region) < N
-        p = sample(regions)
-        if (sample_count_per_region[p] < n[p]) && (sample_count_per_region[p] < locations_count_per_region[p] - n[p])
-          sample_count_per_region[p] += 1
-        end
-      end
-
-      @inbounds for i = 1:P
-        ind_samples_per_region_tmp[i+1] = ind_samples_per_region_tmp[i] + sample_count_per_region[i]
-        if sample_count_per_region[i] != 0
-          view(ind_ones2zeros_tmp, ind_samples_per_region_tmp[i]:(ind_samples_per_region_tmp[i+1]-1)) .= sample(ind_ones_incumbent[i], sample_count_per_region[i], replace=false)
-          view(ind_zeros2ones_tmp, ind_samples_per_region_tmp[i]:(ind_samples_per_region_tmp[i+1]-1)) .= sample(ind_zeros_incumbent[i], sample_count_per_region[i], replace=false)
-        end
-      end
-
-      # Compute y and associated objective value
-      Dx_tmp .= Dx_incumbent .+ sum(view(D, :, ind_zeros2ones_tmp), dims = 2) .- sum(view(D, :, ind_ones2zeros_tmp), dims = 2)
-      y_tmp .= Dx_tmp .>= c_threshold
-
-      # Update objective difference
-      delta_tmp = sum(y_tmp) - obj[i]
-
-      # Update candidate solution
-      if delta_tmp > delta_candidate
-        ind_ones2zeros_candidate .= ind_ones2zeros_tmp
-        ind_zeros2ones_candidate .= ind_zeros2ones_tmp
-        ind_samples_per_region_candidate .= ind_samples_per_region_tmp
-        delta_candidate = delta_tmp
-      end
-    end
-    if delta_candidate > 0
-      @inbounds for i = 1:P
-        ind_ones_incumbent[i] .= union(setdiff(ind_ones_incumbent[i], view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1))), view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1)))
-        ind_zeros_incumbent[i] .= union(setdiff(ind_zeros_incumbent[i], view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1))), view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1)))
-      end
-      Dx_incumbent .= Dx_incumbent .+ sum(view(D, :, ind_zeros2ones_candidate), dims = 2) .- sum(view(D, :, ind_ones2zeros_candidate), dims = 2)
-      y_incumbent .= Dx_incumbent .>= c_threshold
-    else
-      T = T_init * exp(-10*i/I)
-      p = exp(delta_candidate / T)
-      d = Binomial(1, p)
-      b = rand(d)
-      if b == 1
-        @inbounds for i = 1:P
-          ind_ones_incumbent[i] .= union(setdiff(ind_ones_incumbent[i], view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1))), view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1)))
-          ind_zeros_incumbent[i] .= union(setdiff(ind_zeros_incumbent[i], view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1))), view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1)))
-        end
-        Dx_incumbent .= Dx_incumbent .+ sum(view(D, :, ind_zeros2ones_candidate), dims = 2) .- sum(view(D, :, ind_ones2zeros_candidate), dims = 2)
-        y_incumbent .= Dx_incumbent .>= c_threshold
-      end
-    end
-  end
-  @inbounds for i in 1:P
-    x_incumbent[ind_ones_incumbent[i]] .= 1.
-    x_incumbent[ind_zeros_incumbent[i]] .= 0.
-  end
-  LB = sum(y_incumbent)
-  return x_incumbent, LB, obj
-
-end
-
-################### Multi-Threshold Greedy Heuristic #######################
-
-# Description: function implementing a multi-threshold greedy algorithm with random selection of locations leading to same objective gain for unpartitioned geographical regions (single cardinality constraint)
-#
-# Comments: 1) types of inputs should match those declared in argument list
-#           2) at every iteration, randomisation is used to select the location that should be removed from the locations set when several locations are "tied", i.e., removing each of them leads to the same decrease in objective value
-#
-# Inputs: D - criticality matrix with entries in {0, 1}, where rows represent time windows and columns represent locations
-#         c - global criticality threshold
-#         n - number of sites to deploy
-#
-# Outputs: x_incumbent - vector with entries in {0, 1} and cardinality n representing the incumbent solution at the last iteration
-#          LB_incumbent - objective value of incumbent solution, provides a lower bound on optimal objective
-#
-
-function threshold_greedy_algorithm(D::Array{Float64,2}, c::Float64, n::Float64)
-
-  W, L = size(D)
-  n = convert(Int64, n)
-  ind_compl_incumbent = [i for i in 1:L]
-  ind_incumbent = Vector{Int64}(undef, n)
-  Dx_incumbent = zeros(Float64, W)
-  obj_incumbent = 0
-  Dx_tmp = Vector{Float64}(undef, W)
-  y_tmp = Vector{Float64}(undef, W)
-  ind_candidate_list = zeros(Int64, L)
-  locations_added, threshold = 0, 0
-  @inbounds while locations_added < n
-    if locations_added < c
-      threshold = locations_added + 1
-      obj_candidate = 0
-    else
-      obj_candidate = obj_incumbent
-    end
-    ind_candidate_pointer = 1
-    @inbounds for ind in ind_compl_incumbent
-        Dx_tmp .= Dx_incumbent .+ view(D, :, ind)
-        y_tmp .= Dx_tmp .>= threshold
-        obj_tmp = sum(y_tmp)
-        if obj_tmp > obj_candidate
-          ind_candidate_pointer = 1
-          ind_candidate_list[ind_candidate_pointer] = ind
-          obj_candidate = obj_tmp
-          ind_candidate_pointer += 1
-        elseif obj_tmp == obj_candidate
-          ind_candidate_list[ind_candidate_pointer] = ind
-          ind_candidate_pointer += 1
-        end
-    end
-    ind_candidate = sample(view(ind_candidate_list, 1:ind_candidate_pointer-1))
-    ind_incumbent[locations_added+1] = ind_candidate
-    filter!(a -> a != ind_candidate, ind_compl_incumbent)
-    Dx_incumbent .= Dx_incumbent .+ view(D, :, ind_candidate)
-    obj_incumbent = obj_candidate
-    locations_added += 1
-  end
-  x_incumbent = zeros(Float64, L)
-  x_incumbent[ind_incumbent] .= 1.
-  return x_incumbent, obj_incumbent
-end
-
-function time_threshold_greedy_algorithm(D::Array{Float64,2}, c::Float64, n::Float64)
-  @time threshold_greedy_algorithm(D, c, n)
-end
-
-#################### Stochastic Multi-Threshold Greedy Algorithm #######################
-
-# Description: function implementing the stochastic multi-threshold greedy algorithm (which generalises the stochastic greedy algorithm for submodular maximization (Mirzasoleiman et al, 2015))
-#
-# Comments: 1) types of inputs should match those declared in argument list
-#           2) at every iteration, randomisation is used to select the location that should be removed from the locations set when several locations are "tied", i.e., removing each of them leads to the same decrease in objective value
-#
-# Inputs: D - criticality matrix with entries in {0, 1}, where rows represent time windows and columns represent locations
-#         c - global criticality threshold
-#         n - number of sites to deploy
-#         p - factor (between 0 and 1) defines the size of the random subset s = (L/n) x p
-#
-# Outputs: x_incumbent - vector with entries in {0, 1} and cardinality n representing the incumbent solution at the last iteration
-#          LB_incumbent - objective value of incumbent solution, provides a lower bound on optimal objective
-#
-
-function stochastic_threshold_greedy_algorithm(D::Array{Float64,2}, c::Float64, n::Float64, p::Float64)
-
-  W, L = size(D)
-  s = convert(Int64, round(L*p))
-  random_ind_set = Vector{Int64}(undef, s)
-  ind_compl_incumbent = [i for i in 1:L]
-  ind_incumbent = Vector{Int64}(undef, convert(Int64, n))
-  Dx_incumbent = zeros(Float64, W)
-  obj_incumbent = 0
-  Dx_tmp = Vector{Float64}(undef, W)
-  y_tmp = Vector{Float64}(undef, W)
-  ind_candidate_list = zeros(Int64, L)
-  locations_added, threshold = 0, 0
-  @inbounds while locations_added < n
-    if locations_added < c
-      threshold = locations_added + 1
-      obj_candidate = 0
-    else
-      obj_candidate = obj_incumbent
-    end
-    ind_candidate_pointer = 1
-    sample!(ind_compl_incumbent, random_ind_set, replace=false)
-    @inbounds for ind in random_ind_set
-        Dx_tmp .= Dx_incumbent .+ view(D, :, ind)
-        y_tmp .= Dx_tmp .>= threshold
-        obj_tmp = sum(y_tmp)
-        if obj_tmp > obj_candidate
-          ind_candidate_pointer = 1
-          ind_candidate_list[ind_candidate_pointer] = ind
-          obj_candidate = obj_tmp
-          ind_candidate_pointer += 1
-        elseif obj_tmp == obj_candidate
-          ind_candidate_list[ind_candidate_pointer] = ind
-          ind_candidate_pointer += 1
-        end
-    end
-    ind_candidate = sample(view(ind_candidate_list, 1:ind_candidate_pointer-1))
-    ind_incumbent[locations_added+1] = ind_candidate
-    filter!(a -> a != ind_candidate, ind_compl_incumbent)
-    Dx_incumbent .= Dx_incumbent .+ view(D, :, ind_candidate)
-    obj_incumbent = obj_candidate
-    locations_added += 1
-  end
-  x_incumbent = zeros(Float64, L)
-  x_incumbent[ind_incumbent] .= 1.
-  return x_incumbent, obj_incumbent
-end
-
-function stochastic_threshold_greedy_algorithm_trajectory(D::Array{Float64,2}, c::Float64, n::Float64, p::Float64)
-  W, L = size(D)
-  s = convert(Int64, round((L/n)*p))
-  random_ind_set = Vector{Int64}(undef, s)
-  ind_compl_incumbent = [i for i in 1:L]
-  ind_incumbent = zeros(Int64, 0)
-  Dx_incumbent = zeros(Float64, W)
-  y_incumbent = zeros(Float64, W)
-  obj_incumbent = 0
-  obj_incumbent_vec = zeros(Float64, convert(Int64,n))
-  Dx_tmp = Vector{Float64}(undef, W)
-  y_tmp = Vector{Float64}(undef, W)
-  locations_added, threshold = 0, 0
-  @inbounds while locations_added < n
-    if locations_added < c
-      threshold = locations_added + 1
-      obj_candidate = 0
-    else
-      obj_candidate = obj_incumbent
-    end
-    ind_candidate_list = Vector{Int64}(undef, 0)
-    random_ind_set .= sample(ind_compl_incumbent, s, replace=false)
-    @inbounds for ind in random_ind_set
-        Dx_tmp .= Dx_incumbent .+ view(D, :, ind)
-        y_tmp .= Dx_tmp .>= threshold
-        obj_tmp = sum(y_tmp)
-        if obj_tmp > obj_candidate
-          ind_candidate_list = [ind]
-          obj_candidate = obj_tmp
-        elseif obj_tmp == obj_candidate
-          ind_candidate_list = union(ind, ind_candidate_list)
-        end
-    end
-    ind_candidate = sample(ind_candidate_list)
-    ind_incumbent = union(ind_incumbent, ind_candidate)
-    ind_compl_incumbent = setdiff(ind_compl_incumbent, ind_candidate)
-    Dx_incumbent .= Dx_incumbent .+ view(D, :, ind_candidate)
-    y_incumbent .= Dx_incumbent .>= threshold
-    obj_incumbent = sum(y_incumbent)
-    obj_incumbent_vec[locations_added+1] = obj_incumbent
-    locations_added += 1
-  end
-  x_incumbent = zeros(Float64, L)
-  x_incumbent[ind_ones_incumbent] .= 1.
-  return x_incumbent, obj_incumbent
-
-end
-
-#################### Simulated Annealing Local Search #######################
-
-# Description: function implementing a simulated annealing-inspired local search for unpartitioned geographical regions
-#
-# Comments: 1) types of inputs should match those declared in argument list
-#
-# Inputs: D - criticality matrix with entries in {0, 1}, where rows represent time windows and columns represent locations
-#         c - global criticality threshold
-#         n - number of sites to deploy
-#         N - number of locations to swap in order to obtain a neighbour of the incumbent solution
-#         I - number of iterations (outer loop), defines the number of times the incumbent solution may be updated
-#         E - number of epochs (inner loop), defines the number of neighbours of the incumbent solution sampled at each iteration
-#         x_init - initial solution, vector with entries in {0, 1}, with cardinality n and whose dimension is compatible with D
-#         T_init - initial temperature from which the (exponentially-decreasing) temperature schedule is constructed
-#
-# Outputs: x_incumbent - vector with entries in {0, 1} and cardinality n representing the incumbent solution at the last iteration
-#          LB_incumbent - objective value of incumbent solution, provides a lower bound on optimal objective
-#          obj - vector storing the incumbent objective value at each iteration
-#
-
-function simulated_annealing_local_search(D::Array{Float64, 2}, c::Float64, n::Float64, N::Int64, I::Int64, E::Int64, x_init::Array{Float64, 1}, T_init::Float64, legacy_locations::Vector{Int64})
-
-  W, L = size(D)
-  n = convert(Int64, n)
-  n_new = n - length(legacy_locations)
-
-  # Pre-allocate lower bound vector
-  obj = Vector{Int64}(undef, I)
-
-  # Pre-allocate x-related arrays
-  ind_ones_incumbent = Vector{Int64}(undef, n_new)
-  ind_ones_incumbent_filtered = Vector{Int64}(undef, n_new-N)
-  ind_zeros_incumbent = Vector{Int64}(undef, L-n)
-  ind_zeros_incumbent_filtered = Vector{Int64}(undef, L-n-N)
-  ind_ones2zeros_candidate = Vector{Int64}(undef, N)
-  ind_zeros2ones_candidate = Vector{Int64}(undef, N)
-  ind_ones2zeros_tmp = Vector{Int64}(undef, N)
-  ind_zeros2ones_tmp = Vector{Int64}(undef, N)
-
-  # Pre-allocate y-related arrays
-  y_incumbent = Array{Bool}(undef, W, 1)
-  y_tmp = Array{Bool}(undef, W, 1)
-
-  Dx_incumbent = Array{Float64}(undef, W, 1)
-  Dx_tmp = Array{Float64}(undef, W, 1)
-
-  # Initialise
-  ind_deployed = findall(x_init .== 1.)
-  Dx_incumbent .= sum(view(D, :, ind_deployed), dims = 2)
-  y_incumbent .= Dx_incumbent .>= c
-  ind_ones_incumbent .= filter(a -> !(a in legacy_locations), ind_deployed)
-  ind_zeros_incumbent .= findall(x_init .== 0.)
-
-  # Iterate
-  @inbounds for i = 1:I
-    obj[i] = sum(y_incumbent)
-    delta_candidate = -100000
-    @inbounds for e = 1:E
-      # Sample from neighbourhood
-      sample!(ind_ones_incumbent, ind_ones2zeros_tmp, replace=false)
-      sample!(ind_zeros_incumbent, ind_zeros2ones_tmp, replace=false)
-
-      # Compute y and associated objective value
-      @inbounds for j = 1:N
-        Dx_tmp .= Dx_incumbent .+ view(D, :, ind_zeros2ones_tmp[j]) .- view(D, :, ind_ones2zeros_tmp[j])
-      end
-      y_tmp .= Dx_tmp .>= c
-
-      # Update objective difference
-      delta_tmp = sum(y_tmp) - obj[i]
-
-      # Update candidate solution
-      if delta_tmp > delta_candidate
-        ind_ones2zeros_candidate .= ind_ones2zeros_tmp
-        ind_zeros2ones_candidate .= ind_zeros2ones_tmp
-        delta_candidate = delta_tmp
-      end
-    end
-    if delta_candidate > 0
-      ind_ones_incumbent_filtered .= filter(a -> !(a in ind_ones2zeros_candidate), ind_ones_incumbent)
-      ind_ones_incumbent[1:(n_new-N)] .= ind_ones_incumbent_filtered
-      ind_ones_incumbent[(n_new-N+1):n_new] .= ind_zeros2ones_candidate
-      ind_zeros_incumbent_filtered .= filter(a -> !(a in ind_zeros2ones_candidate), ind_zeros_incumbent)
-      ind_zeros_incumbent[1:(L-n-N)] .= ind_zeros_incumbent_filtered
-      ind_zeros_incumbent[(L-n-N+1):(L-n)] .= ind_ones2zeros_candidate
-      @inbounds for j = 1:N
-        Dx_incumbent .= Dx_incumbent .+ view(D, :, ind_zeros2ones_candidate[j]) .- view(D, :, ind_ones2zeros_candidate[j])
-      end
-      y_incumbent .= Dx_incumbent .>= c
-    else
-      T = T_init * exp(-10*i/I)
-      p = exp(delta_candidate / T)
-      d = Binomial(1, p)
-      b = rand(d)
-      if b == 1
-        ind_ones_incumbent_filtered .= filter(a -> !(a in ind_ones2zeros_candidate), ind_ones_incumbent)
-        ind_ones_incumbent[1:(n_new-N)] .= ind_ones_incumbent_filtered
-        ind_ones_incumbent[(n_new-N+1):n_new] .= ind_zeros2ones_candidate
-        ind_zeros_incumbent_filtered .= filter(a -> !(a in ind_zeros2ones_candidate), ind_zeros_incumbent)
-        ind_zeros_incumbent[1:(L-n-N)] .= ind_zeros_incumbent_filtered
-        ind_zeros_incumbent[(L-n-N+1):(L-n)] .= ind_ones2zeros_candidate
-        @inbounds for j = 1:N
-          Dx_incumbent .= Dx_incumbent .+ view(D, :, ind_zeros2ones_candidate[j]) .- view(D, :, ind_ones2zeros_candidate[j])
-        end
-        y_incumbent .= Dx_incumbent .>= c
-      end
-    end
-  end
-  obj_incumbent = sum(y_incumbent)
-  x_incumbent = zeros(Float64, L)
-  x_incumbent[ind_ones_incumbent] .= 1.
-  return x_incumbent, obj_incumbent, obj
-end
diff --git a/src/jl/SitingHeuristics.jl b/src/jl/SitingHeuristics.jl
deleted file mode 100644
index 7e76aa37b4688ab8d9d0c4a78dcd149bcc49b54b..0000000000000000000000000000000000000000
--- a/src/jl/SitingHeuristics.jl
+++ /dev/null
@@ -1,182 +0,0 @@
-using PyCall
-using Dates
-using Random
-
-include("optimisation_models.jl")
-include("MCP_heuristics.jl")
-include("cross_validation_tools.jl")
-
-pickle = pyimport("pickle")
-
-#################### Useful Functions #######################
-
-function myunpickle(filename)
-
-  r = nothing
-  @pywith pybuiltin("open")(filename,"rb") as f begin
-
-    r = pickle.load(f)
-  end
-  return r
-
-end
-
-function main_MIRSA(index_dict, deployment_dict, D, c, N, I, E, T_init, R, run, p, data_path)
-
-  index_dict = Dict([(convert(Int64, k), convert(Int64, index_dict[k])) for k in keys(index_dict)])
-  deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)])
-  D  = convert.(Float64, D)
-
-  c = convert(Float64, c)
-  N = convert(Int64, N)
-  I = convert(Int64, I)
-  E = convert(Int64, E)
-  T_init = convert(Float64, T_init)
-  R = convert(Int64, R)
-  run = string(run)
-  p = convert(Float64, p)
-  data_path = string(data_path)
-  legacy_index = Vector{Int64}(undef, 0)
-
-  W, L = size(D)
-
-  P = maximum(values(index_dict))
-  n = convert(Float64, deployment_dict[1])
-
-  if run == "MIR"
-
-    x_sol, LB_sol, obj_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R), Array{Float64, 2}(undef, R, I)
-    x_init = solve_MILP(D, c, n, "Gurobi")
-
-    for r = 1:R
-      x_sol[r, :], LB_sol[r], obj_sol[r, :] = simulated_annealing_local_search(D, c, n, N, I, E, x_init, T_init, legacy_index)
-    end
-
-  elseif run == "RS"
-
-    x_sol, LB_sol, obj_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R), Array{Float64, 2}(undef, R, I)
-    x_init = zeros(Int64, L)
-    ind_set = [l for l in 1:L]
-    n_while = n
-
-    while n_while > 0
-      loc = sample(ind_set)
-      deleteat!(ind_set, findall(x->x==loc, ind_set))
-      x_init[loc] = 1
-      n_while -= 1
-    end
-
-    x_init = convert.(Float64, x_init)
-
-    for r = 1:R
-      x_sol[r, :], LB_sol[r], obj_sol[r, :] = simulated_annealing_local_search(D, c, n, N, I, E, x_init, T_init, legacy_index)
-    end
-
-  elseif run == "SGH"
-
-    sample_size = 10
-    LB_init = myunpickle(joinpath(data_path, "objective_vector.p"))
-    x_init = myunpickle(joinpath(data_path, "solution_matrix.p"))
-
-    x_sol, LB_sol, obj_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R), Array{Float64, 2}(undef, R, I)
-
-    for r = 1:R
-      println(r, "/", R, "for", c)
-
-      LB_init_best = argmax(sample(LB_init, sample_size, replace=false))
-      x_init_best = x_init[LB_init_best, :]
-
-      x_sol[r, :], LB_sol[r], obj_sol[r, :] = simulated_annealing_local_search(D, c, n, N, I, E, x_init_best, T_init, legacy_index)
-    end
-
-  else
-    println("No such run available.")
-    throw(ArgumentError)
-  end
-
-  return x_sol, LB_sol, obj_sol
-
-end
-
-function main_GRED(deployment_dict, D, c, R, p, run)
-
-  deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)])
-  D  = convert.(Float64, D)
-  c = convert(Float64, c)
-  R = convert(Int64, R)
-
-  W, L = size(D)
-
-  if run == "TGH"
-    x_sol, LB_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R)
-    n = convert(Float64, deployment_dict[1])
-    for r = 1:R
-      x_sol[r, :], LB_sol[r] = threshold_greedy_algorithm(D, c, n)
-    end
-
-  elseif run == "STGH"
-    x_sol, LB_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R)
-    n = convert(Float64, deployment_dict[1])
-    p = convert(Float64, p)
-    for r = 1:R
-      x_sol[r, :], LB_sol[r] = stochastic_threshold_greedy_algorithm(D, c, n, p)
-    end
-  else
-    println("No such run available.")
-    throw(ArgumentError)
-  end
-
-  return x_sol, LB_sol
-
-end
-
-function main_RAND(deployment_dict, D, c, I, R, run)
-
-  deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)])
-  D  = convert.(Float64, D)
-  c = convert(Float64, c)
-  I = convert(Int64, I)
-  R = convert(Int64, R)
-
-  W, L = size(D)
-
-  if run == "RS"
-    x_sol, LB_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R)
-    n = convert(Float64, deployment_dict[1])
-    for r = 1:R
-      x_sol[r, :], LB_sol[r] = random_search(D, c, n, I)
-    end
-
-  else
-    println("No such run available.")
-    throw(ArgumentError)
-  end
-
-  return x_sol, LB_sol
-
-end
-
-function main_CROSS(D, c, n, k, number_years, number_years_training, number_years_testing, number_experiments, number_runs_per_experiment, criterion, cross_validation_method, algorithm)
-
-  D = convert.(Float64, D)
-  c = convert(Float64, c)
-  n = convert(Int64, n)
-  k = convert(Int64, k)
-  number_years = convert(Int64, number_years)
-  number_years_training = convert(Int64, number_years_training)
-  number_years_testing = convert(Int64, number_years_testing)
-  number_experiments = convert(Int64, number_experiments)
-  number_runs_per_experiment = convert(Int64, number_runs_per_experiment)
-  criterion = convert(String, criterion)
-  cross_validation_method = convert(String, cross_validation_method)
-  algorithm = convert(String, algorithm)
-
-  if cross_validation_method == "custom"
-      obj_training, obj_testing, ind_training, ind_testing = custom_cross_validation(D, c, n, number_years, number_years_training, number_years_testing, number_experiments, number_runs_per_experiment, criterion, algorithm);
-  elseif cross_validation_method == "k_fold"
-      obj_training, obj_testing, ind_training, ind_testing = k_fold_cross_validation(D, c, n, k, number_years, number_runs_per_experiment, criterion, algorithm);
-  end
-
-  return obj_training, obj_testing, ind_training, ind_testing
-
-end
diff --git a/src/jl/cross_validation_tools.jl b/src/jl/cross_validation_tools.jl
deleted file mode 100644
index f2e08df0938555a1c0c58a01e50f872887d20927..0000000000000000000000000000000000000000
--- a/src/jl/cross_validation_tools.jl
+++ /dev/null
@@ -1,458 +0,0 @@
-using StatsBase
-using JuMP
-using Gurobi
-using Statistics
-using PyCall
-
-pickle = pyimport("pickle")
-
-#################### Functions to Pickle and Unpickle #######################
-
-function myunpickle(filename)
-
-  r = nothing
-  @pywith pybuiltin("open")(filename,"rb") as f begin
-    r = pickle.load(f)
-  end
-  return r
-
-end
-
-function mypickle(filename, obj)
-
-  out = open(filename,"w")
-  pickle.dump(obj, out)
-  close(out)
-
- end
-
- #################### k-fold Cross Validation #######################
-
-function k_fold_cross_validation(D::Array{Float64, 2}, c::Float64, n::Int64, k::Int64, number_years::Int64, number_runs_per_experiment::Int64, criterion::String, algorithm::String="rga", number_periods_per_year::Int64=8760, p::Float64=0.05, N::Int64=1, I::Int64=2000, E::Int64=500, T_init::Float64=100.)
-
-    W, L = size(D)
-    years_pool, windows_pool = [i for i = 1:number_years], [[j+number_periods_per_year*(i-1) for j = 1:number_periods_per_year] for i = 1:number_years]
-    years_dict = Dict(years_pool .=> windows_pool)
-    if mod(number_years, k) != 0
-        unused_years = mod(number_years, k)
-        years_dropped = sample(years_pool, unused_years, replace=false)
-        filter!(d -> !(d.first in years_dropped), years_dict)
-        filter!(a -> !(a in years_dropped), years_pool)
-    end
-
-    if mod(number_runs_per_experiment, 2) == 0
-        number_runs_per_experiment += 1
-    end
-    runs = [r for r = 1:number_runs_per_experiment]
-
-    years_pool  .= sample(years_pool, length(years_pool), replace=false)
-    sample_size = convert(Int64, length(years_pool) / k)
-    samples, years_subsets = [i for i = 1:k], [years_pool[1+sample_size*(i-1):sample_size*i] for i = 1:k]
-    samples_dict = Dict(samples .=> years_subsets)
-
-    samples_testing, samples_training = 0, Vector{Int64}(undef, k-1)
-    years_testing, years_training = Vector{Int64}(undef, sample_size), Vector{Int64}(undef, length(years_pool)-sample_size)
-    n_rows_testing, n_rows_training = sample_size * number_periods_per_year, (length(years_pool)-sample_size) * number_periods_per_year
-    col_tmp_testing, col_tmp_training = Vector{Float64}(undef, n_rows_testing), Vector{Float64}(undef, n_rows_training)
-    col_list_testing, col_list_training = [l for l = 1:L], [l for l = 1:L]
-    col_zeros_testing, col_zeros_training = zeros(Float64, n_rows_testing), zeros(Float64, n_rows_training)
-    y_tmp_testing, y_tmp_training = Vector{Float64}(undef, n_rows_testing), Vector{Float64}(undef, n_rows_training)
-    D_testing, D_training = Array{Float64, 2}(undef, n_rows_testing, L), Array{Float64, 2}(undef, n_rows_training, L)
-    ind_testing, ind_training = Array{Int64, 2}(undef, k, n), Array{Int64, 2}(undef, k, n)
-    ind_tmp_testing, ind_tmp_training = Array{Int64, 2}(undef, number_runs_per_experiment, n), Array{Int64, 2}(undef, number_runs_per_experiment, n)
-    ind_tmp = Vector{Int64}(undef, n)
-    obj_testing, obj_training = Vector{Float64}(undef, k), Vector{Float64}(undef, k)
-    obj_tmp_testing, obj_tmp_training = Vector{Float64}(undef, number_runs_per_experiment), Vector{Float64}(undef, number_runs_per_experiment)
-
-    @inbounds for s in samples
-        samples_testing = samples[s]
-        samples_training .= filter(a -> a != samples[s], samples)
-        years_testing .= samples_dict[samples_testing]
-        years_training .= reduce(vcat,collect(samples_dict[j] for j in samples_training))
-
-        matrix_slicer!(D, D_training, years_training, years_dict, number_periods_per_year, col_list_training)
-        matrix_slicer!(D, D_testing, years_testing, years_dict, number_periods_per_year, col_list_testing)
-
-        println("Training Experiment ", s)
-        @inbounds for run in runs
-            println("Training Run ", run)
-            if algorithm == "mirsa"
-                ind_tmp .= mirsa(D_training, c, n, N, I, E, T_init)
-            elseif algorithm == "rga"
-                ind_tmp .= randomised_greedy_algorithm(D_training, c, n, p)
-            elseif algorithm == "ga"
-                ind_tmp .= greedy_algorithm(D_training, c, n)
-            end
-            obj_tmp_training[run] = evaluate_obj(D_training, c, ind_tmp, col_tmp_training, col_zeros_training, y_tmp_training)
-            ind_tmp_training[run,:] .= ind_tmp
-        end
-        if criterion == "median"
-            reference_run = findall(obj_tmp_training .== median(obj_tmp_training))[1]
-        elseif criterion == "max"
-            reference_run = findall(obj_tmp_training .== maximum(obj_tmp_training))[1]
-        end
-        ind_tmp .= view(ind_tmp_training, reference_run, :)
-        obj_training[s] = evaluate_obj(D_testing, c, ind_tmp, col_tmp_testing, col_zeros_testing, y_tmp_testing)
-        ind_training[s, :] .= ind_tmp
-
-        println("Testing Experiment ", s)
-        @inbounds for run in runs
-            println("Testing Run ", run)
-            if algorithm == "mirsa"
-                ind_tmp .= mirsa(D_testing, c, n, N, I, E, T_init)
-            elseif algorithm == "rga"
-                ind_tmp .= randomised_greedy_algorithm(D_testing, c, n, p)
-            elseif algorithm == "ga"
-                ind_tmp .= greedy_algorithm(D_testing, c, n)
-            end
-            obj_tmp_testing[run] = evaluate_obj(D_testing, c, ind_tmp, col_tmp_testing, col_zeros_testing, y_tmp_testing)
-            ind_tmp_testing[run,:] .= ind_tmp
-        end
-        if criterion == "median"
-            reference_run = findall(obj_tmp_testing .== median(obj_tmp_testing))[1]
-        elseif criterion == "max"
-            reference_run = findall(obj_tmp_testing .== maximum(obj_tmp_testing))[1]
-        end
-        obj_testing[s] = obj_tmp_testing[reference_run]
-        ind_testing[s, :] .= view(ind_tmp_testing, reference_run, :)
-    end
-    return ind_training, ind_testing, obj_training, obj_testing
-end
-
-function time_k_fold_cross_validation(D::Array{Float64, 2}, c::Float64, n::Int64, k::Int64, number_years::Int64, number_runs_per_experiment::Int64, criterion::String, algorithm::String="rga", number_periods_per_year::Int64=8760, p::Float64=0.05, N::Int64=1, I::Int64=2000, E::Int64=500, T_init::Float64=100.)
-  @time k_fold_cross_validation(D, c, n, k, number_years, number_runs_per_experiment, criterion)
-end
-
-#################### Custom Cross Validation #######################
-
-function custom_cross_validation(D::Array{Float64, 2}, c::Float64, n::Int64, number_years::Int64, number_years_training::Int64, number_years_testing::Int64, number_experiments::Int64, number_runs_per_experiment::Int64, criterion::String, algorithm::String="rga", number_periods_per_year::Int64=8760, p::Float64=0.05, N::Int64=1, I::Int64=2000, E::Int64=500, T_init::Float64=100.)
-
-    W, L = size(D)
-    years_pool, windows_pool = [i for i = 1:number_years], [[j+number_periods_per_year*(i-1) for j = 1:number_periods_per_year] for i = 1:number_years]
-    years_windows_dict = Dict(years_pool .=> windows_pool)
-
-    if (number_years_testing + number_years_training) != number_years
-        unused_years = number_years - (number_years_testing + number_years_training)
-        years_dropped = sample(years_pool, unused_years, replace=false)
-        filter!(d -> !(d.first in years_dropped), years_windows_dict)
-        filter!(a -> !(a in years_dropped), years_pool)
-    end
-
-    if mod(number_runs_per_experiment, 2) == 0
-        number_runs_per_experiment += 1
-    end
-
-    runs = [r for r = 1:number_runs_per_experiment]
-    experiments = [e for e = 1:number_experiments]
-
-    years_training_dict = Dict([(e, Vector{Int64}(undef, number_years_training)) for e in experiments])
-    years_testing_dict = Dict([(e, Vector{Int64}(undef, number_years_testing)) for e in experiments])
-    @inbounds for e in experiments
-        years_training_dict[e]  .= sample(years_pool, number_years_training, replace=false)
-        years_testing_dict[e] .= filter(a -> !(a in years_training_dict[e]), years_pool)
-    end
-
-    number_rows_testing, number_rows_training = number_years_testing * number_periods_per_year, number_years_training * number_periods_per_year
-    col_tmp_testing, col_tmp_training = Vector{Float64}(undef, number_rows_testing), Vector{Float64}(undef, number_rows_training)
-    col_list_testing, col_list_training = [l for l = 1:L], [l for l = 1:L]
-    col_zeros_testing, col_zeros_training = zeros(Float64, number_rows_testing), zeros(Float64, number_rows_training)
-    y_tmp_testing, y_tmp_training = Vector{Float64}(undef, number_rows_testing), Vector{Float64}(undef, number_rows_training)
-    D_testing, D_training = Array{Float64, 2}(undef, number_rows_testing, L), Array{Float64, 2}(undef, number_rows_training, L)
-    ind_testing, ind_training = Array{Int64, 2}(undef, number_experiments, n), Array{Int64, 2}(undef, number_experiments, n)
-    ind_tmp_testing, ind_tmp_training = Array{Int64, 2}(undef, number_runs_per_experiment, n), Array{Int64, 2}(undef, number_runs_per_experiment, n)
-    ind_tmp = Vector{Int64}(undef, n)
-    obj_testing, obj_training = Vector{Float64}(undef, number_experiments), Vector{Float64}(undef, number_experiments)
-    obj_tmp_testing, obj_tmp_training = Vector{Float64}(undef, number_runs_per_experiment), Vector{Float64}(undef, number_runs_per_experiment)
-
-    @inbounds for e in experiments
-
-        println("Training Years: ", years_training_dict[e])
-        println("Testing Years: ", years_testing_dict[e])
-        matrix_slicer!(D, D_training, years_training_dict[e], years_windows_dict, number_periods_per_year, col_list_training)
-        matrix_slicer!(D, D_testing, years_testing_dict[e], years_windows_dict, number_periods_per_year, col_list_testing)
-
-        println("Training Experiment ", e)
-        @inbounds for run in runs
-            println("Training Run ", run)
-            if algorithm == "mirsa"
-                ind_tmp .= mirsa(D_training, c, n, N, I, E, T_init)
-            elseif algorithm == "rga"
-                ind_tmp .= randomised_greedy_algorithm(D_training, c, n, p)
-            elseif algorithm == "ga"
-                ind_tmp .= greedy_algorithm(D_training, c, n)
-            end
-            obj_tmp_training[run] = evaluate_obj(D_training, c, ind_tmp, col_tmp_training, col_zeros_training, y_tmp_training)
-            ind_tmp_training[run,:] .= ind_tmp
-        end
-        if criterion == "median"
-            reference_run = findall(obj_tmp_training .== median(obj_tmp_training))[1]
-        elseif criterion == "max"
-            reference_run = findall(obj_tmp_training .== maximum(obj_tmp_training))[1]
-        end
-        # Evaluate the solution of the reference training run on the testing years
-        ind_tmp .= view(ind_tmp_training, reference_run, :)
-        obj_training[e] = evaluate_obj(D_testing, c, ind_tmp, col_tmp_testing, col_zeros_testing, y_tmp_testing)
-        ind_training[e, :] .= ind_tmp
-
-        println("Testing Experiment ", e)
-        @inbounds for run in runs
-            println("Testing Run ", run)
-            if algorithm == "mirsa"
-                ind_tmp .= mirsa(D_testing, c, n, N, I, E, T_init)
-            elseif algorithm == "rga"
-                ind_tmp .= randomised_greedy_algorithm(D_testing, c, n, p)
-            elseif algorithm == "ga"
-                ind_tmp .= greedy_algorithm(D_testing, c, n)
-            end
-            obj_tmp_testing[run] = evaluate_obj(D_testing, c, ind_tmp, col_tmp_testing, col_zeros_testing, y_tmp_testing)
-            ind_tmp_testing[run,:] .= ind_tmp
-        end
-        if criterion == "median"
-            reference_run = findall(obj_tmp_testing .== median(obj_tmp_testing))[1]
-        elseif criterion == "max"
-            reference_run = findall(obj_tmp_testing .== maximum(obj_tmp_testing))[1]
-        end
-        obj_testing[e] = obj_tmp_testing[reference_run]
-        ind_testing[e, :] .= view(ind_tmp_testing, reference_run, :)
-    end
-    return ind_training, ind_testing, obj_training, obj_testing
-end
-
-function time_custom_cross_validation(D::Array{Float64, 2}, c::Float64, n::Int64, number_years::Int64, number_years_training::Int64, number_years_testing::Int64, number_experiments::Int64, number_runs_per_experiment::Int64, criterion::String, algorithm::String="rga", number_periods_per_year::Int64=8760, p::Float64=0.05, N::Int64=1, I::Int64=2000, E::Int64=500, T_init::Float64=100.)
-  # Forward all arguments so the timed call matches the passed configuration
-  @time custom_cross_validation(D, c, n, number_years, number_years_training, number_years_testing, number_experiments, number_runs_per_experiment, criterion, algorithm, number_periods_per_year, p, N, I, E, T_init)
-end
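-
-# A minimal usage sketch (hypothetical sizes, not from the original sources):
-# with a criticality matrix D spanning 10 hourly-resolved years and 20 sites
-# to select at threshold c = 5.0,
-#   ind_train, ind_test, obj_train, obj_test =
-#       custom_cross_validation(D, 5.0, 20, 10, 5, 5, 10, 5, "median", "rga")
-# trains on 5 sampled years and tests on the remaining 5, over 10 experiments
-# of 5 runs each.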
-
-#################### Auxiliary Functions #######################
-
-function evaluate_obj(D_tmp::Array{Float64, 2}, c::Float64, ind_locations::Vector{Int64}, col_tmp::Vector{Float64}, col_zeros::Vector{Float64}, y_tmp::Vector{Float64})
-
-    col_tmp .= col_zeros
-    @inbounds for ind in ind_locations
-        col_tmp .+= view(D_tmp, :, ind)
-    end
-    y_tmp .= col_tmp .>= c
-    return sum(y_tmp)
-
-end
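-
-# The objective counts time windows covered by at least c of the selected sites.
-# Tiny worked example (illustrative only): for D = [1. 0.; 1. 1.; 0. 1.]
-# (3 windows, 2 sites) and c = 2.0, selecting both sites gives row sums
-# [1, 2, 1], so only window 2 clears the threshold and
-# evaluate_obj(D, 2.0, [1, 2], zeros(3), zeros(3), zeros(3)) == 1.0.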
-
-function matrix_slicer!(D::Array{Float64, 2}, D_tmp::Array{Float64, 2}, keys::Vector{Int64}, row_dict::Dict{Int64, Vector{Int64}}, number_rows::Int64, column_list::Vector{Int64})
-
-    pointer_col = 1
-    @inbounds for ind in column_list
-        pointer_start, pointer_end = 1, number_rows
-        @inbounds for key in keys
-            pointer_start_row_tmp, pointer_end_row_tmp = row_dict[key][1], row_dict[key][number_rows]
-            D_tmp[pointer_start:pointer_end, pointer_col] .= view(D, pointer_start_row_tmp:pointer_end_row_tmp, ind)
-            pointer_start += number_rows
-            pointer_end += number_rows
-        end
-        pointer_col += 1
-    end
-end
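-
-# matrix_slicer! copies, column by column, the rows of the selected years into
-# one contiguous block. Illustrative example (hypothetical mapping): with
-# number_rows = 8760 and years_windows_dict mapping year 3 to rows 17521:26280,
-# slicing keys = [3] writes D[17521:26280, l] into D_tmp[1:8760, l] for each
-# column l in column_list.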
-
-function time_matrix_slicer!(D::Array{Float64, 2}, D_tmp::Array{Float64, 2}, keys::Vector{Int64}, row_dict::Dict{Int64, Vector{Int64}}, number_rows::Int64, column_list::Vector{Int64})
-    @time matrix_slicer!(D, D_tmp, keys, row_dict, number_rows, column_list)
-end
-
-#################### MIRSA #######################
-
-function mirsa(D::Array{Float64, 2}, c::Float64, n::Int64, N::Int64, I::Int64, E::Int64, T_init::Float64)
-
-  W, L = size(D)
-
-  # Solve Mixed-Integer Relaxation
-
-  MILP_model = Model(optimizer_with_attributes(Gurobi.Optimizer, "TimeLimit" => 3600., "MIPGap" => 0.05, "LogToConsole" => 0))
-
-  @variable(MILP_model, x[1:L], Bin)
-  @variable(MILP_model, 0 <= y[1:W] <= 1)
-
-  @constraint(MILP_model, cardinality, sum(x) == n)
-  @constraint(MILP_model, covering, D * x .>= c * y)
-
-  @objective(MILP_model, Max, sum(y))
-
-  optimize!(MILP_model)
-
-  x_init = round.(value.(x))
-
-  # Pre-allocate x-related arrays
-  ind_ones_incumbent = Vector{Int64}(undef, n)
-  ind_ones_incumbent_filtered = Vector{Int64}(undef, n-N)
-  ind_zeros_incumbent = Vector{Int64}(undef, L-n)
-  ind_zeros_incumbent_filtered = Vector{Int64}(undef, L-n-N)
-  ind_ones2zeros_candidate = Vector{Int64}(undef, N)
-  ind_zeros2ones_candidate = Vector{Int64}(undef, N)
-  ind_ones2zeros_tmp = Vector{Int64}(undef, N)
-  ind_zeros2ones_tmp = Vector{Int64}(undef, N)
-
-  # Pre-allocate y-related arrays
-  y_incumbent = Vector{Float64}(undef, W)
-  y_tmp = Vector{Float64}(undef, W)
-
-  Dx_incumbent = zeros(Float64, W)
-  Dx_tmp = Vector{Float64}(undef, W)
-
-  # Initialise
-  ind_ones_incumbent .= findall(x_init .== 1.)
-  ind_zeros_incumbent .= findall(x_init .== 0.)
-  @inbounds for ind in ind_ones_incumbent
-    Dx_incumbent .+= view(D, :, ind)
-  end
-  y_incumbent .= Dx_incumbent .>= c
-  obj_incumbent = sum(y_incumbent)
-
-  # Simulated Annealing Local Search
-  @inbounds for i = 1:I
-    delta_candidate = -Inf  # lower than any achievable objective difference
-    @inbounds for e = 1:E
-      # Sample from neighbourhood
-      sample!(ind_ones_incumbent, ind_ones2zeros_tmp, replace=false)
-      sample!(ind_zeros_incumbent, ind_zeros2ones_tmp, replace=false)
-
-      # Compute y and associated objective value
-      Dx_tmp .= Dx_incumbent
-      @inbounds for j = 1:N
-        Dx_tmp .+= view(D, :, ind_zeros2ones_tmp[j])
-        Dx_tmp .-= view(D, :, ind_ones2zeros_tmp[j])
-      end
-      y_tmp .= Dx_tmp .>= c
-
-      # Update objective difference
-      delta_tmp = sum(y_tmp) - obj_incumbent
-
-      # Update candidate solution
-      if delta_tmp > delta_candidate
-        ind_ones2zeros_candidate .= ind_ones2zeros_tmp
-        ind_zeros2ones_candidate .= ind_zeros2ones_tmp
-        delta_candidate = delta_tmp
-      end
-    end
-    if delta_candidate > 0
-      ind_ones_incumbent_filtered .= filter(a -> !(a in ind_ones2zeros_candidate), ind_ones_incumbent)
-      ind_ones_incumbent[1:(n-N)] .= ind_ones_incumbent_filtered
-      ind_ones_incumbent[(n-N+1):n] .= ind_zeros2ones_candidate
-      ind_zeros_incumbent_filtered .= filter(a -> !(a in ind_zeros2ones_candidate), ind_zeros_incumbent)
-      ind_zeros_incumbent[1:(L-n-N)] .= ind_zeros_incumbent_filtered
-      ind_zeros_incumbent[(L-n-N+1):(L-n)] .= ind_ones2zeros_candidate
-      @inbounds for j = 1:N
-        Dx_incumbent .+= view(D, :, ind_zeros2ones_candidate[j])
-        Dx_incumbent .-= view(D, :, ind_ones2zeros_candidate[j])
-      end
-      y_incumbent .= Dx_incumbent .>= c
-      obj_incumbent = sum(y_incumbent)
-    else
-      T = T_init * exp(-10*i/I)
-      p = exp(delta_candidate / T)
-      d = Binomial(1, p)
-      b = rand(d)
-      if b == 1
-        ind_ones_incumbent_filtered .= filter(a -> !(a in ind_ones2zeros_candidate), ind_ones_incumbent)
-        ind_ones_incumbent[1:(n-N)] .= ind_ones_incumbent_filtered
-        ind_ones_incumbent[(n-N+1):n] .= ind_zeros2ones_candidate
-        ind_zeros_incumbent_filtered .= filter(a -> !(a in ind_zeros2ones_candidate), ind_zeros_incumbent)
-        ind_zeros_incumbent[1:(L-n-N)] .= ind_zeros_incumbent_filtered
-        ind_zeros_incumbent[(L-n-N+1):(L-n)] .= ind_ones2zeros_candidate
-        @inbounds for j = 1:N
-          Dx_incumbent .+= view(D, :, ind_zeros2ones_candidate[j])
-          Dx_incumbent .-= view(D, :, ind_ones2zeros_candidate[j])
-        end
-        y_incumbent .= Dx_incumbent .>= c
-        obj_incumbent = sum(y_incumbent)
-      end
-    end
-  end
-  return ind_ones_incumbent
-end
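-
-# Acceptance rule of the annealing loop above, summarised: the best of E
-# sampled N-swaps is always applied when it improves the objective; otherwise
-# it is accepted with probability exp(delta / T), where T = T_init * exp(-10i/I).
-# For instance (illustrative numbers), a swap with delta = -2 at T = 10 is kept
-# with probability exp(-0.2) ≈ 0.82, while the same swap near the end of the
-# schedule is almost surely rejected.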
-
-#################### Randomised Greedy Algorithm #######################
-
-function randomised_greedy_algorithm(D::Array{Float64,2}, c::Float64, n::Int64, p::Float64)
-
-  W, L = size(D)
-  s = convert(Int64, round(L*p))
-  random_ind_set = Vector{Int64}(undef, s)
-  ind_compl_incumbent = [i for i in 1:L]
-  ind_incumbent = Vector{Int64}(undef, convert(Int64, n))
-  Dx_incumbent = zeros(Float64, W)
-  obj_incumbent = 0
-  Dx_tmp = Vector{Float64}(undef, W)
-  y_tmp = Vector{Float64}(undef, W)
-  ind_candidate_list = zeros(Int64, L)
-  locations_added, threshold = 0, 0
-  @inbounds while locations_added < n
-    if locations_added < c
-      threshold = locations_added + 1
-      obj_candidate = 0
-    else
-      obj_candidate = obj_incumbent
-    end
-    ind_candidate_pointer = 1
-    sample!(ind_compl_incumbent, random_ind_set, replace=false)
-    @inbounds for ind in random_ind_set
-        Dx_tmp .= Dx_incumbent .+ view(D, :, ind)
-        y_tmp .= Dx_tmp .>= threshold
-        obj_tmp = sum(y_tmp)
-        if obj_tmp > obj_candidate
-          ind_candidate_pointer = 1
-          ind_candidate_list[ind_candidate_pointer] = ind
-          obj_candidate = obj_tmp
-          ind_candidate_pointer += 1
-        elseif obj_tmp == obj_candidate
-          ind_candidate_list[ind_candidate_pointer] = ind
-          ind_candidate_pointer += 1
-        end
-    end
-    ind_candidate = sample(view(ind_candidate_list, 1:ind_candidate_pointer-1))
-    ind_incumbent[locations_added+1] = ind_candidate
-    filter!(a -> a != ind_candidate, ind_compl_incumbent)
-    Dx_incumbent .= Dx_incumbent .+ view(D, :, ind_candidate)
-    obj_incumbent = obj_candidate
-    locations_added += 1
-  end
-  return ind_incumbent
-end
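-
-# The randomised variant scans only a random subset of s = round(L * p)
-# candidate sites per added location: e.g. (illustrative numbers) p = 0.05 over
-# L = 10000 sites evaluates 500 candidates per iteration instead of all 10000,
-# trading solution quality for speed; ties among the best candidates are broken
-# uniformly at random via sample().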
-
-
-function greedy_algorithm(D::Array{Float64,2}, c::Float64, n::Int64)
-
-  W, L = size(D)
-  n = convert(Int64, n)
-  ind_compl_incumbent = [i for i in 1:L]
-  ind_incumbent = Vector{Int64}(undef, n)
-  Dx_incumbent = zeros(Float64, W)
-  obj_incumbent = 0
-  Dx_tmp = Vector{Float64}(undef, W)
-  y_tmp = Vector{Float64}(undef, W)
-  ind_candidate_list = zeros(Int64, L)
-  locations_added, threshold = 0, 0
-  @inbounds while locations_added < n
-    if locations_added < c
-      threshold = locations_added + 1
-      obj_candidate = 0
-    else
-      obj_candidate = obj_incumbent
-    end
-    ind_candidate_pointer = 1
-    @inbounds for ind in ind_compl_incumbent
-        Dx_tmp .= Dx_incumbent .+ view(D, :, ind)
-        y_tmp .= Dx_tmp .>= threshold
-        obj_tmp = sum(y_tmp)
-        if obj_tmp > obj_candidate
-          ind_candidate_pointer = 1
-          ind_candidate_list[ind_candidate_pointer] = ind
-          obj_candidate = obj_tmp
-          ind_candidate_pointer += 1
-        elseif obj_tmp == obj_candidate
-          ind_candidate_list[ind_candidate_pointer] = ind
-          ind_candidate_pointer += 1
-        end
-    end
-    ind_candidate = sample(view(ind_candidate_list, 1:ind_candidate_pointer-1))
-    ind_incumbent[locations_added+1] = ind_candidate
-    filter!(a -> a != ind_candidate, ind_compl_incumbent)
-    Dx_incumbent .= Dx_incumbent .+ view(D, :, ind_candidate)
-    obj_incumbent = obj_candidate
-    locations_added += 1
-  end
-  return ind_incumbent
-end
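-
-# Note on both greedy variants: assuming a 0/1 criticality matrix, no window
-# can reach threshold c while fewer than c sites are selected, so candidates
-# are scored against the ramped threshold locations_added + 1 instead. E.g.
-# with c = 5, the third site added is scored on windows covered by at least 3
-# of the selected sites.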
diff --git a/src/jl/optimisation_models.jl b/src/jl/optimisation_models.jl
deleted file mode 100644
index 5a8823deb0d09986a2222a08f9d4fec43d75cd81..0000000000000000000000000000000000000000
--- a/src/jl/optimisation_models.jl
+++ /dev/null
@@ -1,141 +0,0 @@
-using JuMP
-using Gurobi
-
-function solve_MILP(D::Array{Float64, 2}, c::Float64, n::Float64, solver::String)
-
-  W = size(D)[1]
-  L = size(D)[2]
-
-  if solver == "Gurobi"
-    MILP_model = Model(optimizer_with_attributes(Gurobi.Optimizer, "TimeLimit" => 7200., "MIPGap" => 0.01, "LogToConsole" => 0))
-  else
-    println("Only the Gurobi solver is currently supported")
-    throw(ArgumentError("unsupported solver: $solver"))
-  end
-
-  @variable(MILP_model, x[1:L], Bin)
-  @variable(MILP_model, 0 <= y[1:W] <= 1)
-
-  @constraint(MILP_model, cardinality, sum(x) == n)
-  @constraint(MILP_model, covering, D * x .>= c * y)
-
-  @objective(MILP_model, Max, sum(y))
-
-  optimize!(MILP_model)
-
-  x_sol = round.(value.(x))
-
-  return x_sol
-
-end
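-
-# Hedged usage sketch (random data, assumes a licensed Gurobi install):
-#   x = solve_MILP(rand(100, 20), 3.0, 5.0, "Gurobi")
-# returns a rounded 0/1 vector of length 20 with exactly 5 entries set to one,
-# maximising the number of windows whose relaxed indicator y reaches 1.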
-
-function solve_MILP_partitioning(D::Array{Float64, 2}, c::Float64, n::Array{Int64, 1}, partitions_indices::Dict{Int64, Int64}, solver::String)
-
-  W = size(D)[1]
-  L = size(D)[2]
-  P = length(n)
-
-  # Computes number of locations in each partition
-  cnt = zeros(Int64, P)
-  for i = 1:L
-    cnt[partitions_indices[i]] += 1
-  end
-
-  # Computes indices of partitions
-  ind_part = Vector{Int64}(undef, P+1)
-  ind_part[1] = 1
-  for i = 1:P
-    ind_part[i+1] = ind_part[i] + cnt[i]
-  end
-
-  # Selects solver
-  if solver == "Gurobi"
-    MILP_model = Model(optimizer_with_attributes(Gurobi.Optimizer, "TimeLimit" => 7200., "MIPGap" => 0.01, "LogToConsole" => 0))
-  else
-    println("Only the Gurobi solver is currently supported")
-    throw(ArgumentError("unsupported solver: $solver"))
-  end
-
-  # Defines variables
-  @variable(MILP_model, x[1:L], Bin)
-  @variable(MILP_model, 0 <= y[1:W] <= 1)
-
-  # Defines Constraints
-  @constraint(MILP_model, cardinality[i=1:P], sum(x[ind_part[i]:(ind_part[i+1]-1)]) == n[i])
-  @constraint(MILP_model, covering, D * x .>= c * y)
-
-  # Defines objective function
-  @objective(MILP_model, Max, sum(y))
-
-  # Solves model
-  optimize!(MILP_model)
-
-  # Extracts solution
-  x_sol = round.(value.(x))
-
-  return x_sol
-
-end
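-
-# Partition bookkeeping, illustrated (hypothetical input): for L = 5 locations
-# assigned to P = 2 partitions via partitions_indices = Dict(1=>1, 2=>1, 3=>2,
-# 4=>2, 5=>2), cnt == [2, 3] and ind_part == [1, 3, 6], so the cardinality
-# constraints read sum(x[1:2]) == n[1] and sum(x[3:5]) == n[2]. This assumes
-# the columns of D are ordered so each partition occupies a contiguous block.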
-
-function solve_IP(D::Array{Float64, 2}, c::Float64, n::Float64, solver::String, timelimit::Float64, gap::Float64)
-
-  W = size(D)[1]
-  L = size(D)[2]
-
-  println("Building IP Model")
-  if solver == "Gurobi"
-    IP_model = Model(optimizer_with_attributes(Gurobi.Optimizer, "TimeLimit" => timelimit, "MIPGap" => gap))
-  else
-    println("Only the Gurobi solver is currently supported")
-    throw(ArgumentError("unsupported solver: $solver"))
-  end
-
-  @variable(IP_model, x[1:L], Bin)
-  @variable(IP_model, y[1:W], Bin)
-
-  @constraint(IP_model, cardinality, sum(x) == n)
-  @constraint(IP_model, covering, D * x .>= c * y)
-
-  @objective(IP_model, Max, sum(y))
-
-  optimize!(IP_model)
-
-  x_sol = value.(x)
-  y_sol = value.(y)
-
-  return x_sol, y_sol
-
-end
-
-function warm_start_IP(D::Array{Float64, 2}, c::Float64, n::Float64, x_init::Array{Float64, 1}, y_init::Array{Float64, 1}, solver::String, timelimit::Float64, gap::Float64)
-
-  W = size(D)[1]
-  L = size(D)[2]
-
-  println("Warmstarting IP Model")
-  if solver == "Gurobi"
-    IP_model = Model(optimizer_with_attributes(Gurobi.Optimizer, "TimeLimit" => timelimit, "MIPGap" => gap))
-  else
-    println("Only the Gurobi solver is currently supported")
-    throw(ArgumentError("unsupported solver: $solver"))
-  end
-
-  @variable(IP_model, x[1:L], Bin)
-  @variable(IP_model, y[1:W], Bin)
-
-  # Warm-start the solver with the provided initial solution
-  set_start_value.(x, x_init)
-  set_start_value.(y, y_init)
-
-  @constraint(IP_model, cardinality, sum(x) == n)
-  @constraint(IP_model, covering, D * x .>= c * y)
-
-  @objective(IP_model, Max, sum(y))
-
-  optimize!(IP_model)
-
-  x_sol = value.(x)
-  y_sol = value.(y)
-
-  return x_sol, y_sol
-
-end
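-
-# A typical (hypothetical) warm-start chain: solve the relaxation first, then
-# feed its rounded solution to the exact IP, e.g.
-#   x0 = solve_MILP(D, c, n, "Gurobi")
-#   y0 = float.(D * x0 .>= c)  # binary y induced by the rounded relaxation
-#   x, y = warm_start_IP(D, c, n, x0, y0, "Gurobi", 3600.0, 0.01)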
diff --git a/src/main.py b/src/main.py
index 62f32285e4e476ea9fe2e3ca7e94a73c19837387..ccc6f2ab686921d3a5d4af20e960c43eb1b6c044 100644
--- a/src/main.py
+++ b/src/main.py
@@ -1,49 +1,26 @@
 import pickle
 import yaml
-from os.path import join, isfile
-from numpy import array, sum
-from pyomo.opt import SolverFactory
-import time
-import argparse
-
-from helpers import read_inputs, init_folder, custom_log, xarray_to_ndarray, generate_jl_input, get_deployment_vector
-from tools import read_database, return_filtered_coordinates, selected_data, return_output, resource_quality_mapping, \
-    critical_window_mapping, sites_position_mapping
-from models import build_ip_model
-
-
-def parse_args():
-
-    parser = argparse.ArgumentParser(description='Command line arguments.')
-
-    parser.add_argument('--c', type=int)
-    parser.add_argument('--p', type=float, default=None)
-    parser.add_argument('--run_BB', type=bool, default=False)
-    parser.add_argument('--run_MIR', type=bool, default=False)
-    parser.add_argument('--run_LS', type=bool, default=False)
-    parser.add_argument('--run_GRED_DET', type=bool, default=False)
-    parser.add_argument('--run_GRED_STO', type=bool, default=False)
-    parser.add_argument('--run_RAND', type=bool, default=False)
-    parser.add_argument('--run_CROSS', type=bool, default=False)
-    parser.add_argument('--LS_init_algorithm', type=str, default=None)
-    parser.add_argument('--CROSS_method', type=str, default=None)
-    parser.add_argument('--CROSS_algorithm', type=str, default=None)
-    parser.add_argument('--train', type=int, default=None)
-    parser.add_argument('--test', type=int, default=None)
-
-    parsed_args = vars(parser.parse_args())
-
-    return parsed_args
+from os.path import join, isfile, isdir
+from os import makedirs
+from numpy import sum, float64
+from time import strftime
+import julia
+from julia import Main
+
+from helpers import read_inputs, custom_log, xarray_to_ndarray, \
+                    generate_jl_input, get_deployment_vector
+from tools import read_database, return_filtered_coordinates, \
+                  selected_data, return_output, resource_quality_mapping, \
+                  critical_window_mapping, sites_position_mapping
 
 if __name__ == '__main__':
 
-    args = parse_args()
-
     model_parameters = read_inputs('../config_model.yml')
     siting_parameters = model_parameters['siting_params']
     tech_parameters = read_inputs('../config_techs.yml')
 
     data_path = model_parameters['data_path']
+    julia_path = model_parameters['julia_models_path']
     spatial_resolution = model_parameters['spatial_resolution']
     time_horizon = model_parameters['time_slice']
 
@@ -54,7 +31,7 @@ if __name__ == '__main__':
     if isfile(join(data_path, 'input/criticality_matrix.p')):
 
         custom_log(' WARNING! Instance data read from files.')
-        criticality_data = pickle.load(open(join(data_path, 'input/criticality_matrix.p'), 'rb'))
+        D = pickle.load(open(join(data_path, 'input/criticality_matrix.p'), 'rb'))
         site_coordinates = pickle.load(open(join(data_path, 'input/site_coordinates.p'), 'rb'))
         capacity_factors_data = pickle.load(open(join(data_path, 'input/capacity_factors_data.p'), 'rb'))
         site_positions = pickle.load(open(join(data_path, 'input/site_positions.p'), 'rb'))
@@ -75,189 +52,87 @@ if __name__ == '__main__':
         truncated_data = selected_data(database, site_coordinates, time_horizon)
         capacity_factors_data = return_output(truncated_data, data_path)
         time_windows_data = resource_quality_mapping(capacity_factors_data, siting_parameters)
-        criticality_data = xarray_to_ndarray(critical_window_mapping(time_windows_data, model_parameters))
+        D = xarray_to_ndarray(critical_window_mapping(time_windows_data, model_parameters))
         site_positions = sites_position_mapping(time_windows_data)
 
-        pickle.dump(criticality_data, open(join(data_path, 'input/criticality_matrix.p'), 'wb'), protocol=4)
+        pickle.dump(D, open(join(data_path, 'input/criticality_matrix.p'), 'wb'), protocol=4)
         pickle.dump(site_coordinates, open(join(data_path, 'input/site_coordinates.p'), 'wb'), protocol=4)
         pickle.dump(capacity_factors_data, open(join(data_path, 'input/capacity_factors_data.p'), 'wb'), protocol=4)
         pickle.dump(site_positions, open(join(data_path, 'input/site_positions.p'), 'wb'), protocol=4)
 
-        custom_log(' Data read. Building model.')
-
-    siting_parameters['solution_method']['BB']['mir'] = args['run_MIR']
-    siting_parameters['solution_method']['BB']['set'] = args['run_BB']
-    siting_parameters['solution_method']['LS']['set'] = args['run_LS']
-    siting_parameters['solution_method']['CROSS']['set'] = args['run_CROSS']
-    siting_parameters['solution_method']['GRED_DET']['set'] = args['run_GRED_DET']
-    siting_parameters['solution_method']['GRED_STO']['set'] = args['run_GRED_STO']
-    siting_parameters['solution_method']['GRED_STO']['p'] = args['p']
-    siting_parameters['solution_method']['RAND']['set'] = args['run_RAND']
-    siting_parameters['solution_method']['CROSS']['method'] = args['CROSS_method']
-    siting_parameters['solution_method']['CROSS']['algorithm'] = args['CROSS_algorithm']
-    siting_parameters['solution_method']['CROSS']['no_years_train'] = args['train']
-    siting_parameters['solution_method']['CROSS']['no_years_test'] = args['test']
-
-    c = args['c']
-
-    if siting_parameters['solution_method']['BB']['set']:
-
-        custom_log(' BB chosen to solve the IP.')
-        params = siting_parameters['solution_method']['BB']
-
-        output_folder = init_folder(model_parameters, c, f"_BB_MIR_{args['run_MIR']}")
-        with open(join(output_folder, 'config_model.yaml'), 'w') as outfile:
-            yaml.dump(model_parameters, outfile, default_flow_style=False, sort_keys=False)
-        with open(join(output_folder, 'config_techs.yaml'), 'w') as outfile:
-            yaml.dump(tech_parameters, outfile, default_flow_style=False, sort_keys=False)
-
-        # Solver options for the MIP problem
-        opt = SolverFactory(params['solver'])
-        opt.options['MIPGap'] = params['mipgap']
-        opt.options['Threads'] = params['threads']
-        opt.options['TimeLimit'] = params['timelimit']
-
-        instance = build_ip_model(deployment_dict, site_coordinates, criticality_data,
-                                  c, output_folder, args['run_MIR'])
-        custom_log(' Sending model to solver.')
-
-        results = opt.solve(instance, tee=False, keepfiles=False,
-                            report_timing=False, logfile=join(output_folder, 'solver_log.log'))
-
-        objective = instance.objective()
-        x_values = array(list(instance.x.extract_values().values()))
-
-        pickle.dump(x_values, open(join(output_folder, 'solution_matrix.p'), 'wb'))
-        pickle.dump(objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
-
-    elif siting_parameters['solution_method']['LS']['set']:
-
-        custom_log(f" LS_{args['LS_init_algorithm']} chosen to solve the IP. Opening a Julia instance.")
-        params = siting_parameters['solution_method']['LS']
-
-        jl_dict = generate_jl_input(deployment_dict, site_coordinates)
-        path_to_sol = '10y_n560_k1_c' + str(args['c']) + '_GRED_STGH_p0.05'
-        path_to_init_sol_folder = join(data_path, 'output', path_to_sol)
-
-        import julia
-        j = julia.Julia(compiled_modules=False)
-        from julia import Main
-        Main.include("jl/SitingHeuristics.jl")
-
-        start = time.time()
-        jl_selected, jl_objective, jl_traj = Main.main_MIRSA(jl_dict['index_dict'], jl_dict['deployment_dict'],
-                                                             criticality_data, c, params['neighborhood'],
-                                                             params['no_iterations'], params['no_epochs'],
-                                                             params['initial_temp'], params['no_runs'],
-                                                             args['LS_init_algorithm'], args['p'], path_to_init_sol_folder)
-        end = time.time()
-        print(f"Average CPU time for c={c}: {round((end-start)/params['no_runs'], 1)} s")
-
-        output_folder = init_folder(model_parameters, c, suffix=f"_LS_{args['LS_init_algorithm']}")
-
-        with open(join(output_folder, 'config_model.yaml'), 'w') as outfile:
-            yaml.dump(model_parameters, outfile, default_flow_style=False, sort_keys=False)
-        with open(join(output_folder, 'config_techs.yaml'), 'w') as outfile:
-            yaml.dump(tech_parameters, outfile, default_flow_style=False, sort_keys=False)
-
-        pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb'))
-        pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
-        pickle.dump(jl_traj, open(join(output_folder, 'trajectory_matrix.p'), 'wb'))
-
-    elif siting_parameters['solution_method']['RAND']['set']:
-    
-        custom_log(' Locations to be chosen via random search.')
-        params = siting_parameters['solution_method']['RAND']
-    
-        jl_dict = generate_jl_input(deployment_dict, site_coordinates)
-    
-        import julia
-        j = julia.Julia(compiled_modules=False)
-        from julia import Main
-        Main.include("jl/SitingHeuristics.jl")
-
-        jl_selected, jl_objective = Main.main_RAND(jl_dict['deployment_dict'], criticality_data,
-                                                   c, params['no_iterations'], params['no_runs'],
-                                                   params['algorithm'])
-    
-        output_folder = init_folder(model_parameters, c, suffix='_RS')
-    
-        pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb'))
-        pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
-
-    elif siting_parameters['solution_method']['GRED_DET']['set']:
-
-        params = siting_parameters['solution_method']['GRED_DET']
-        custom_log(f" GRED_{params['algorithm']} chosen to solve the IP. Opening a Julia instance.")
-
-        jl_dict = generate_jl_input(deployment_dict, site_coordinates)
-
-        import julia
-        j = julia.Julia(compiled_modules=False)
-        from julia import Main
-        Main.include("jl/SitingHeuristics.jl")
+    output_dir = join(data_path, f"output/{strftime('%Y%m%d_%H%M%S')}/")
+    if not isdir(output_dir):
+        makedirs(output_dir)
+
+    with open(join(output_dir, 'config_model.yaml'), 'w') as outfile:
+        yaml.dump(model_parameters, outfile, default_flow_style=False, sort_keys=False)
+    with open(join(output_dir, 'config_techs.yaml'), 'w') as outfile:
+        yaml.dump(tech_parameters, outfile, default_flow_style=False, sort_keys=False)
+
+    c = float64(siting_parameters['c'])
+    D = D.astype(float64)
 
-        start = time.time()
-        jl_selected, jl_objective = Main.main_GRED(jl_dict['deployment_dict'], criticality_data, c,
-                                                   params['no_runs'], params['p'], params['algorithm'])
-        end = time.time()
-        print(f"Average CPU time for c={c}: {round((end-start)/params['no_runs'], 1)} s")
-
-        output_folder = init_folder(model_parameters, c, suffix=f"_GRED_{params['algorithm']}")
+    jl_dict = generate_jl_input(deployment_dict, site_coordinates)
+    j = julia.Julia(compiled_modules=False)
+    Main.include(julia_path)
 
-        pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb'))
-        pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
-
-    elif siting_parameters['solution_method']['GRED_STO']['set']:
+    custom_log(f" {siting_parameters['algorithm']} chosen to solve the instance.")
 
-        params = siting_parameters['solution_method']['GRED_STO']
-        custom_log(f" GRED_{params['algorithm']} chosen to solve the IP. Opening a Julia instance.")
+    if siting_parameters['algorithm'] == 'MIP':
 
-        jl_dict = generate_jl_input(deployment_dict, site_coordinates)
+        x, obj = Main.MIP(D, c, float64(jl_dict['k']))
 
-        import julia
-        j = julia.Julia(compiled_modules=False)
-        from julia import Main
-        Main.include("jl/SitingHeuristics.jl")
+    elif siting_parameters['algorithm'] == 'MIR':
 
-        start = time.time()
-        jl_selected, jl_objective = Main.main_GRED(jl_dict['deployment_dict'], criticality_data, c,
-                                                   params['no_runs'], params['p'], params['algorithm'])
-        end = time.time()
-        print(f"Average CPU time for c={c}: {round((end-start)/params['no_runs'], 1)} s")
+        x, obj = Main.MIR(D, c, float64(jl_dict['k']))
 
-        output_folder = init_folder(model_parameters, c, suffix=f"_GRED_{params['algorithm']}_p{params['p']}")
-
-        pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb'))
-        pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
-
-    elif siting_parameters['solution_method']['CROSS']['set']:
-
-        params = siting_parameters['solution_method']['CROSS']
-        custom_log(f" Cross-validation starting. Opening a Julia instance.")
-
-        import julia
-        j = julia.Julia(compiled_modules=False)
-        from julia import Main
-        Main.include("jl/SitingHeuristics.jl")
-
-        obj_train, obj_test, ind_train, ind_test = Main.main_CROSS(criticality_data, c,
-                                                sum(model_parameters['deployments']), params['k'],
-                                                params['no_years'], params['no_years_train'], params['no_years_test'],
-                                                params['no_experiments'], params['no_runs_per_experiment'],
-                                                params['criterion'], params['method'], params['algorithm'])
-
-        if params['no_years_train'] == params['no_years_test']:
-            bal = 'balanced'
-        else:
-            bal = 'unbalanced'
+    elif siting_parameters['algorithm'] == 'RG':
 
-        output_folder = init_folder(model_parameters, c, suffix=f"_CROSS_{params['method']}_{params['algorithm']}_{bal}")
+        # TODO: At this stage, this runs one single time.
+        x, obj = Main.RG(D, c, float64(jl_dict['k']))
 
-        pickle.dump(obj_train, open(join(output_folder, 'obj_train.p'), 'wb'))
-        pickle.dump(obj_test, open(join(output_folder, 'obj_test.p'), 'wb'))
-        pickle.dump(ind_train, open(join(output_folder, 'ind_train.p'), 'wb'))
-        pickle.dump(ind_test, open(join(output_folder, 'ind_test.p'), 'wb'))
+    elif siting_parameters['algorithm'] == 'RGP':
+
+        p = siting_parameters['method_params']['RGP']['p']
+        # TODO: At this stage, this runs one single time.
+        x, obj = Main.RGP(D, c, float64(jl_dict['k']), p)
+
+    elif siting_parameters['algorithm'] == 'RS':
+
+        S = siting_parameters['method_params']['RS']['samples']
+        x, obj = Main.RS(D, c, float64(jl_dict['k']), S)
+
+    elif siting_parameters['algorithm'] == 'RSSA':
+
+        I = siting_parameters['method_params']['RSSA']['no_iterations']
+        N = siting_parameters['method_params']['RSSA']['no_epochs']
+        r = siting_parameters['method_params']['RSSA']['radius']
+        T_init = siting_parameters['method_params']['RSSA']['initial_temp']
+        S = siting_parameters['method_params']['RSSA']['samples']
+        x_init, _ = Main.RG(D, c, float64(jl_dict['k']), S)
+        x, obj = Main.SA(D, c, float64(jl_dict['k']), x_init, I, N, r, T_init)
+
+    elif siting_parameters['algorithm'] == 'MIRSA':
+
+        I = siting_parameters['method_params']['MIRSA']['no_iterations']
+        N = siting_parameters['method_params']['MIRSA']['no_epochs']
+        r = siting_parameters['method_params']['MIRSA']['radius']
+        T_init = siting_parameters['method_params']['MIRSA']['initial_temp']
+        x, obj = Main.MIRSA(D, c, float64(jl_dict['k']), I, N, r, T_init)
+
+    elif siting_parameters['algorithm'] == 'RGPSA':
+
+        p = siting_parameters['method_params']['RGPSA']['p']
+        n = siting_parameters['method_params']['RGPSA']['init_runs']
+        I = siting_parameters['method_params']['RGPSA']['no_iterations']
+        N = siting_parameters['method_params']['RGPSA']['no_epochs']
+        r = siting_parameters['method_params']['RGPSA']['radius']
+        T_init = siting_parameters['method_params']['RGPSA']['initial_temp']
+        x, obj = Main.RGPSA(D, c, float64(jl_dict['k']), p, n, I, N, r, T_init)
 
     else:
-        raise ValueError(' This solution method is not available. ')
+        raise ValueError(f" Algorithm {siting_parameters['algorithm']} is not available.")
+
+    pickle.dump(x, open(join(output_dir, 'solution_matrix.p'), 'wb'))
+    pickle.dump(obj, open(join(output_dir, 'objective_vector.p'), 'wb'))
+    custom_log(" Results written to disk.")
diff --git a/src/models.py b/src/models.py
deleted file mode 100644
index 60bb9e5ed602f0523153b1c9f9a413ceea15d517..0000000000000000000000000000000000000000
--- a/src/models.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from os.path import join
-
-from numpy import arange
-from pyomo.environ import ConcreteModel, Var, Binary, maximize, NonNegativeReals
-from pyomo.opt import ProblemFormat
-from pypsa.opt import l_constraint, LConstraint, l_objective, LExpression
-
-from tools import retrieve_index_dict
-
-
-def build_ip_model(deployment_dict, coordinate_dict, critical_matrix, c, output_folder, mir=False, write_lp=False):
-    """Model build-up.
-
-    Parameters:
-
-    ------------
-
-
-    Returns:
-
-    -----------
-
-    """
-
-    no_windows = critical_matrix.shape[0]
-    no_locations = critical_matrix.shape[1]
-
-    n, dict_deployment, partitions, indices = retrieve_index_dict(deployment_dict, coordinate_dict)
-
-    for item in partitions:
-        if item in indices:
-            if dict_deployment[item] > len(indices[item]):
-                raise ValueError(' More nodes required than available for {}'.format(item))
-        else:
-            indices[item] = []
-            print('Warning! {} not in keys of choice. Make sure there is no requirement here.'.format(item))
-
-    model = ConcreteModel()
-
-    model.W = arange(1, no_windows + 1)
-    model.L = arange(1, no_locations + 1)
-
-    model.x = Var(model.L, within=Binary)
-    if not mir:
-        model.y = Var(model.W, within=Binary)
-    else:
-        model.y = Var(model.W, within=NonNegativeReals, bounds=(0, 1))
-
-    activation_constraint = {}
-
-    for w in model.W:
-        lhs = LExpression([(critical_matrix[w - 1, l - 1], model.x[l]) for l in model.L])
-        rhs = LExpression([(c, model.y[w])])
-
-        activation_constraint[w] = LConstraint(lhs, ">=", rhs)
-
-    l_constraint(model, "activation_constraint", activation_constraint, list(model.W))
-
-    cardinality_constraint = {}
-
-    for item in partitions:
-        lhs = LExpression([(1, model.x[l]) for l in indices[item]])
-        rhs = LExpression(constant=dict_deployment[item])
-
-        cardinality_constraint[item] = LConstraint(lhs, "==", rhs)
-
-    l_constraint(model, "cardinality_constraint", cardinality_constraint, partitions)
-
-    objective = LExpression([(1, model.y[w]) for w in model.W])
-    l_objective(model, objective, sense=maximize)
-
-    if write_lp:
-        model.write(filename=join(output_folder, 'model.lp'),
-                    format=ProblemFormat.cpxlp,
-                    io_options={'symbolic_solver_labels': True})
-
-    return model