diff --git a/.gitignore b/.gitignore
index 5af5aa770c468b14bda0051ad8dc1370a8a4ad77..12825164f319c1f993fad62c991f023f50911bee 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,3 @@
-input_data
-output_data
-resource_data
 *.lp
 *.log
 *.pyc
diff --git a/README.md b/README.md
index 68ee7fda9877b1d119c3b5ad2b1f4dce2f6573f7..3f177f78ef1b4ba4e53d05f9fbdab43221ed04ec 100644
--- a/README.md
+++ b/README.md
@@ -10,78 +10,18 @@ in the `requirements.yml` file included in the repository. Their installation ca
 command that builds a separate environment for this module from the `yml` file:
    
     conda env create -f environment.yml
-    
-In addition, the tool also requires the installation of the `pypsa` module (details [here](https://pypsa.org/)) for model
-development purposes which are detailed in the `models.py` script. Since one of the latest versions of this module is 
-required, its installation has to be carried out manually via
 
-	pip install git+https://github.com/PyPSA/PyPSA.git
-
-Lastly, the tool requires to installation of an MILP solver. At the time of writing, `gurobi` is the only solver this 
+The tool also requires the installation of an MILP solver. At the time of writing, `gurobi` is the only solver this 
 model supports (see installation details [here](https://www.gurobi.com/documentation/8.1/remoteservices/installation.html).
-
-At the time of writing, the model readily supports (given the availability of already downloaded data
-[here](https://dox.uliege.be/index.php/s/trys2xY7j9JsQ3z) and the set-up within `tools.py`) siting assessments in 
-Europe, North Africa, Middle East, Greenland and the USA.
-
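+
+Once the environment has been created, it can be activated before running any of the scripts (the
+environment name, `resite_ip`, is the one defined in `requirements.yml`):
+
+    conda activate resite_ip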
    
 # Example run
 
-The module structure includes five folders. The `input data` folder contains all data required at different stages within 
-the model. The `output_data` folder contains the result sub-folders. The `src_acquisition` folder contains the script
-used for reanalysis data download from the ECMWF ERA5 database. The `src` folder contains the scripts running the `resite`
-model. Finally, the `src_postprocessing` folder includes scripts used for analysing results.
-
-#### I. Resource data retrieval
-It should be noted that usable data (incl. resource data, land characteristics, etc.) are already available 
-[here](https://dox.uliege.be/index.php/s/trys2xY7j9JsQ3z). We recommend using this data to get a good understanding
-of the model architecture and cababilities. In case additional data is needed, the acquisition procedure detailed below
-should suffice. 
-1. Set up access to the CDS API by following instructions [here](https://cds.climate.copernicus.eu/api-how-to).
-2. Open the `data_retrieval.py` file in `src_acquisition` using any file editor or IDE available and adjust the
-download settings as required (e.g., area, time coverage, spatial resolution, variables). 
-3. Run `data_retrieval.py`. This will commence a connection to the CDS servers and data
-download will start. According to the amount of data requested, this process takes time.
-Downloaded files will be saved in the `input_data/resource_data` folder in a subfolder named after the spatial resolution chosen.
-                 
-#### II. Optimal deployment of generation sites
-1. Open the `parameters.yml` file using any available text editor or IDE and adjust the model parameters according to 
-the needs. Further comments on how to set these are provided in the file. Save the file.
-2. Run `main.py`. Upon completion, the output is saved in a dedicated sub-folder (named based on its creation time stamp) in `output_data`.
-
-#### III. Results assessment
-Run `output.py` in `src_postprocessing`. In console, define an instance of the `Output` class associated to a run, e.g.
-`20190101_123000`, as such:
-
-```
-instance = Output('20190101_123000')
-```
-
-The `instance` has the following attributes:
-+ `return_numerics()`, which returns a series of scalar indicators associated with i) the optimal deployment scheme and
-ii) a couple other siting strategies. It should be called as:
+To be added.
 
-```
-results = instance.return_numerics(**kwargs)
-```
-+ `plot_numerics()`, which provides a visual representation of the results obtained above. It is called as:
-```
-instance.plot_numerics(results, **kwargs)
-```
-+ `optimal_location_plot()`, which plots the optimal sites on a map. It is called as:
-```
-instance.optimal_locations_plot()
-```
-+ `optimal_locations_plot_heatmaps()`, which plots RES capacities per node (if capacities are considered)
-```
-instance.optimal_locations_plot_heatmaps()
-```
-    
 # Citing
 Please cite the following paper if you use this software in your research.
 
-+ D. Radu, M. Berger, R. Fonteneau, A. Dubois, H. Pandžić, Y. Dvorkin, Q. Louveaux, D. Ernst, [Resource
-Complementarity as Criterion for Siting Renewable Energy Generation Assets](https://orbi.uliege.be/handle/2268/240014), pre-print, 2019.
+To be added.
 
 # License
 This software is released as free software under the [GPLv3](http://www.gnu.org/licenses/gpl-3.0.en.html).
diff --git a/config_model.yml b/config_model.yml
index ffe37b00fa135da2888232a8afcfbc10315e0cfa..7ea2f7afde77fd08c45c2c063b50feffee14905c 100644
--- a/config_model.yml
+++ b/config_model.yml
@@ -1,84 +1,58 @@
-# Spatial resolution (in degrees) of the potential sites.
-spatial_resolution: 0.5
-# Path towards the various input data.
-path_resource_data: '../input_data/resource_data'
-path_transfer_function_data: '../input_data/transfer_functions'
-path_population_density_data: '../input_data/population_density'
-path_protected_areas_data: '../input_data/protected_areas'
-path_land_data: '../input_data/land_data'
-path_load_data: '../input_data/load_data'
-path_transmission_data: '../input_data/transmission_data'
-path_potential_data: '../input_data/potentials'
-path_shapefile_data: '../input_data/shapefiles'
-
-# Various data layers to be taken into account in potential site selection.
-resource_quality_layer: True
-population_density_layer: True
-protected_areas_layer: False # expensive
-bathymetry_layer: False # for offshore
-orography_layer: True
-forestry_layer: True
-water_mask_layer: True
-latitude_layer: True
-distance_layer: False # for offshore
+# Path to the data folder (adjust to the local setup).
+data_path: 'D:/ULg_PhD_work/datasets/resite_ip/'
+#data_path: '/data/dcradu/resite_ip/'
 
+# Spatial resolution (in degrees) of the potential sites.
+spatial_resolution: 0.28
 # Start time and end time of the analysis.
-time_slice: ['2018-01-01T00:00', '2018-01-31T23:00']
-# List of regions to be considered in the optimization.
-regions: ['EU']
+time_slice: ['2014-01-01T00:00', '2014-12-31T23:00']
 # Technologies to deploy.
-technologies: ['wind_onshore', 'pv_utility']
-deployment_vector: {'EU': {'wind_onshore': 64, 'pv_utility': 28}}
-
-# Assessment measure for each time window. Available: mean, median or percentiles.
-smooth_measure: 'mean'
-# Defines how \alpha is considered in space and time.
-alpha: 'load_central'
-# Normalization procedures (detailed in tools.py). (min, max)
-norm_type: 'max'
-# Time-window length used to compute the criticality indicator. Integer value.
-delta: 1
-
-
-# Keeping files at the end of the run.
-keep_files: True
-# Run name
-name_prefix: 'book_'
+regions: ['FR', 'SE', 'NO']
+technologies: ['wind_onshore']
+deployments: [10, 5, 3]
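+# The i-th entry of 'deployments' applies to the i-th entry of 'regions'; a nested list per region
+# (e.g. [[10], [5], [3]]) assigns one value per technology (see get_deployment_vector in src/helpers.py).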
 
-# Solution method: BB or HEU or RAND or GRED.
-solution_method:
-  BB:
-    # Branch & Bound
-    set: True
-    c: 1
-    solver: 'gurobi'
-    mipgap: 0.04
-    timelimit: 1800
-    threads: 4
-  MIRSA:
-    # Simulated Annealing with Local Search
-    set: False
-    c: [46]
-    neighborhood: 1
-    no_iterations: 1000
-    no_epochs: 1000
-    initial_temp: 200.
-    no_runs: 1
-    algorithm: 'SALSR' #'GLS'
-    which_sol: 'rand' #'rand'
-    seed: 1
-    purpose: 'planning' # 'ol'
-  GRED:
-    set: False
-    epsilon: 0.001
-    c: [1, 106, 212, 318, 424, 530]
-    no_runs: 1
-    algorithm: 'RGH' # SGA
-  RAND:
-    # Random Search
-    set: False
-    c: [318]
-    no_iterations: 50
-    no_epochs: 500
-    no_runs: 1
-    algorithm: 'RS'
\ No newline at end of file
+siting_params:
+  smooth_measure: 'mean'
+  # Defines how \alpha is considered in space and time.
+  alpha: 'load_central'
+  # Normalization procedures (detailed in tools.py). (min, max)
+  norm_type: 'max'
+  # Time-window length used to compute the criticality indicator. Integer value.
+  delta: 1
+  # Solution method: BB or HEU or RAND or GRED.
+  solution_method:
+    BB:
+      # Branch & Bound
+      set: False
+      c: 1
+      solver: 'gurobi'
+      mipgap: 0.05
+      timelimit: 1800
+      threads: 4
+    MIRSA:
+      # Simulated Annealing with Local Search
+      set: True
+      c: [10]
+      neighborhood: 1
+      no_iterations: 1000
+      no_epochs: 1000
+      initial_temp: 200.
+      no_runs: 1
+      algorithm: 'SALS' #'GLS'
+      which_sol: 'rand' #'rand'
+      seed: 1
+      purpose: 'planning' # 'ol'
+    GRED:
+      set: False
+      epsilon: 0.001
+      c: [1, 106, 212, 318, 424, 530]
+      no_runs: 1
+      algorithm: 'RGH' # SGA
+    RAND:
+      # Random Search
+      set: False
+      c: [318]
+      no_iterations: 50
+      no_epochs: 500
+      no_runs: 1
+      algorithm: 'RS'
diff --git a/config_techs.yml b/config_techs.yml
index 7bdce15826181ea16d4fecae9dbd2f28e4c5452d..c7b7c2ab77bd0df759a389bfac254a40c6532efe 100644
--- a/config_techs.yml
+++ b/config_techs.yml
@@ -1,5 +1,6 @@
 # Config file for various conversion technologies.
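+# 'filters' lists the data layers used to screen candidate sites for each technology
+# (e.g. resource_quality, population_density, orography, forestry, water_mask, latitude, bathymetry, distance).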
 wind_onshore:
+  filters: ['resource_quality', 'population_density', 'orography', 'forestry', 'water_mask', 'latitude']
   converter_IV: 'V110'
   converter_III: 'E103'
   converter_II: 'V90'
@@ -19,6 +20,7 @@ wind_onshore:
   latitude_threshold: 65.
 
 wind_offshore:
+  filters: ['resource_quality', 'bathymetry', 'latitude', 'distance']
   converter_IV: 'V90'
   converter_III: 'V90'
   converter_II: 'V164'
@@ -40,6 +42,7 @@ wind_offshore:
   distance_threshold_max: 222.2 # 111.
 
 wind_floating:
+  filters: ['resource_quality', 'bathymetry', 'latitude', 'distance']
   converter_IV: 'V90'
   converter_III: 'V90'
   converter_II: 'V164'
@@ -61,6 +64,7 @@ wind_floating:
   distance_threshold_max: 180.
 
 pv_utility:
+  filters: ['resource_quality', 'population_density', 'orography', 'forestry', 'water_mask', 'latitude']
   converter: 'DEG15MC'
   resource: 'solar'
   deployment: 'utility'
@@ -76,7 +80,9 @@ pv_utility:
   forestry_ratio_threshold: 0.8
   latitude_threshold: 65.
 
+# TODO: fix pv_residential filters
 pv_residential:
+  filters: ['population_density', 'water_mask']
   converter: 'DD06M'
   resource: 'solar'
   deployment: 'residential'
diff --git a/requirements.yml b/requirements.yml
index f1839c309c3b96ccb604049544e05856d2771305..af87dde193108af6d80166461b2519a0d86032fe 100644
--- a/requirements.yml
+++ b/requirements.yml
@@ -1,4 +1,4 @@
-name: resite
+name: resite_ip
 channels:
   - conda-forge
   - defaults
@@ -7,16 +7,13 @@ dependencies:
   - cdsapi
   - dask
   - matplotlib
-  - Pyomo
+  - pyomo
   - scipy
   - toolz
   - xarray
   - geopandas
-  - cartopy
   - pyyaml
   - xlrd
-  - tqdm
-  - joblib
   - geopy
   - pypsa
   - shapely
\ No newline at end of file
diff --git a/src/era5data.py b/src/era5data.py
new file mode 100644
index 0000000000000000000000000000000000000000..883255604834891505e493a5f422efcdefac0260
--- /dev/null
+++ b/src/era5data.py
@@ -0,0 +1,68 @@
+import cdsapi
+import os
+from os.path import join
+from helpers import read_inputs
+
+model_parameters = read_inputs('../config_model.yml')
+data_path = model_parameters['data_path']
+
+spatial_resolution = 0.25
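+# Bounding boxes follow the CDS API 'area' convention: North/West/South/East, in degrees.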
+regions = {'EU': '75/-20/30/40'}
+
+years = ['2017', '2018']
+months = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
+
+directory = join(data_path, 'input_data/resource_data/', str(spatial_resolution))
+if not os.path.exists(directory):
+    os.makedirs(directory)
+
+c = cdsapi.Client()
+for region, coords in regions.items():
+    for year in years:
+        for month in months:
+            c.retrieve(
+                'reanalysis-era5-single-levels',
+                {'variable':['100m_u_component_of_wind','100m_v_component_of_wind',
+                             '2m_temperature', 'surface_solar_radiation_downwards', 'forecast_surface_roughness'],
+                 'product_type':'reanalysis',
+                 'area': str(coords),
+                 'grid': str(spatial_resolution)+'/'+str(spatial_resolution),
+                 'year': year,
+                 'month': month,
+                 'day': ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10',
+                         '11', '12', '13', '14', '15', '16', '17', '18', '19', '20',
+                         '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31'],
+                 'time': ['00:00', '01:00', '02:00', '03:00', '04:00', '05:00', '06:00', '07:00', '08:00',
+                          '09:00', '10:00', '11:00', '12:00', '13:00', '14:00', '15:00', '16:00', '17:00',
+                          '18:00', '19:00', '20:00', '21:00', '22:00', '23:00'],
+                 'format': 'netcdf'},
+                f"{directory}/{region}_{year}_{month}.nc")
+
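+# One-off retrievals of static surface and orography fields (single time step) used by the
+# land-related filters (e.g. the land/water mask in helpers.filter_onshore_offshore_locations).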
+c.retrieve(
+    'reanalysis-era5-single-levels',
+    {
+        'variable': ['low_vegetation_cover', 'high_vegetation_cover', 'land_sea_mask',
+                     'model_bathymetry', 'orography', 'sea_ice_cover'],
+        'product_type': 'reanalysis',
+        'grid': str(spatial_resolution)+'/'+str(spatial_resolution),
+        'year': '2018',
+        'month': '12',
+        'day': '31',
+        'time': '00:00',
+        'format': 'netcdf'
+    },
+    f"{directory}/ERA5_surface_characteristics_20181231_{str(spatial_resolution)}.nc")
+
+c.retrieve(
+    'reanalysis-era5-single-levels',
+    {
+        'product_type': 'reanalysis',
+        'variable': ['orography', 'slope_of_sub_gridscale_orography'],
+        'grid': str(spatial_resolution)+'/'+str(spatial_resolution),
+        'year': '2018',
+        'month': '12',
+        'day': '31',
+        'time': '00:00',
+        'format': 'netcdf'
+    },
+    f"{directory}/ERA5_orography_characteristics_20181231_{str(spatial_resolution)}.nc")
diff --git a/src/helpers.py b/src/helpers.py
index bd1f51f4eb606c3780790e9f0765cb3e8dc93fef..eed0abbe2484184a143a4eefcdcf1b36bfa1c6bc 100644
--- a/src/helpers.py
+++ b/src/helpers.py
@@ -1,186 +1,20 @@
 from copy import deepcopy
 from datetime import datetime
-from glob import glob
-from itertools import takewhile
-from operator import attrgetter
-from os import remove, getcwd, makedirs
+from os import makedirs
 from os.path import join, isdir, abspath
-from shutil import rmtree
 
+import pycountry as pyc
 import xarray as xr
 import yaml
-from geopandas import read_file
-from numpy import sqrt, hstack, arange, dtype, array, timedelta64, pi
-from pandas import DataFrame, read_excel, notnull, read_csv, date_range, to_datetime
-from scipy.spatial import distance
+from geopandas import read_file, GeoSeries
+from numpy import hstack, arange, dtype, array, timedelta64, nan, sum
+from pandas import read_csv, date_range, to_datetime, Series, notnull
 from shapely import prepared
-from shapely.geometry import Point, MultiPolygon, Polygon
+from shapely.geometry import Point
 from shapely.ops import unary_union
 from xarray import concat
 
 
-def filter_onshore_polys(polys, minarea=0.1, filterremote=True):
-    """
-    Filters onshore polygons for a given territory
-    (e.g., removing French Guyana from the polygon associated with the French shapefile).
-
-    Parameters
-    ----------
-    polys : (Multi)Polygon
-        Geometry-like object containing the shape of a given onshore region.
-    minarea : float
-        Area threshold used in the polygon selection process.
-    filterremote : boolean
-
-    Returns
-    -------
-    polys : (Multi)Polygon
-
-    """
-    if isinstance(polys, MultiPolygon):
-
-        polys = sorted(polys, key=attrgetter('area'), reverse=True)
-        mainpoly = polys[0]
-        mainlength = sqrt(mainpoly.area / (2. * pi))
-
-        if mainpoly.area > minarea:
-
-            polys = MultiPolygon([p for p in takewhile(lambda p: p.area > minarea, polys)
-                                  if not filterremote or (mainpoly.distance(p) < mainlength)])
-
-        else:
-
-            polys = mainpoly
-
-    return polys
-
-
-def filter_offshore_polys(offshore_polys, onshore_polys_union, minarea=0.1, filterremote=True):
-    """
-    Filters offshore polygons for a given territory.
-
-    Parameters
-    ----------
-    offshore_polys : (Multi)Polygon
-        Geometry-like object containing the shape of a given offshore region.
-    onshore_polys_union : (Multi)Polygon
-        Geometry-like object containing the aggregated shape of given onshore regions.
-    minarea : float
-        Area threshold used in the polygon selection process.
-    filterremote : boolean
-
-    Returns
-    -------
-    polys : (Multi)Polygon
-
-    """
-    if isinstance(offshore_polys, MultiPolygon):
-
-        offshore_polys = sorted(offshore_polys, key=attrgetter('area'), reverse=True)
-
-    else:
-
-        offshore_polys = [offshore_polys]
-
-    mainpoly = offshore_polys[0]
-    mainlength = sqrt(mainpoly.area / (5. * pi))
-    polys = []
-
-    if mainpoly.area > minarea:
-
-        for offshore_poly in offshore_polys:
-
-            if offshore_poly.area < minarea:
-                break
-
-            if isinstance(onshore_polys_union, Polygon):
-                onshore_polys_union = [onshore_polys_union]
-
-            for onshore_poly in onshore_polys_union:
-
-                if not filterremote or offshore_poly.distance(onshore_poly) < mainlength:
-                    polys.append(offshore_poly)
-
-                    break
-
-        polys = MultiPolygon(polys)
-
-    else:
-
-        polys = mainpoly
-
-    return polys
-
-
-def get_onshore_shapes(region_name_list, path_shapefile_data, minarea=0.1, filterremote=True):
-    """
-    Returns onshore shapefile associated with a given region, or list of regions.
-
-    Parameters
-    ----------
-    region_name_list : list
-        List of regions whose shapefiles are aggregated.
-    path_shapefile_data : str
-        Relative path of the shapefile data.
-    minarea : float
-    filterremote : boolean
-
-    Returns
-    -------
-    onshore_shapes : GeoDataFrame
-
-    """
-    filename = 'NUTS_RG_01M_2016_4326_LEVL_0_incl_BA.geojson'
-    onshore_shapes = read_file(join(path_shapefile_data, filename))
-
-    onshore_shapes.index = onshore_shapes['CNTR_CODE']
-
-    onshore_shapes = onshore_shapes.reindex(region_name_list)
-    onshore_shapes['geometry'] = onshore_shapes['geometry'].map(
-        lambda x: filter_onshore_polys(x, minarea, filterremote))
-
-    return onshore_shapes
-
-
-def get_offshore_shapes(region_name_list, country_shapes, path_shapefile_data, minarea=0.1, filterremote=True):
-    """
-    Returns offshore shapefile associated with a given region, or list of regions.
-
-    Parameters
-    ----------
-    region_name_list : list
-        List of regions whose shapefiles are aggregated.
-    country_shapes : GeoDataFrame
-        Dataframe containing onshore shapes of the desired regions.
-    path_shapefile_data : str
-    minarea : float
-    filterremote : boolean
-
-    Returns
-    -------
-    offshore_shapes : GeoDataFrame
-
-    """
-    filename = 'EEZ_RG_01M_2016_4326_LEVL_0.geojson'
-    offshore_shapes = read_file(join(path_shapefile_data, filename)).set_index('ISO_ID')
-
-    # Keep only associated countries
-    countries_names = [name.split('-')[0] for name in region_name_list]  # Allows to consider states and provinces
-
-    offshore_shapes = offshore_shapes.reindex(countries_names)
-    offshore_shapes['geometry'].fillna(Polygon([]), inplace=True)  # Fill nan geometries with empty Polygons
-
-    country_shapes_union = unary_union(country_shapes['geometry'].buffer(0).values)
-
-    # Keep only offshore 'close' to onshore
-    offshore_shapes['geometry'] = offshore_shapes['geometry'].map(lambda x: filter_offshore_polys(x,
-                                                                                                  country_shapes_union,
-                                                                                                  minarea,
-                                                                                                  filterremote))
-
-    return offshore_shapes
-
-
 def chunk_split(l, n):
     """
     Splits large lists in smaller chunks. Done to avoid xarray warnings when slicing large datasets.
@@ -209,11 +43,8 @@ def xarray_to_ndarray(input_dict):
     ndarray : ndarray
 
     """
-    key_list = []
-    for k1, v1 in input_dict.items():
-        for k2, v2 in v1.items():
-            key_list.append((k1, k2))
 
+    key_list = return_dict_keys(input_dict)
     array_list = []
 
     for region, tech in key_list:
@@ -224,98 +55,6 @@ def xarray_to_ndarray(input_dict):
     return ndarray
 
 
-def xarray_to_dict(input_dict, levels):
-    """
-    Converts dict of xarray objects to dict of ndarrays to be passed to the optimisation problem.
-
-    Parameters
-    ----------
-    input_dict : dict
-
-    levels : int
-        Depth of (nested) dict. Available values: 1 or 2.
-
-    Returns
-    -------
-    output_dict : dict
-
-    """
-    output_dict = deepcopy(input_dict)
-
-    if levels == 2:
-
-        key_list = return_dict_keys(input_dict)
-
-        for region, tech in key_list:
-            output_dict[region][tech] = input_dict[region][tech].values
-
-    else:
-
-        key_list = input_dict.keys()
-
-        for tech in key_list:
-            output_dict[tech] = input_dict[tech].values
-
-    return output_dict
-
-
-def retrieve_dict_max_length_item(input_dict):
-    """
-    Retrieve size of largest dict value.
-
-    Parameters
-    ----------
-    input_dict : dict
-
-    Returns
-    -------
-    max_len : int
-
-    """
-    key_list = []
-
-    for k1, v1 in input_dict.items():
-        for k2, v2 in v1.items():
-            key_list.append((k1, k2))
-
-    max_len = 0
-
-    for region, tech in key_list:
-
-        incumbent_len = len(input_dict[region][tech].locations)
-
-        if incumbent_len > max_len:
-            max_len = incumbent_len
-
-    return max_len
-
-
-def dict_to_xarray(input_dict):
-    """
-    Converts dict of xarray objects to xarray DataArray to be passed to the optimisation problem.
-
-    Parameters
-    ----------
-    input_dict : dict
-
-    Returns
-    -------
-    dataset : xr.DataArray
-
-    """
-
-    key_list = return_dict_keys(input_dict)
-
-    array_list = []
-
-    for region, tech in key_list:
-        array_list.append(input_dict[region][tech])
-
-    dataset = concat(array_list, dim='locations')
-
-    return dataset
-
-
 def collapse_dict_region_level(input_dict):
     """
     Converts nested dict (dict[region][tech]) to single-level (dict[tech]).
@@ -380,7 +119,6 @@ def concatenate_dict_keys(input_dict):
     """
 
     output_dict = {}
-
     key_list = return_dict_keys(input_dict)
 
     for region, tech in key_list:
@@ -389,258 +127,190 @@ def concatenate_dict_keys(input_dict):
     return output_dict
 
 
-def retrieve_coordinates_from_dict(input_dict):
-    """
-    Retrieves coordinate list for each (region, tech) tuple key. Requires dict values to be xarray.DataArray objects!
-
-    Parameters
-    ----------
-    input_dict : dict
-
-    Returns
-    -------
-    output_dict : dict
+def get_deployment_vector(regions, technologies, deployments):
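+    """Map the parallel 'regions'/'technologies'/'deployments' lists from the config into a nested
+    dict, e.g. {'FR': {'wind_onshore': 10}}; a per-region list assigns one value per technology."""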
 
-    """
-    output_dict = {}
+    d = {}
+    for i, region in enumerate(regions):
+        d[region] = {}
+        for j, tech in enumerate(technologies):
+            if isinstance(deployments[i], int):
+                d[region][tech] = deployments[i]
+            else:
+                d[region][tech] = deployments[i][j]
 
-    for concat_key in input_dict.keys():
-        output_dict[concat_key] = list(input_dict[concat_key].locations.values)
+    return d
 
-    return output_dict
+def return_region_divisions(region_list, data_path):
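+    """Expand aggregate region codes (e.g. 'EU', 'NA', 'ME') into the corresponding ISO-2 country codes."""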
 
+    onshore_shapes = get_onshore_shapes(None, data_path)  # all available country shapes
+    region_subdivisions = None
 
-def compute_generation_potential(capacity_factor_dict, potential_dict):
-    """
-    Computes generation potential (GWh) to be passed to the optimisation problem.
+    regions = []
+    for region in region_list:
 
-    Parameters
-    ----------
-    capacity_factor_dict : dict containing capacity factor time series.
+        if region == 'EU':
+            region_subdivisions = ['AT', 'BE', 'DE', 'DK', 'ES',
+                                   'FR', 'UK', 'IE', 'IT', 'LU',
+                                   'NL', 'NO', 'PT', 'SE', 'CH', 'CZ',
+                                   'EE', 'LV', 'RO', 'BG', 'EL', 'HR', 'RS',
+                                   'FI', 'HU', 'LT',
+                                   'PL', 'SI', 'SK']
+        elif region == 'NA':
+            region_subdivisions = ['DZ', 'EG', 'MA', 'LY', 'TN']
+        elif region == 'ME':
+            region_subdivisions = ['AE', 'BH', 'CY', 'IR', 'IQ', 'IL', 'JO', 'KW', 'LB', 'OM', 'PS', 'QA', 'SA', 'SY',
+                                   'YE']
+        elif region in onshore_shapes.index:
+            region_subdivisions = [region]
+        else:
+            raise ValueError(f"Unknown region: {region}")
 
-    potential_dict : dict containing technical potential figures per location.
+        regions.extend(region_subdivisions)
 
-    Returns
-    -------
-    output_dict : dict
+    return regions
 
-    """
-    output_dict = deepcopy(capacity_factor_dict)
 
-    for region in capacity_factor_dict:
-        for tech in capacity_factor_dict[region]:
-            output_dict[region][tech] = capacity_factor_dict[region][tech] * potential_dict[region][tech]
+def convert_old_country_names(c):
+    """Converting country old full names to new ones, as some datasets are not updated on the issue."""
 
-    return output_dict
+    if c == "Macedonia":
+        return "North Macedonia"
 
+    if c == "Czech Republic":
+        return "Czechia"
 
-def retrieve_tech_coordinates_tuples(input_dict):
-    """
-    Retrieves list of all (tech, loc) tuples.
+    if c == 'Syria':
+        return 'Syrian Arab Republic'
 
-    Parameters
-    ----------
-    input_dict : dict
+    if c == 'Iran':
+        return 'Iran, Islamic Republic of'
 
-    Returns
-    -------
-    l : list
+    if c == "Byelarus":
+        return "Belarus"
 
-    """
-    l = []
+    return c
 
-    for key, value in input_dict.items():
-        for idx, val in enumerate(value):
-            l.append((key, idx))
 
-    return l
+def remove_landlocked_countries(country_list):
+    """Filtering out landlocked countries."""
+    landlocked_countries = {'AT', 'BY', 'CH', 'CZ', 'HU', 'LI', 'LU', 'MD', 'MK', 'RS', 'SK'}
+    return sorted(list(set(country_list) - landlocked_countries))
 
 
-def retrieve_incidence_matrix(input_dict):
+def convert_country_codes(source_codes, source_format, target_format, throw_error=False):
     """
-    Computes the (region, tech) vs. (lon, lat) incidence matrix.
+    Convert country codes, e.g., from ISO_2 to full name.
 
     Parameters
     ----------
-    input_dict : dict containing xarray.Dataset objects indexed by (region, tech) tuples.
+    source_codes: List[str]
+        List of codes to convert.
+    source_format: str
+        Format of the source codes (alpha_2, alpha_3, name, ...)
+    target_format: str
+        Format to which code must be converted (alpha_2, alpha_3, name, ...)
+    throw_error: bool (default: False)
+        Whether to throw an error if an attribute does not exist.
 
     Returns
     -------
-    incidence_matrix : DataFrame
-
+    target_codes: List[str]
+        List of converted codes.
     """
-    coord_list = []
-    for concat_key in input_dict.keys():
-        coord_list.extend(list(input_dict[concat_key].locations.values))
-
-    idx = list(set(coord_list))
-    cols = list(input_dict.keys())
-
-    incidence_matrix = DataFrame(0, index=idx, columns=cols)
+    target_codes = []
+    for code in source_codes:
+        try:
+            country_codes = pyc.countries.get(**{source_format: code})
+            if country_codes is None:
+                raise KeyError(f"Data is not available for code {code} of type {source_format}.")
+            target_code = getattr(country_codes, target_format)
+        except (KeyError, AttributeError) as e:
+            if throw_error:
+                raise e
+            target_code = nan
+        target_codes += [target_code]
+    return target_codes
 
-    for concat_key in input_dict.keys():
 
-        coordinates = input_dict[concat_key].locations.values
-
-        for c in coordinates:
-            incidence_matrix.loc[c, concat_key] = 1
-
-    return incidence_matrix
-
-
-def retrieve_location_indices_per_tech(input_dict):
+def get_onshore_shapes(regions, data_path):
     """
-    Retrieves integer indices of locations associated with each technology.
-
-    Parameters
-    ----------
-    input_dict : dict
-
-    Returns
-    -------
-    output_dict : dict
+    Return onshore country shapes (indexed by ISO-2 codes) from the world countries boundaries shapefile.
 
     """
-    key_list = []
 
-    for k1, v1 in input_dict.items():
-        for k2, v2 in v1.items():
-            key_list.append((k1, k2))
-
-    output_dict = deepcopy(input_dict)
-
-    for region, tech in key_list:
-        output_dict[region][tech] = arange(len(input_dict[region][tech].locations))
-
-    return output_dict
+    arcgis_fn = f"{data_path}input_data/shapefiles/Longitude_Graticules_and_World_Countries_Boundaries-shp/" \
+                f"99bfd9e7-bb42-4728-87b5-07f8c8ac631c2020328-1-1vef4ev.lu5nk.shp"
+    shapes = read_file(arcgis_fn)
+    shapes["CNTRY_NAME"] = shapes["CNTRY_NAME"].apply(convert_old_country_names)
+    shapes["iso2"] = Series(convert_country_codes(shapes["CNTRY_NAME"].values, "name", "alpha_2"))
+    shapes = shapes[notnull(shapes["iso2"])]
+    shapes = shapes.set_index("iso2")['geometry']
 
+    if regions is not None:
+        missing_codes = set(regions) - set(shapes.index)
+        assert not missing_codes, f"Error: Shapes are not available for the " \
+                                  f"following codes: {sorted(list(missing_codes))}"
+        shapes = shapes[regions]
 
-def return_region_divisions(region_list, path_shapefile_data):
-    # Load countries/regions shapes
-    onshore_shapes_all = read_file(join(path_shapefile_data, 'NUTS_RG_01M_2016_4326_LEVL_0_incl_BA.geojson'))
+    return shapes
 
-    regions = []
-    for region in region_list:
 
-        if region == 'EU':
-            region_subdivisions = ['AT', 'BE', 'DE', 'DK', 'ES',
-                                   'FR', 'UK', 'IE', 'IT', 'LU',
-                                   'NL', 'NO', 'PT', 'SE', 'CH', 'CZ',
-                                   'EE', 'LV', 'RO', 'BG', 'EL', 'HR', 'RS',
-                                   'FI', 'EL', 'HR', 'HU', 'LT',
-                                   'PL', 'SI', 'SK']
-        elif region == 'NA':
-            region_subdivisions = ['DZ', 'EG', 'MA', 'LY', 'TN']
-        elif region == 'ME':
-            region_subdivisions = ['AE', 'BH', 'CY', 'IR', 'IQ', 'IL', 'JO', 'KW', 'LB', 'OM', 'PS', 'QA', 'SA', 'SY',
-                                   'YE']
-        elif region == 'US_S':
-            region_subdivisions = ['US-TX']
-        elif region == 'US_W':
-            region_subdivisions = ['US-AZ', 'US-CA', 'US-CO', 'US-MT', 'US-WY', 'US-NM',
-                                   'US-UT', 'US-ID', 'US-WA', 'US-NV', 'US-OR']
-        elif region == 'US_E':
-            region_subdivisions = ['US-ND', 'US-SD', 'US-NE', 'US-KS', 'US-OK', 'US-MN',
-                                   'US-IA', 'US-MO', 'US-AR', 'US-LA', 'US-MS', 'US-AL', 'US-TN',
-                                   'US-IL', 'US-WI', 'US-MI', 'US-IN', 'US-OH', 'US-KY', 'US-GA', 'US-FL',
-                                   'US-PA', 'US-SC', 'US-NC', 'US-VA', 'US-WV',
-                                   'US-MD', 'US-DE', 'US-NJ', 'US-NY', 'US-CT', 'US-RI',
-                                   'US-MA', 'US-VT', 'US-ME', 'US-NH']
-        elif region in onshore_shapes_all['CNTR_CODE'].values:
-            region_subdivisions = [region]
-
-        regions.extend(region_subdivisions)
-
-    return regions
-
-
-def return_region_shapefile(region, path_shapefile_data):
+def get_offshore_shapes(regions, data_path):
     """
-    Returns shapefile associated with the region(s) of interest.
-
-    Parameters
-    ----------
-    region : str
-
-    path_shapefile_data : str
-
-    Returns
-    -------
-    output_dict : dict
-        Dict object containing i) region subdivisions (if the case) and
-        ii) associated onshore and offshore shapes.
-
+    Return offshore shapes for a list of regions.
     """
 
-    # Load countries/regions shapes
-    onshore_shapes_all = read_file(join(path_shapefile_data, 'NUTS_RG_01M_2016_4326_LEVL_0_incl_BA.geojson'))
-
-    if region == 'EU':
-        region_subdivisions = ['AT', 'BE', 'DE', 'DK', 'ES',
-                               'FR', 'UK', 'IE', 'IT', 'LU',
-                               'NL', 'NO', 'PT', 'SE', 'CH', 'CZ',
-                               'AL', 'EE', 'LV',
-                               'FI', 'EL', 'HR', 'HU', 'LT',
-                               'PL', 'SI', 'SK']
-    elif region == 'NA':
-        region_subdivisions = ['DZ', 'EG', 'MA', 'LY', 'TN']
-    elif region == 'ME':
-        region_subdivisions = ['AE', 'BH', 'CY', 'IR', 'IQ', 'IL', 'JO', 'KW', 'LB', 'OM', 'PS', 'QA', 'SA', 'SY', 'YE']
-    elif region == 'US_S':
-        region_subdivisions = ['US-TX']
-    elif region == 'US_W':
-        region_subdivisions = ['US-AZ', 'US-CA', 'US-CO', 'US-MT', 'US-WY', 'US-NM',
-                               'US-UT', 'US-ID', 'US-WA', 'US-NV', 'US-OR']
-    elif region == 'US_E':
-        region_subdivisions = ['US-ND', 'US-SD', 'US-NE', 'US-KS', 'US-OK', 'US-MN',
-                               'US-IA', 'US-MO', 'US-AR', 'US-LA', 'US-MS', 'US-AL', 'US-TN',
-                               'US-IL', 'US-WI', 'US-MI', 'US-IN', 'US-OH', 'US-KY', 'US-GA', 'US-FL',
-                               'US-PA', 'US-SC', 'US-NC', 'US-VA', 'US-WV',
-                               'US-MD', 'US-DE', 'US-NJ', 'US-NY', 'US-CT', 'US-RI',
-                               'US-MA', 'US-VT', 'US-ME', 'US-NH']
-    elif region in onshore_shapes_all['CNTR_CODE'].values:
-        region_subdivisions = [region]
-    else:
-        raise ValueError(' Unknown region ', region)
+    # Remove landlocked countries for which there is no offshore shapes
+    iso_codes = remove_landlocked_countries(regions)
 
-    onshore_shapes_selected = get_onshore_shapes(region_subdivisions, path_shapefile_data)
-    offshore_shapes_selected = get_offshore_shapes(region_subdivisions, onshore_shapes_selected, path_shapefile_data)
+    eez_fn = f"{data_path}input_data/shapefiles/eez/World_EEZ_v8_2014.shp"
+    eez_shapes = read_file(eez_fn)
 
-    onshore = hstack((onshore_shapes_selected["geometry"].values))
-    offshore = hstack((offshore_shapes_selected["geometry"].values))
+    eez_shapes = eez_shapes[notnull(eez_shapes['ISO_3digit'])]
+    # Create column with ISO_A2 code.
+    eez_shapes['ISO_A2'] = convert_country_codes(eez_shapes['ISO_3digit'].values, 'alpha_3', 'alpha_2')
+    eez_shapes = eez_shapes[["geometry", "ISO_A2"]].dropna()
+    eez_shapes = eez_shapes.set_index('ISO_A2')["geometry"]
 
-    onshore_union = unary_union(onshore)
-    offshore_union = unary_union(offshore)
+    # Filter shapes
+    missing_codes = set(iso_codes) - set(eez_shapes.index)
+    assert not missing_codes, f"Error: No shapes available for codes {sorted(list(missing_codes))}"
+    eez_shapes = eez_shapes[iso_codes]
 
-    onshore_prepared = prepared.prep(onshore_union)
-    offshore_prepared = prepared.prep(offshore_union)
+    # Combine polygons corresponding to the same countries.
+    unique_codes = set(eez_shapes.index)
+    offshore_shapes = GeoSeries(name='geometry')
+    for c in unique_codes:
+        offshore_shapes[c] = unary_union(eez_shapes[c])
 
-    output_dict = {'region_subdivisions': region_subdivisions,
-                   'region_shapefiles': {'onshore': onshore_prepared,
-                                         'offshore': offshore_prepared}}
+    return offshore_shapes
 
-    return output_dict
 
+def union_regions(regions, data_path, which='both', prepped=True):
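+    """Return the union of the onshore and/or offshore shapes of the given regions,
+    optionally as a prepared geometry for fast point-in-polygon tests."""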
 
-def union_regions(regions, path_shapefile_data, which='both', prepped=True):
+    regions = return_region_divisions(regions, data_path)
 
-    regions = return_region_divisions(regions, path_shapefile_data)
+    if which == 'both':
+        onshore_shapes_selected = get_onshore_shapes(regions, data_path)
+        offshore_shapes_selected = get_offshore_shapes(regions, data_path)
 
-    onshore_shapes_selected = get_onshore_shapes(regions, path_shapefile_data)
-    offshore_shapes_selected = get_offshore_shapes(regions, onshore_shapes_selected, path_shapefile_data)
+        onshore = hstack(onshore_shapes_selected.values)
+        offshore = hstack(offshore_shapes_selected.values)
+        all_shapes = hstack((onshore, offshore))
 
-    onshore = hstack((onshore_shapes_selected["geometry"].buffer(0).values))
-    offshore = hstack((offshore_shapes_selected["geometry"].buffer(0).values))
-    all = hstack((onshore, offshore))
+        union = unary_union(all_shapes)
 
-    if which == 'both':
-        union = unary_union(all)
     elif which == 'onshore':
+        onshore_shapes_selected = get_onshore_shapes(regions, data_path)
+        onshore = hstack(onshore_shapes_selected.values)
+
         union = unary_union(onshore)
+
     elif which == 'offshore':
+        offshore_shapes_selected = get_offshore_shapes(regions, data_path)
+        offshore = hstack(offshore_shapes_selected.values)
+
         union = unary_union(offshore)
 
-    if prepped == True:
+    if prepped:
         full_shape = prepared.prep(union)
     else:
         full_shape = union
@@ -649,36 +319,6 @@ def union_regions(regions, path_shapefile_data, which='both', prepped=True):
 
 
 def return_coordinates_from_shapefiles(resource_dataset, shapefiles_region):
-    """
-    Returning coordinate (lon, lat) pairs falling into the region(s) of interest.
-
-    Parameters
-    ----------
-    resource_dataset : xarray.Dataset
-        Resource dataset.
-    shapefiles_region : dict
-        Dict object containing the onshore and offshore shapefiles.
-
-    Returns
-    -------
-    coordinates_in_region : list
-        List of coordinate pairs in the region of interest.
-
-    """
-    start_coordinates = list(zip(resource_dataset.longitude.values, resource_dataset.latitude.values))
-
-    coordinates_in_region_onshore = array(start_coordinates, dtype('float,float'))[
-        [shapefiles_region['onshore'].contains(Point(p)) for p in start_coordinates]].tolist()
-
-    coordinates_in_region_offshore = array(start_coordinates, dtype('float,float'))[
-        [shapefiles_region['offshore'].contains(Point(p)) for p in start_coordinates]].tolist()
-
-    coordinates_in_region = list(set(coordinates_in_region_onshore).union(set(coordinates_in_region_offshore)))
-
-    return coordinates_in_region
-
-
-def return_coordinates_from_shapefiles_light(resource_dataset, shapefiles_region):
     """
     """
     start_coordinates = list(zip(resource_dataset.longitude.values, resource_dataset.latitude.values))
@@ -690,6 +330,7 @@ def return_coordinates_from_shapefiles_light(resource_dataset, shapefiles_region
 
 
 def retrieve_load_data_partitions(path_load_data, date_slice, alpha, delta, regions, norm_type):
+
     dict_regions = {'EU': ['AT', 'BE', 'CH', 'DE', 'DK', 'ES',
                            'FR', 'UK', 'IE', 'IT', 'LU',
                            'NL', 'PT', 'SE', 'CZ',
@@ -745,145 +386,16 @@ def return_filtered_and_normed(signal, delta, type='min'):
     return l_norm.values
 
 
-def read_legacy_capacity_data(start_coordinates, region_subdivisions, tech, path_legacy_data):
-    """
-    Reads dataset of existing RES units in the given area. Available for EU only.
-
-    Parameters
-    ----------
-    start_coordinates : list
-    tech : str
-    path_legacy_data : str
-
-    Returns
-    -------
-    output_dict : dict
-        Dict object storing existing capacities per node for a given technology.
-    """
-    if (tech.split('_')[0] == 'wind') & (tech.split('_')[1] != 'floating'):
-
-        # data = read_excel(join(path_legacy_data, 'Windfarms_Europe_20200127_EL_UK_2010.xls'), sheet_name='Windfarms',
-        # header=0, usecols=[2, 5, 9, 10, 18, 22, 23], skiprows=[1])
-        data = read_excel(join(path_legacy_data, 'Windfarms_Europe_20200127_EL_UK.xls'), sheet_name='Windfarms',
-                          header=0, usecols=[2, 5, 9, 10, 18, 22, 23], skiprows=[1])
-
-        data = data[~data['Latitude'].isin(['#ND'])]
-        data = data[~data['Longitude'].isin(['#ND'])]
-        data = data[~data['Total power'].isin(['#ND'])]
-        data = data[data['Status'] != 'Dismantled']
-        data = data[data['ISO code'].isin(region_subdivisions)]
-        # data = data[data['Commissioning date'].str.startswith(tuple(['#ND', '201']), na=False)]
-
-        if tech == 'wind_onshore':
-
-            capacity_threshold = 0.2
-            data_filtered = data[data['Area'] != 'Offshore'].copy()
-
-        elif tech == 'wind_offshore':
-
-            capacity_threshold = 0.5
-            data_filtered = data[data['Area'] == 'Offshore'].copy()
-
-        asset_coordinates = array(list(zip(data_filtered['Longitude'],
-                                           data_filtered['Latitude'])))
-
-        node_list = []
-        for c in asset_coordinates:
-            node_list.append(tuple(start_coordinates[distance.cdist(start_coordinates, [c], 'euclidean').argmin()]))
-
-        data_filtered['Node'] = node_list
-        aggregate_capacity_per_node = data_filtered.groupby(['Node'])['Total power'].agg('sum')
-        aggregate_capacity_per_node = aggregate_capacity_per_node * (1e-6)
-
-        output_dict = aggregate_capacity_per_node[aggregate_capacity_per_node > capacity_threshold].to_dict()
-
-    elif (tech.split('_')[0] == 'pv') & (tech.split('_')[1] == 'utility'):
-
-        data = read_excel(join(path_legacy_data, 'Solarfarms_Europe_20200208.xlsx'), sheet_name='ProjReg_rpt',
-                          header=0, usecols=[0, 3, 4, 5, 8])
-
-        data = data[notnull(data['Coords'])]
-        data['Longitude'] = data['Coords'].str.split(',', 1).str[1]
-        data['Latitude'] = data['Coords'].str.split(',', 1).str[0]
-        data['ISO code'] = data['Country'].map(return_ISO_codes_from_countries())
-
-        data = data[data['ISO code'].isin(region_subdivisions)]
-
-        capacity_threshold = 0.05
-
-        asset_coordinates = array(list(zip(data['Longitude'],
-                                           data['Latitude'])))
-
-        node_list = []
-        for c in asset_coordinates:
-            node_list.append(tuple(start_coordinates[distance.cdist(start_coordinates, [c], 'euclidean').argmin()]))
-
-        data['Node'] = node_list
-        aggregate_capacity_per_node = data.groupby(['Node'])['MWac'].agg('sum')
-        aggregate_capacity_per_node = aggregate_capacity_per_node * (1e-3)
-
-        output_dict = aggregate_capacity_per_node[aggregate_capacity_per_node > capacity_threshold].to_dict()
-
-    else:
-
-        output_dict = None
-
-    return output_dict
-
-
-def retrieve_nodes_with_legacy_units(input_dict, region, tech, path_shapefile_data):
-    """
-    Returns list of nodes where capacity exists.
-
-    Parameters
-    ----------
-    input_dict : dict
-        Dict object storing existing capacities per node for a given technology.
-    region : str
-        Region.
-    tech : str
-        Technology.
-    path_shapefile_data : str
-
-    Returns
-    -------
-    existing_locations_filtered : array
-        Array populated with coordinate tuples where capacity exists, for a given region and technology.
-
-    """
-
-    if input_dict == None:
-
-        existing_locations_filtered = []
-
-    else:
-
-        existing_locations = list(input_dict.keys())
-
-        if tech in ['wind_offshore', 'wind_floating']:
-
-            # region_shapefile = return_region_shapefile(region, path_shapefile_data)['region_shapefiles']['offshore']
-            region_shapefile = union_regions(region, path_shapefile_data, which='offshore')
-
-        else:
-
-            # region_shapefile = return_region_shapefile(region, path_shapefile_data)['region_shapefiles']['onshore']
-            region_shapefile = union_regions(region, path_shapefile_data, which='onshore')
-
-        existing_locations_filtered = array(existing_locations, dtype('float,float'))[
-            [region_shapefile.contains(Point(p)) for p in existing_locations]].tolist()
-
-    return existing_locations_filtered
-
-
-def filter_onshore_offshore_locations(coordinates_in_region, spatial_resolution, tech_dict, tech):
+def filter_onshore_offshore_locations(coordinates_in_region, data_path, spatial_resolution, tech_dict, tech):
     """
     Filters on- and offshore coordinates.
 
     Parameters
     ----------
     coordinates_in_region : list
+    data_path: str
     spatial_resolution : float
+    tech_dict: dict
     tech : str
 
     Returns
@@ -891,12 +403,12 @@ def filter_onshore_offshore_locations(coordinates_in_region, spatial_resolution,
     updated_coordinates : list
         Coordinates filtered via land/water mask.
     """
-    filename = 'ERA5_surface_characteristics_20181231_' + str(spatial_resolution) + '.nc'
-    path_land_data = '../input_data/land_data'
 
-    dataset = xr.open_dataset(join(path_land_data, filename))
-    dataset = dataset.sortby([dataset.longitude, dataset.latitude])
+    land_fn = 'ERA5_surface_characteristics_20181231_' + str(spatial_resolution) + '.nc'
+    land_path = join(data_path, 'input_data/land_data', land_fn)
 
+    dataset = xr.open_dataset(land_path)
+    dataset = dataset.sortby([dataset.longitude, dataset.latitude])
     dataset = dataset.assign_coords(longitude=(((dataset.longitude
                                                  + 180) % 360) - 180)).sortby('longitude')
     dataset = dataset.drop('time').squeeze().stack(locations=('longitude', 'latitude'))
@@ -911,15 +423,12 @@ def filter_onshore_offshore_locations(coordinates_in_region, spatial_resolution,
     mask_offshore = array_bathymetry.where(((array_bathymetry.data < depth_threshold_low) |
                                             (array_bathymetry.data > depth_threshold_high)) |
                                            (array_watermask.data > 0.1))
-
     coords_mask_offshore = mask_offshore[mask_offshore.notnull()].locations.values.tolist()
 
     if tech in ['wind_onshore', 'pv_utility', 'pv_residential']:
-
         updated_coordinates = list(set(coordinates_in_region).intersection(set(coords_mask_offshore)))
 
     elif tech in ['wind_offshore', 'wind_floating']:
-
         updated_coordinates = list(set(coordinates_in_region).difference(set(coords_mask_offshore)))
 
     else:
@@ -928,60 +437,7 @@ def filter_onshore_offshore_locations(coordinates_in_region, spatial_resolution,
     return updated_coordinates
 
 
-def match_point_to_region(point, shape_data, indicator_data):
-    """
-    Assings a given coordinate tuple (lon, lat) to a NUTS (or any other) region.
-
-    Parameters
-    ----------
-    point : tuple
-        Coordinate in (lon, lat) form.
-    shape_data : GeoDataFrame
-        Dataframe storing geometries of NUTS regions.
-    indicator_data : dict
-        Dict object storing technical potential of NUTS regions.
-
-    Returns
-    -------
-    incumbent_region : str
-        Region in which point "p" falls.
-    """
-    dist = {}
-
-    p = Point(point)
-
-    incumbent_region = None
-
-    for subregion in list(indicator_data.keys()):
-
-        if subregion in shape_data.index:
-
-            if p.within(shape_data.loc[subregion, 'geometry']):
-                incumbent_region = subregion
-
-            dist[subregion] = p.distance(shape_data.loc[subregion, 'geometry'])
-
-    if incumbent_region == None:
-        print(p, min(dist, key=dist.get))
-
-        incumbent_region = min(dist, key=dist.get)
-
-    return incumbent_region
-
-
-def return_ISO_codes_from_countries():
-    dict_ISO = {'Albania': 'AL', 'Armenia': 'AR', 'Belarus': 'BL', 'Belgium': 'BE', 'Bulgaria': 'BG', 'Croatia': 'HR',
-                'Cyprus': 'CY', 'Czech Republic': 'CZ', 'Estonia': 'EE', 'Latvia': 'LV', 'Lithuania': 'LT',
-                'Denmark': 'DK', 'France': 'FR', 'Germany': 'DE', 'Greece': 'EL', 'Hungary': 'HU', 'Ireland': 'IE',
-                'Italy': 'IT', 'Macedonia': 'MK', 'Malta': 'MT', 'Norway': 'NO', 'Iceland': 'IS', 'Finland': 'FI',
-                'Montenegro': 'MN', 'Netherlands': 'NL', 'Poland': 'PL', 'Portugal': 'PT', 'Romania': 'RO',
-                'Slovak Republic': 'SK', 'Spain': 'ES', 'Sweden': 'SE',
-                'Switzerland': 'CH', 'Turkey': 'TR', 'Ukraine': 'UA', 'United Kingdom': 'UK'}
-
-    return dict_ISO
-
-
-def get_partition_index(input_dict, deployment_vector, capacity_split='per_tech'):
+def get_partition_index(input_dict):
     """
     Returns start and end indices for each (region, technology) tuple. Required in case the problem
     is defined with partitioning constraints.
@@ -990,133 +446,54 @@ def get_partition_index(input_dict, deployment_vector, capacity_split='per_tech'
     ----------
     input_dict : dict
         Dict object storing coordinates per region and tech.
-    deployment_vector : list
-        List containing the deployment requirements (un-partitioned or not).
-    capacity_split : str
-        Capacity splitting rule. To choose between "per_tech" and "per_country".
 
     Returns
     -------
-    index_list : list
-        List of indices associated with each (region, technology) tuple.
+    index_dict : dict
+        Dict of indices associated with each (region, technology) tuple.
 
     """
     key_list = return_dict_keys(input_dict)
-
-    init_index_dict = deepcopy(input_dict)
-
-    regions = list(set([i[0] for i in key_list]))
-    technologies = list(set([i[1] for i in key_list]))
+    index_dict = deepcopy(input_dict)
 
     start_index = 0
     for region, tech in key_list:
-        init_index_dict[region][tech] = list(arange(start_index, start_index + len(input_dict[region][tech])))
+        indices = list(arange(start_index, start_index + len(input_dict[region][tech])))
+        index_dict[region][tech] = [i + 1 for i in indices]
         start_index = start_index + len(input_dict[region][tech])
 
-    if capacity_split == 'per_country':
-
-        if len(deployment_vector) == len(regions):
-
-            index_dict = {key: None for key in regions}
-            for region in regions:
-                index_list_per_region = []
-                tech_list_in_region = [i[1] for i in key_list if i[0] == region]
-                for tech in tech_list_in_region:
-                    index_list_per_region.extend(init_index_dict[region][tech])
-                index_dict[region] = [i for i in index_list_per_region]
-
-            for region in regions:
-                index_dict[region] = [i + 1 for i in index_dict[region]]
-
-        else:
-
-            raise ValueError(' Number of regions ({}) does not match number of deployment constraints ({}).'.format
-                             (len(regions), len(deployment_vector)))
-
-    elif capacity_split == 'per_tech':
-
-        if len(deployment_vector) == len(technologies):
-
-            index_dict = {key: None for key in technologies}
-            for tech in technologies:
-                index_list_per_tech = []
-                region_list_with_tech = [i[0] for i in key_list if i[1] == tech]
-                for region in region_list_with_tech:
-                    index_list_per_tech.extend(init_index_dict[region][tech])
-                index_dict[tech] = [i for i in index_list_per_tech]
-
-            for tech in technologies:
-                index_dict[tech] = [i + 1 for i in index_dict[tech]]
-
-        else:
-
-            raise ValueError(' Number of technologies ({}) does not match number of deployment constraints ({}).'.format
-                             (len(technologies), len(deployment_vector)))
-
-    elif capacity_split == 'per_country_and_tech':
-
-        index_dict = init_index_dict
-
-        for region, tech in key_list:
-            index_dict[region][tech] = [i + 1 for i in index_dict[region][tech]]
-
     return index_dict
 
 
-def read_inputs(inputs):
-    """
-
-    Parameters
-    ----------
-    inputs :
-
-    Returns
-    -------
-
-    """
-    with open(inputs) as infile:
-        data = yaml.safe_load(infile)
-
-    return data
-
-
 def init_folder(parameters, c, suffix=None):
     """Initilize an output folder.
 
     Parameters:
-
     ------------
-
     parameters : dict
         Parameters dictionary.
 
     Returns:
-
     ------------
-
     path : str
         Relative path of the folder.
 
-
     """
-    prefix = str(parameters['name_prefix'])
-    no_locs = sum({k: sum(v.values()) for k, v in parameters['deployment_vector'].items()}.values())
-    no_part = len(parameters['deployment_vector'])
-    no_yrs = int(round((to_datetime(parameters['time_slice'][1]) - to_datetime(parameters['time_slice'][0])) / timedelta64(1, 'Y'), 0))
+    output_data_path = join(parameters['data_path'], 'output_data')
 
-    if not isdir("../output_data"):
-        makedirs(abspath("../output_data"))
+    no_locs = str(sum(parameters['deployments']))
+    no_part = str(len(parameters['regions']))
+    no_yrs = str(int(round((to_datetime(parameters['time_slice'][1]) -
+                            to_datetime(parameters['time_slice'][0])) / timedelta64(1, 'Y'), 0)))
+    c = str(c)
 
-        path = abspath('../output_data/' + prefix + str(no_yrs) + 'y_n' + str(no_locs) + '_k' + str(no_part) + '_c' + str(c) + suffix)
-        makedirs(path)
-    else:
-        path = abspath('../output_data/' + prefix + str(no_yrs) + 'y_n' + str(no_locs) + '_k' + str(no_part) + '_c' + str(c) + suffix)
-        makedirs(path)
+    if not isdir(output_data_path):
+        makedirs(abspath(output_data_path))
 
-    custom_log(' Folder path is: {}'.format(str(path)))
+    path = abspath(output_data_path + '/' + no_yrs + 'y_n' + no_locs + '_k' + no_part + '_c' + c + suffix)
+    makedirs(path)
 
-    if parameters['keep_files'] == False:
-        custom_log(' WARNING! Files will be deleted at the end of the run.')
+    custom_log(f"Folder path is: {path}")
 
     return path
 
@@ -1132,8 +509,7 @@ def generate_jl_input(deployment_dict, filtered_coordinates):
 
     deployment_dict_int = dict(zip(int_to_region_map.values(), concat_deployment_dict.values()))
 
-    index_dict = concatenate_dict_keys(get_partition_index(filtered_coordinates, deployment_dict,
-                                                           capacity_split='per_country_and_tech'))
+    index_dict = concatenate_dict_keys(get_partition_index(filtered_coordinates))
     index_dict_swap = {k: oldk for oldk, oldv in index_dict.items() for k in oldv}
     for key, value in index_dict_swap.items():
         index_dict_swap[key] = int_to_region_map[value]
@@ -1152,3 +528,20 @@ def custom_log(message):
 
     """
     print(datetime.now().strftime('%H:%M:%S') + ' --- ' + str(message))
+
+
+def read_inputs(inputs):
+    """
+
+    Parameters
+    ----------
+    inputs :
+
+    Returns
+    -------
+
+    """
+    with open(inputs) as infile:
+        data = yaml.safe_load(infile)
+
+    return data
diff --git a/src/jl/SitingHeuristics.jl b/src/jl/SitingHeuristics.jl
index e5ac3c8cc45be6eef6d646d18270872c5134ea87..569f0db9d22d454893edd695fd864a6edaa08ca8 100644
--- a/src/jl/SitingHeuristics.jl
+++ b/src/jl/SitingHeuristics.jl
@@ -3,9 +3,6 @@ using PyCall
 include("optimisation_models.jl")
 include("MCP_heuristics.jl")
 
-module SitingHeuristics
-
-# Simulated Annealing & Local Search algorithm
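+# Simulated Annealing & Local Search algorithm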
 function main_MIRSA(index_dict, deployment_dict, D, c, N, I, E, T_init, R, run)
 
   index_dict = Dict([(convert(Int64, k), convert(Int64, index_dict[k])) for k in keys(index_dict)])
@@ -58,34 +55,6 @@ function main_MIRSA(index_dict, deployment_dict, D, c, N, I, E, T_init, R, run)
 
 end
 
-# Random Search algorithm
-function main_RAND(deployment_dict, D, c, run)
-
-  deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)])
-  D  = convert.(Float64, D)
-  c = convert(Float64, c)
-
-  if run == "RS"
-
-    x_sol, LB_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R)
-    n = convert(Float64, deployment_dict[1])
-    runs = convert(Int64, I*E)
-
-    for r = 1:R
-      println("Run ", r, "/", R)
-      x_sol[r, :], LB_sol[r] = random_search(D, c, n, runs)
-    end
-
-  else
-    println("No such run available.")
-    throw(ArgumentError)
-  end
-
-  return x_sol, LB_sol
-
-end
-
-# Greedy algorithms
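+# Greedy algorithms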
 function main_GRED(deployment_dict, D, c, R, eps, run)
 
   deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)])
@@ -126,7 +95,33 @@ function main_GRED(deployment_dict, D, c, R, eps, run)
 
 end
 
-#Local Search algorithm
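+# Random Search algorithm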
+function main_RAND(deployment_dict, D, c, run)
+
+  deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)])
+  D  = convert.(Float64, D)
+  c = convert(Float64, c)
+
+  if run == "RS"
+
+    x_sol, LB_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R)
+    n = convert(Float64, deployment_dict[1])
+    runs = convert(Int64, I*E)
+
+    for r = 1:R
+      println("Run ", r, "/", R)
+      x_sol[r, :], LB_sol[r] = random_search(D, c, n, runs)
+    end
+
+  else
+    println("No such run available.")
+    throw(ArgumentError)
+  end
+
+  return x_sol, LB_sol
+
+end
+
+# Local Search algorithm
 function main_LSEA(index_dict, deployment_dict, D, c, N, I, E, run)
 
   index_dict = Dict([(convert(Int64, k), convert(Int64, index_dict[k])) for k in keys(index_dict)])
@@ -160,6 +155,4 @@ function main_LSEA(index_dict, deployment_dict, D, c, N, I, E, run)
 
   return x_sol, LB_sol, obj_sol
 
-end
-
-end
+end
\ No newline at end of file
diff --git a/src/jl/SitingHeuristics_GRED.jl b/src/jl/SitingHeuristics_GRED.jl
deleted file mode 100644
index 881991e72be04e9b0341f62bcd1ca1dd86447e39..0000000000000000000000000000000000000000
--- a/src/jl/SitingHeuristics_GRED.jl
+++ /dev/null
@@ -1,44 +0,0 @@
-using PyCall
-
-include("optimisation_models.jl")
-include("MCP_heuristics.jl")
-
-function main_GRED(deployment_dict, D, c, R, eps, run)
-
-  deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)])
-  D  = convert.(Float64, D)
-  c = convert(Float64, c)
-  R = convert(Int64, R)
-
-  W, L = size(D)
-
-  if run == "RGH"
-    x_sol, LB_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R)
-    n = convert(Float64, deployment_dict[1])
-    for r = 1:R
-      println("Run ", r, "/", R)
-      x_sol[r, :], LB_sol[r] = randomised_greedy_heuristic(D, c, n)
-    end
-  elseif run == "CGA"
-    x_sol, LB_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R)
-    n = convert(Float64, deployment_dict[1])
-    for r = 1:R
-      println("Run ", r, "/", R)
-      x_sol[r, :], LB_sol[r] = classic_greedy_algorithm(D, c, n)
-    end
-  elseif run == "SGA"
-    x_sol, LB_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R)
-    n = convert(Float64, deployment_dict[1])
-    eps = convert(Float64, 0.001)
-    for r = 1:R
-      println("Run ", r, "/", R)
-      x_sol[r, :], LB_sol[r] = stochastic_greedy_algorithm(D, c, n, eps)
-    end
-  else
-    println("No such run available.")
-    throw(ArgumentError)
-  end
-
-  return x_sol, LB_sol
-
-end
diff --git a/src/jl/SitingHeuristics_LSEA.jl b/src/jl/SitingHeuristics_LSEA.jl
deleted file mode 100644
index 2d8bda99db77653f9c843a932a71959f7bf540e3..0000000000000000000000000000000000000000
--- a/src/jl/SitingHeuristics_LSEA.jl
+++ /dev/null
@@ -1,39 +0,0 @@
-using PyCall
-
-include("optimisation_models.jl")
-include("MCP_heuristics.jl")
-
-function main_LSEA(index_dict, deployment_dict, D, c, N, I, E, run)
-
-  index_dict = Dict([(convert(Int64, k), convert(Int64, index_dict[k])) for k in keys(index_dict)])
-  deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)])
-  D  = convert.(Float64, D)
-
-  c = convert(Float64, c)
-  N = convert(Int64, N)
-  I = convert(Int64, I)
-  E = convert(Int64, E)
-  T_init = convert(Float64, T_init)
-  R = convert(Int64, R)
-  run = string(run)
-
-  W, L = size(D)
-
-  P = maximum(values(index_dict))
-  n_partitions = [deployment_dict[i] for i in 1:P]
-
-  if run == "GLS"
-    x_sol, LB_sol, obj_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R), Array{Float64, 2}(undef, R, I)
-    x_init = solve_MILP_partitioning(D, c, n_partitions, index_dict, "Gurobi")
-    for r = 1:R
-      println("Run ", r, "/", R)
-      x_sol[r, :], LB_sol[r], obj_sol[r, :] = greedy_local_search_partition(D, c, n_partitions, N, I, E, x_init, index_dict)
-    end
-  else
-    println("No such run available.")
-    throw(ArgumentError)
-  end
-
-  return x_sol, LB_sol, obj_sol
-
-end
diff --git a/src/jl/SitingHeuristics_MIRSA.jl b/src/jl/SitingHeuristics_MIRSA.jl
deleted file mode 100644
index bebf2662593c8e26b6775f61596acac5b13d68a8..0000000000000000000000000000000000000000
--- a/src/jl/SitingHeuristics_MIRSA.jl
+++ /dev/null
@@ -1,56 +0,0 @@
-using PyCall
-
-include("optimisation_models.jl")
-include("MCP_heuristics.jl")
-
-function main_MIRSA(index_dict, deployment_dict, D, c, N, I, E, T_init, R, run)
-
-  index_dict = Dict([(convert(Int64, k), convert(Int64, index_dict[k])) for k in keys(index_dict)])
-  deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)])
-  D  = convert.(Float64, D)
-
-  c = convert(Float64, c)
-  N = convert(Int64, N)
-  I = convert(Int64, I)
-  E = convert(Int64, E)
-  T_init = convert(Float64, T_init)
-  R = convert(Int64, R)
-  run = string(run)
-
-  W, L = size(D)
-
-  P = maximum(values(index_dict))
-  n_partitions = [deployment_dict[i] for i in 1:P]
-
-  if run == "SALS"
-
-    x_sol, LB_sol, obj_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R), Array{Float64, 2}(undef, R, I)
-    x_init = solve_MILP_partitioning(D, c, n_partitions, index_dict, "Gurobi")
-
-    for r = 1:R
-      println("Run ", r, "/", R)
-      x_sol[r, :], LB_sol[r], obj_sol[r, :] = simulated_annealing_local_search_partition(D, c, n_partitions, N, I, E, x_init, T_init, index_dict)
-    end
-
-  elseif run == "SALSR"
-
-    x_sol, LB_sol, obj_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R), Array{Float64, 2}(undef, R, I)
-    x_init = zeros(Float64, L)
-    ind = collect(1:L)
-    x_ind = sample(ind, convert(Int64, deployment_dict[1]))
-    x_init[x_ind] .= 1.
-    n = convert(Float64, deployment_dict[1])
-
-    for r = 1:R
-      println("Run ", r, "/", R)
-      x_sol[r, :], LB_sol[r], obj_sol[r, :] = simulated_annealing_local_search(D, c, n, N, I, E, x_init, T_init)
-    end
-
-  else
-    println("No such run available.")
-    throw(ArgumentError)
-  end
-
-  return x_sol, LB_sol, obj_sol
-
-end
diff --git a/src/jl/SitingHeuristics_RAND.jl b/src/jl/SitingHeuristics_RAND.jl
deleted file mode 100644
index e0fa74ea38a9882a2f1997c93ead6e5fd90fa4fc..0000000000000000000000000000000000000000
--- a/src/jl/SitingHeuristics_RAND.jl
+++ /dev/null
@@ -1,30 +0,0 @@
-using PyCall
-
-include("optimisation_models.jl")
-include("MCP_heuristics.jl")
-
-function main_RAND(deployment_dict, D, c, run)
-
-  deployment_dict = Dict([(convert(Int64, k), convert(Int64, deployment_dict[k])) for k in keys(deployment_dict)])
-  D  = convert.(Float64, D)
-  c = convert(Float64, c)
-
-  if run == "RS"
-
-    x_sol, LB_sol = Array{Float64, 2}(undef, R, L), Array{Float64, 1}(undef, R)
-    n = convert(Float64, deployment_dict[1])
-    runs = convert(Int64, I*E)
-
-    for r = 1:R
-      println("Run ", r, "/", R)
-      x_sol[r, :], LB_sol[r] = random_search(D, c, n, runs)
-    end
-
-  else
-    println("No such run available.")
-    throw(ArgumentError)
-  end
-
-  return x_sol, LB_sol
-
-end
diff --git a/src/main.py b/src/main.py
index ad02e0371459f2b8913d3b0e8468d21293c554f6..63211ea78e566cca62c3fe4d907fe7aaa85dacb9 100644
--- a/src/main.py
+++ b/src/main.py
@@ -1,49 +1,56 @@
 import pickle
 import yaml
-from os.path import join, isfile
+from os.path import join
 from numpy import array
+from itertools import cycle
 from pyomo.opt import SolverFactory
 import time
 
-from src.helpers import read_inputs, init_folder, custom_log, generate_jl_input
-from src.models import preprocess_input_data, build_model
-from src.tools import retrieve_location_dict, retrieve_site_data
+from helpers import read_inputs, init_folder, custom_log, xarray_to_ndarray, generate_jl_input, get_deployment_vector
+from tools import read_database, return_filtered_coordinates, selected_data, return_output, resource_quality_mapping, \
+    critical_window_mapping, sites_position_mapping, retrieve_location_dict, retrieve_site_data
+from models import build_ip_model
 
 if __name__ == '__main__':
 
-    parameters = read_inputs('../config_model.yml')
-    keepfiles = parameters['keep_files']
+    custom_log(' Starting data pre-processing')
 
-    if isfile('../input_data/criticality_matrix.p'):
-        custom_log(' WARNING! Instance data read from files. Make sure the files are the ones that you need.')
-        criticality_data = pickle.load(open('../input_data/criticality_matrix.p', 'rb'))
-        coordinates_data_on_loc = pickle.load(open('../input_data/coordinates_data.p', 'rb'))
-        output_data = pickle.load(open('../input_data/output_data.p', 'rb'))
-        site_positions = pickle.load(open('../input_data/site_positions.p', 'rb'))
-    else:
-        custom_log('Files not available.')
-        input_dict = preprocess_input_data(parameters)
-        criticality_data = input_dict['criticality_data']
-        coordinates_data_on_loc = input_dict['coordinates_data']
-        output_data = input_dict['capacity_factor_data']
-        site_positions = input_dict['site_positions_in_matrix']
+    model_parameters = read_inputs('../config_model.yml')
+    siting_parameters = model_parameters['siting_params']
+    tech_parameters = read_inputs('../config_techs.yml')
+
+    data_path = model_parameters['data_path']
+    spatial_resolution = model_parameters['spatial_resolution']
+    time_horizon = model_parameters['time_slice']
+    deployment_dict = get_deployment_vector(model_parameters['regions'],
+                                            model_parameters['technologies'],
+                                            model_parameters['deployments'])
+
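+    # Read the resource database and filter candidate coordinates per region and technology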
+    database = read_database(data_path, spatial_resolution)
+    site_coordinates = return_filtered_coordinates(database, model_parameters, tech_parameters)
+
+    truncated_data = selected_data(database, site_coordinates, time_horizon)
+    output_data = return_output(truncated_data, data_path)
+
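+    # Smooth capacity factors over time windows, then derive the criticality matrix and the site-position mapping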
+    smooth_data = resource_quality_mapping(output_data, siting_parameters)
+    criticality_data = xarray_to_ndarray(critical_window_mapping(smooth_data, model_parameters))
+    site_positions = sites_position_mapping(smooth_data)
 
-        pickle.dump(input_dict['criticality_data'], open('../input_data/criticality_matrix.p', 'wb'), protocol=4)
-        pickle.dump(input_dict['coordinates_data'], open('../input_data/coordinates_data.p', 'wb'), protocol=4)
-        pickle.dump(input_dict['capacity_factor_data'], open('../input_data/output_data.p', 'wb'), protocol=4)
-        pickle.dump(input_dict['site_positions_in_matrix'], open('../input_data/site_positions.p', 'wb'), protocol=4)
+    custom_log(' Data read. Building model.')
 
-    if parameters['solution_method']['BB']['set']:
+    if siting_parameters['solution_method']['BB']['set']:
 
         custom_log(' BB chosen to solve the IP.')
-        params = parameters['solution_method']['BB']
+        params = siting_parameters['solution_method']['BB']
 
         if not isinstance(params['c'], int):
             raise ValueError(' Values of c have to be integers for the Branch & Bound set-up.')
 
-        output_folder = init_folder(parameters, params['c'], '_BB')
+        output_folder = init_folder(model_parameters, params['c'], '_BB')
         with open(join(output_folder, 'config_model.yaml'), 'w') as outfile:
-            yaml.dump(parameters, outfile, default_flow_style=False, sort_keys=False)
+            yaml.dump(model_parameters, outfile, default_flow_style=False, sort_keys=False)
+        with open(join(output_folder, 'config_techs.yaml'), 'w') as outfile:
+            yaml.dump(tech_parameters, outfile, default_flow_style=False, sort_keys=False)
 
         # Solver options for the MIP problem
         opt = SolverFactory(params['solver'])
@@ -51,7 +58,7 @@ if __name__ == '__main__':
         opt.options['Threads'] = params['threads']
         opt.options['TimeLimit'] = params['timelimit']
 
-        instance = build_model(parameters, coordinates_data_on_loc, criticality_data, output_folder, write_lp=False)
+        instance = build_ip_model(deployment_dict, site_coordinates, criticality_data, params['c'], output_folder)
         custom_log(' Sending model to solver.')
 
         results = opt.solve(instance, tee=True, keepfiles=False,
@@ -59,105 +66,113 @@ if __name__ == '__main__':
 
         objective = instance.objective()
         x_values = array(list(instance.x.extract_values().values()))
-        comp_location_dict = retrieve_location_dict(x_values, parameters, site_positions)
-        retrieve_site_data(parameters, coordinates_data_on_loc, output_data, criticality_data, site_positions,
-                           params['c'], comp_location_dict, objective, output_folder)
+        comp_location_dict = retrieve_location_dict(x_values, model_parameters, site_positions)
+        retrieve_site_data(model_parameters, deployment_dict, site_coordinates, output_data, criticality_data,
+                           site_positions, params['c'], comp_location_dict, objective, output_folder)
 
-    elif parameters['solution_method']['MIRSA']['set']:
+    elif siting_parameters['solution_method']['MIRSA']['set']:
 
-        import julia
         custom_log(' MIRSA chosen to solve the IP. Opening a Julia instance.')
-        params = parameters['solution_method']['MIRSA']
+        params = siting_parameters['solution_method']['MIRSA']
 
         if not isinstance(params['c'], list):
             raise ValueError(' Values of c have to elements of a list for the heuristic set-up.')
 
-        jl_dict = generate_jl_input(parameters['deployment_vector'], coordinates_data_on_loc)
+        jl_dict = generate_jl_input(deployment_dict, site_coordinates)
 
+        import julia
         j = julia.Julia(compiled_modules=False)
-        fn = j.include("jl/SitingHeuristics_MIRSA.jl")
+        from julia import Main
+        Main.include("jl/SitingHeuristics.jl")
 
         for c in params['c']:
             print('Running heuristic for c value of', c)
             start = time.time()
-            jl_selected, jl_objective, jl_traj = fn(jl_dict['index_dict'], jl_dict['deployment_dict'],
-                                                    criticality_data, c, params['neighborhood'], params['no_iterations'],
-                                                    params['no_epochs'], params['initial_temp'], params['no_runs'],
-                                                    params['algorithm'])
+            jl_selected, jl_objective, jl_traj = Main.main_MIRSA(jl_dict['index_dict'], jl_dict['deployment_dict'],
+                                                                 criticality_data, c, params['neighborhood'],
+                                                                 params['no_iterations'], params['no_epochs'],
+                                                                 params['initial_temp'], params['no_runs'],
+                                                                 params['algorithm'])
 
             if params['purpose'] == 'planning':
                 seed = params['seed']
                 for i in range(jl_selected.shape[0]):
 
-                    output_folder = init_folder(parameters, c, suffix='_MIRSA_seed' + str(seed))
+                    output_folder = init_folder(model_parameters, c, suffix='_MIRSA_seed' + str(seed))
                     seed += 1
 
                     with open(join(output_folder, 'config_model.yaml'), 'w') as outfile:
-                        yaml.dump(parameters, outfile, default_flow_style=False, sort_keys=False)
+                        yaml.dump(model_parameters, outfile, default_flow_style=False, sort_keys=False)
+                    with open(join(output_folder, 'config_techs.yaml'), 'w') as outfile:
+                        yaml.dump(tech_parameters, outfile, default_flow_style=False, sort_keys=False)
 
                     jl_selected_seed = jl_selected[i, :]
                     jl_objective_seed = jl_objective[i]
 
-                    jl_locations = retrieve_location_dict(jl_selected_seed, parameters, site_positions)
-                    retrieve_site_data(parameters, coordinates_data_on_loc, output_data, criticality_data,
-                                       site_positions, c, jl_locations, jl_objective_seed, output_folder)
+                    jl_locations = retrieve_location_dict(jl_selected_seed, model_parameters, site_positions)
+                    retrieve_site_data(model_parameters, deployment_dict, site_coordinates, output_data,
+                                       criticality_data, site_positions, c, jl_locations, jl_objective_seed,
+                                       output_folder, benchmarks=True)
             else:
 
-                output_folder = init_folder(parameters, suffix='_c' + str(c) + '_MIRSA')
+                output_folder = init_folder(model_parameters, suffix='_c' + str(c) + '_MIRSA')
 
                 pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb'))
                 pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
                 pickle.dump(jl_traj, open(join(output_folder, 'trajectory_matrix.p'), 'wb'))
 
-    # elif parameters['solution_method']['RAND']['set']:
-    #
-    #     import julia
-    #     custom_log(' Locations to be chosen via random search.')
-    #     params = parameters['solution_method']['RAND']
-    #
-    #     if not isinstance(params['c'], list):
-    #         raise ValueError(' Values of c have to provided as list for the RAND set-up.')
-    #     if len(parameters['technologies']) > 1:
-    #         raise ValueError(' This method is currently implemented for one single technology only.')
-    #
-    #     jl_dict = generate_jl_input(parameters['deployment_vector'], coordinates_data_on_loc)
-    #
-    #     j = julia.Julia(compiled_modules=False)
-    #     fn = j.include("jl/SitingHeuristics_RAND.jl")
-    #
-    #     for c in params['c']:
-    #         print('Running heuristic for c value of', c)
-    #
-    #         jl_selected, jl_objective = fn(jl_dict['deployment_dict'], criticality_data, c, params['algorithm'])
-    #
-    #         output_folder = init_folder(parameters, suffix='_c' + str(c) + '_RS')
-    #
-    #         pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb'))
-    #         pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
-    #
-    # elif parameters['solution_method']['GRED']['set']:
-    #
-    #     import julia
-    #     custom_log(' GRED chosen to solve the IP. Opening a Julia instance.')
-    #     params = parameters['solution_method']['GRED']
-    #
-    #     if not isinstance(params['c'], list):
-    #         raise ValueError(' Values of c have to elements of a list for the heuristic set-up.')
-    #
-    #     jl_dict = generate_jl_input(parameters['deployment_vector'], coordinates_data_on_loc)
-    #
-    #     j = julia.Julia(compiled_modules=False)
-    #     fn = j.include("jl/SitingHeuristics_GRED.jl")
-    #
-    #     for c in params['c']:
-    #         print('Running heuristic for c value of', c)
-    #         jl_selected, jl_objective = fn(jl_dict['deployment_dict'], criticality_data, c, params['no_runs'],
-    #                                        params['eps'], params['algorithm'])
-    #
-    #         output_folder = init_folder(parameters, suffix='_c' + str(c) + '_GRED')
-    #
-    #         pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb'))
-    #         pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
+    elif siting_parameters['solution_method']['RAND']['set']:
+
+        custom_log(' Locations to be chosen via random search. Site coordinates are not retrieved for this method.')
+        params = siting_parameters['solution_method']['RAND']
+
+        if not isinstance(params['c'], list):
+            raise ValueError(' Values of c have to be provided as a list for the RAND set-up.')
+        if len(model_parameters['technologies']) > 1:
+            raise ValueError(' This method is currently implemented for one single technology only.')
+
+        jl_dict = generate_jl_input(deployment_dict, site_coordinates)
+
+        import julia
+        j = julia.Julia(compiled_modules=False)
+        from julia import Main
+        Main.include("jl/SitingHeuristics.jl")
+
+        for c in params['c']:
+            print('Running heuristic for c value of', c)
+
+            jl_selected, jl_objective = Main.main_RAND(jl_dict['deployment_dict'], criticality_data,
+                                                       c, params['algorithm'])
+
+            output_folder = init_folder(model_parameters, suffix='_c' + str(c) + '_RS')
+
+            pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb'))
+            pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
+
+    elif siting_parameters['solution_method']['GRED']['set']:
+
+        custom_log(' GRED chosen to solve the IP. Opening a Julia instance. Site coordinates are not retrieved for this method.')
+        params = siting_parameters['solution_method']['GRED']
+
+        if not isinstance(params['c'], list):
+            raise ValueError(' Values of c have to be elements of a list for the heuristic set-up.')
+
+        jl_dict = generate_jl_input(deployment_dict, site_coordinates)
+
+        import julia
+        j = julia.Julia(compiled_modules=False)
+        from julia import Main
+        Main.include("jl/SitingHeuristics.jl")
+
+        for c in params['c']:
+            print('Running heuristic for c value of', c)
+            jl_selected, jl_objective = Main.main_GRED(jl_dict['deployment_dict'], criticality_data, c,
+                                                       params['no_runs'], params['eps'], params['algorithm'])
+
+            output_folder = init_folder(model_parameters, suffix='_c' + str(c) + '_GRED')
+
+            pickle.dump(jl_selected, open(join(output_folder, 'solution_matrix.p'), 'wb'))
+            pickle.dump(jl_objective, open(join(output_folder, 'objective_vector.p'), 'wb'))
 
     else:
         raise ValueError(' This solution method is not available. ')
diff --git a/src/models.py b/src/models.py
index a1b4026d33e9c9d45ce62c958a085c9693c6e98d..7c311f45aa42d61822da7dc358e483cdf18427db 100644
--- a/src/models.py
+++ b/src/models.py
@@ -5,92 +5,10 @@ from pyomo.environ import ConcreteModel, Var, Binary, maximize
 from pyomo.opt import ProblemFormat
 from pypsa.opt import l_constraint, LConstraint, l_objective, LExpression
 
-from src.helpers import custom_log, xarray_to_ndarray
-from src.tools import read_database, return_filtered_coordinates, selected_data, return_output, \
-    resource_quality_mapping, critical_window_mapping, retrieve_index_dict, critical_data_position_mapping
+from tools import retrieve_index_dict
 
 
-def preprocess_input_data(model_parameters):
-    """Data pre-processing.
-
-    Parameters:
-
-    ------------
-
-    parameters : dict
-        Dict containing parameters of the run.
-
-
-    Returns:
-
-    -----------
-
-    output_dictionary : dict
-        Dict containing various data structures.
-
-    """
-
-    regions = model_parameters['regions']
-    technologies = model_parameters['technologies']
-
-    time_horizon = model_parameters['time_slice']
-    spatial_resolution = model_parameters['spatial_resolution']
-
-    path_resource_data = model_parameters['path_resource_data'] + '/' + str(spatial_resolution)
-    path_transfer_function_data = model_parameters['path_transfer_function_data']
-    path_population_density_data = model_parameters['path_population_density_data']
-    path_land_data = model_parameters['path_land_data']
-    path_load_data = model_parameters['path_load_data']
-    path_shapefile_data = model_parameters['path_shapefile_data']
-
-    resource_quality_layer = model_parameters['resource_quality_layer']
-    population_layer = model_parameters['population_density_layer']
-    protected_areas_layer = model_parameters['protected_areas_layer']
-    bathymetry_layer = model_parameters['bathymetry_layer']
-    forestry_layer = model_parameters['forestry_layer']
-    orography_layer = model_parameters['orography_layer']
-    water_mask_layer = model_parameters['water_mask_layer']
-    latitude_layer = model_parameters['latitude_layer']
-    distance_layer = model_parameters['distance_layer']
-
-    delta = model_parameters['delta']
-    alpha = model_parameters['alpha']
-    measure = model_parameters['smooth_measure']
-    norm_type = model_parameters['norm_type']
-
-    database = read_database(path_resource_data)
-    filtered_coordinates = return_filtered_coordinates(database, spatial_resolution, technologies, regions,
-                                                     path_land_data, path_resource_data,
-                                                     path_shapefile_data, path_population_density_data,
-                                                     resource_quality_layer=resource_quality_layer,
-                                                     population_density_layer=population_layer,
-                                                     protected_areas_layer=protected_areas_layer,
-                                                     orography_layer=orography_layer,
-                                                     forestry_layer=forestry_layer,
-                                                     water_mask_layer=water_mask_layer,
-                                                     bathymetry_layer=bathymetry_layer,
-                                                     latitude_layer=latitude_layer,
-                                                     distance_layer=distance_layer)
-
-    truncated_data = selected_data(database, filtered_coordinates, time_horizon)
-    output_data = return_output(truncated_data, path_transfer_function_data)
-
-    smooth_data = resource_quality_mapping(output_data, delta, measure)
-    critical_data = critical_window_mapping(smooth_data, alpha, delta, regions, time_horizon, path_load_data, norm_type)
-    position_mapping, filtered_coordinates_on_loc = critical_data_position_mapping(critical_data, filtered_coordinates)
-
-    output_dict = {
-                'coordinates_data': filtered_coordinates_on_loc,
-                'capacity_factor_data': output_data,
-                'criticality_data': xarray_to_ndarray(critical_data),
-                'site_positions_in_matrix': position_mapping}
-
-    custom_log(' Input data read...')
-
-    return output_dict
-
-
-def build_model(model_parameters, coordinate_dict, D, output_folder, write_lp=False):
+def build_ip_model(deployment_dict, coordinate_dict, critical_matrix, c, output_folder, write_lp=False):
     """Model build-up.
 
     Parameters:
@@ -104,12 +22,11 @@ def build_model(model_parameters, coordinate_dict, D, output_folder, write_lp=Fa
 
     """
 
-    no_windows = D.shape[0]
-    no_locations = D.shape[1]
+    no_windows = critical_matrix.shape[0]
+    no_locations = critical_matrix.shape[1]
 
-    n, dict_deployment, partitions, indices = retrieve_index_dict(model_parameters, coordinate_dict)
+    n, dict_deployment, partitions, indices = retrieve_index_dict(deployment_dict, coordinate_dict)
 
-    c = model_parameters['solution_method']['BB']['c']
     for item in partitions:
         if item in indices:
             if dict_deployment[item] > len(indices[item]):
@@ -129,7 +46,7 @@ def build_model(model_parameters, coordinate_dict, D, output_folder, write_lp=Fa
     activation_constraint = {}
 
     for w in model.W:
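+        # Window w can only be marked as covered (y[w] = 1) if at least c selected sites have a 1 in row w of the criticality matrix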
-        lhs = LExpression([(D[w - 1, l - 1], model.x[l]) for l in model.L])
+        lhs = LExpression([(critical_matrix[w - 1, l - 1], model.x[l]) for l in model.L])
         rhs = LExpression([(c, model.y[w])])
 
         activation_constraint[w] = LConstraint(lhs, ">=", rhs)
diff --git a/src/tools.py b/src/tools.py
index 4c56fd748677ed408fa119db1b681cdf0ba741fb..cba4a6dab7a2c0fc37cbcfb8c0d91a25b0762b47 100644
--- a/src/tools.py
+++ b/src/tools.py
@@ -10,32 +10,33 @@ import geopy.distance
 import xarray as xr
 import xarray.ufuncs as xu
 from geopandas import read_file
-from numpy import arange, interp, float32, float64, datetime64, sqrt, asarray, newaxis, sum, max, unique, \
+from numpy import arange, interp, float32, datetime64, sqrt, asarray, newaxis, sum, max, unique, \
     radians, cos, sin, arctan2, zeros
 from pandas import read_csv, Series, DataFrame, date_range, concat, MultiIndex, to_datetime
 from shapely.geometry import Point
 from shapely.ops import nearest_points
 from windpowerlib import power_curves, wind_speed
 
-from src.helpers import filter_onshore_offshore_locations, union_regions, return_coordinates_from_shapefiles_light, \
+from helpers import filter_onshore_offshore_locations, union_regions, return_coordinates_from_shapefiles, \
     concatenate_dict_keys, return_dict_keys, chunk_split, collapse_dict_region_level, read_inputs, \
     retrieve_load_data_partitions, get_partition_index, return_region_divisions
 
 
-def read_database(file_path):
+def read_database(data_path, spatial_resolution):
     """
     Reads resource database from .nc files.
 
     Parameters
     ----------
-    file_path : str
-        Relative path to resource data.
+    data_path : str
+        Path to the data folder.
+    spatial_resolution : float
+        Spatial resolution of the resource data.
 
     Returns
     -------
     dataset: xarray.Dataset
 
     """
+    file_path = join(data_path, 'input_data/resource_data', str(spatial_resolution))
     # Read through all files, extract the first 2 characters (giving the
     # macro-region) and append in a list that will keep the unique elements.
     files = [f for f in listdir(file_path) if isfile(join(file_path, f))]
@@ -53,8 +54,8 @@ def read_database(file_path):
         file_list = [file for file in glob(file_path + '/*.nc') if area in file]
         ds = xr.open_mfdataset(file_list,
                                combine='by_coords',
-                               chunks={'latitude': 100, 'longitude': 100}).stack(locations=('longitude', 'latitude')).astype(float32)
-        datasets.append(ds)
+                               chunks={'latitude': 100, 'longitude': 100}).stack(locations=('longitude', 'latitude'))
+        datasets.append(ds.astype(float32))
 
     # Concatenate all regions on locations.
     dataset = xr.concat(datasets, dim='locations')
@@ -69,32 +70,23 @@ def read_database(file_path):
     return dataset
 
 
-def filter_locations_by_layer(tech_dict, regions,
-                              start_coordinates, spatial_resolution,
-                              path_land_data, path_resource_data, path_population_data, path_shapefile_data,
-                              which='dummy', filename='dummy'):
+def filter_locations_by_layer(regions, start_coordinates, model_params, tech_params, which=None):
     """
     Filters (removes) locations from the initial set following various
-    land-, resource-, populatio-based criteria.
+    land-, resource-, population-based criteria.
 
     Parameters
     ----------
-    tech_dict : dict
-        Dict object containing technical parameters and constraints of a given technology.
+    regions : list
+        Region list.
     start_coordinates : list
         List of initial (starting) coordinates.
-    spatial_resolution : float
-        Spatial resolution of the resource data.
-    path_land_data : str
-        Relative path to land data.
-    path_resource_data : str
-        Relative path to resource data.
-    path_population_data : str
-        Relative path to population density data.
+    model_params : dict
+        Model parameters.
+    tech_params : dict
+        Parameters of the technology being filtered.
     which : str
         Filter to be applied.
-    filename : str
-        Name of the file; associated with the filter type.
 
     Returns
     -------
@@ -103,18 +95,17 @@ def filter_locations_by_layer(tech_dict, regions,
 
     """
 
-    start_coordinates = [(round(item[0], 2), round(item[1], 2)) for item in start_coordinates]
+    coords_to_remove = None
+    data_path = model_params['data_path']
 
     if which == 'protected_areas':
 
-        protected_areas_selection = tech_dict['protected_areas_selection']
-        threshold_distance = tech_dict['protected_areas_distance_threshold']
-
+        protected_areas_selection = tech_params['protected_areas_selection']
+        threshold_distance = tech_params['protected_areas_distance_threshold']
         coords_to_remove = []
 
-        R = 6371.
-
-        dataset = read_file(join(path_land_data, filename))
+        areas_fn = join(data_path, 'input_data/land_data', 'WDPA_Feb2019-shapefile-points.shp')
+        dataset = read_file(areas_fn)
 
         lons = []
         lats = []
@@ -122,7 +113,7 @@ def filter_locations_by_layer(tech_dict, regions,
         # Retrieve the geopandas Point objects and their coordinates
         for item in protected_areas_selection:
             for index, row in dataset.iterrows():
-                if (row['IUCN_CAT'] == item):
+                if row['IUCN_CAT'] == item:
                     lons.append(round(row.geometry[0].x, 2))
                     lats.append(round(row.geometry[0].y, 2))
 
@@ -142,47 +133,47 @@ def filter_locations_by_layer(tech_dict, regions,
                 a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
                 c = 2 * arctan2(sqrt(a), sqrt(1 - a))
 
-                distance = R * c
+                distance = 6371. * c
 
                 if distance < threshold_distance:
                     coords_to_remove.append(i)
 
-    elif which == 'resource':
+    elif which == 'resource_quality':
 
-        database = read_database(path_resource_data)
+        database = read_database(data_path, model_params['spatial_resolution'])
 
-        if tech_dict['resource'] == 'wind':
+        if tech_params['resource'] == 'wind':
             array_resource = xu.sqrt(database.u100 ** 2 +
                                      database.v100 ** 2)
-        elif tech_dict['resource'] == 'solar':
+        elif tech_params['resource'] == 'solar':
             array_resource = database.ssrd / 3600.
+        else:
+            raise ValueError (" This resource is not available.")
 
         array_resource_mean = array_resource.mean(dim='time')
-        mask_resource = array_resource_mean.where(array_resource_mean.data < tech_dict['resource_threshold'])
+        mask_resource = array_resource_mean.where(array_resource_mean.data < tech_params['resource_threshold'])
         coords_mask_resource = mask_resource[mask_resource.notnull()].locations.values.tolist()
-
-        coords_mask_rounded = [(round(item[0], 2), round(item[1], 2)) for item in coords_mask_resource]
-
-        coords_to_remove = list(set(start_coordinates).intersection(set(coords_mask_rounded)))
+        coords_mask = [(round(lon, 2), round(lat, 2)) for (lon, lat) in coords_mask_resource]
+        coords_to_remove = sorted(list(set(start_coordinates).intersection(set(coords_mask))))
 
     elif which == 'latitude':
 
-        latitude_threshold = tech_dict['latitude_threshold']
-        coords_mask_latitude = [item for item in start_coordinates if item[1] > latitude_threshold]
-
-        coords_to_remove = list(set(start_coordinates).intersection(set(coords_mask_latitude)))
+        latitude_threshold = tech_params['latitude_threshold']
+        coords_mask_latitude = [(lon, lat) for (lon, lat) in start_coordinates if lat > latitude_threshold]
+        coords_mask = [(round(lon, 2), round(lat, 2)) for (lon, lat) in coords_mask_latitude]
+        coords_to_remove = sorted(list(set(start_coordinates).intersection(set(coords_mask))))
 
     elif which == 'distance':
 
-        distance_threshold_min = tech_dict['distance_threshold_min']
-        distance_threshold_max = tech_dict['distance_threshold_max']
+        distance_threshold_min = tech_params['distance_threshold_min']
+        distance_threshold_max = tech_params['distance_threshold_max']
 
         offshore_points = filter_onshore_offshore_locations(start_coordinates,
-                                                            spatial_resolution,
-                                                            tech_dict,
-                                                            'wind_offshore')
-
-        onshore_shape = union_regions(regions, path_shapefile_data, which='onshore', prepped=False)
+                                                            model_params['data_path'],
+                                                            model_params['spatial_resolution'],
+                                                            tech_params,
+                                                            tech='wind_offshore')
+        onshore_shape = union_regions(regions, data_path, which='onshore', prepped=False)
 
         offshore_distances = {key: None for key in offshore_points}
         for key in offshore_distances:
@@ -194,88 +185,96 @@ def filter_locations_by_layer(tech_dict, regions,
         offshore_locations = {k: v for k, v in offshore_distances.items() if ((v < distance_threshold_min) |
                                                                               (v >= distance_threshold_max))}
         coords_mask_distance = list(offshore_locations.keys())
-        coords_mask_rounded = [(round(item[0], 2), round(item[1], 2)) for item in coords_mask_distance]
-
-        coords_to_remove = list(set(start_coordinates).intersection(set(coords_mask_rounded)))
+        coords_mask = [(round(lon, 2), round(lat, 2)) for (lon, lat) in coords_mask_distance]
+        coords_to_remove = sorted(list(set(start_coordinates).intersection(set(coords_mask))))
 
-    elif which in ['orography', 'forestry', 'water_mask', 'bathymetry']:
+    elif which == 'orography':
 
-        dataset = xr.open_dataset(join(path_land_data, filename)).astype(float32)
+        orography_fn = 'ERA5_orography_characteristics_20181231_' + str(model_params['spatial_resolution']) + '.nc'
+        orography_path = join(data_path, 'input_data/land_data', orography_fn)
+        dataset = xr.open_dataset(orography_path).astype(float32)
         dataset = dataset.sortby([dataset.longitude, dataset.latitude])
         dataset = dataset.assign_coords(longitude=(((dataset.longitude
                                                      + 180) % 360) - 180)).sortby('longitude')
         dataset = dataset.drop('time').squeeze().stack(locations=('longitude', 'latitude'))
 
-        if which == 'orography':
+        altitude_threshold = tech_params['altitude_threshold']
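+        # The ERA5 'z' variable is geopotential (m^2/s^2); dividing by g = 9.80665 m/s^2 yields the altitude in metres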
+        array_altitude = dataset['z'] / 9.80665
 
-            altitude_threshold = tech_dict['altitude_threshold']
-            slope_threshold = tech_dict['terrain_slope_threshold']
+        slope_threshold = tech_params['terrain_slope_threshold']
+        array_slope = dataset['slor']
 
-            array_altitude = dataset['z'] / 9.80665
-            array_slope = dataset['slor']
+        mask_altitude = array_altitude.where(array_altitude.data > altitude_threshold)
+        mask_slope = array_slope.where(array_slope.data > slope_threshold)
 
-            mask_altitude = array_altitude.where(array_altitude.data > altitude_threshold)
-            mask_slope = array_slope.where(array_slope.data > slope_threshold)
+        coords_mask_altitude = mask_altitude[mask_altitude.notnull()].locations.values.tolist()
+        coords_mask_slope = mask_slope[mask_slope.notnull()].locations.values.tolist()
 
-            coords_mask_altitude = mask_altitude[mask_altitude.notnull()].locations.values.tolist()
-            coords_mask_slope = mask_slope[mask_slope.notnull()].locations.values.tolist()
+        coords_mask_orography = list(set(coords_mask_altitude).union(set(coords_mask_slope)))
+        coords_mask = [(round(lon, 2), round(lat, 2)) for (lon, lat) in coords_mask_orography]
+        coords_to_remove = sorted(list(set(start_coordinates).intersection(set(coords_mask))))
 
-            coords_mask_orography = list(set(coords_mask_altitude).union(set(coords_mask_slope)))
-            coords_mask_orography_round = [(round(lon, 2), round(lat, 2)) for (lon, lat) in coords_mask_orography]
+    elif which in ['forestry', 'water_mask', 'bathymetry']:
 
-            coords_to_remove = list(set(start_coordinates).intersection(set(coords_mask_orography_round)))
-
-        elif which == 'forestry':
+        surface_fn = 'ERA5_surface_characteristics_20181231_' + str(model_params['spatial_resolution']) + '.nc'
+        surface_path = join(data_path, 'input_data/land_data', surface_fn)
+        dataset = xr.open_dataset(surface_path).astype(float32)
+        dataset = dataset.sortby([dataset.longitude, dataset.latitude])
+        dataset = dataset.assign_coords(longitude=(((dataset.longitude
+                                                     + 180) % 360) - 180)).sortby('longitude')
+        dataset = dataset.drop('time').squeeze().stack(locations=('longitude', 'latitude'))
 
-            forestry_threshold = tech_dict['forestry_ratio_threshold']
+        if which == 'forestry':
 
+            forestry_threshold = tech_params['forestry_ratio_threshold']
             array_forestry = dataset['cvh']
 
             mask_forestry = array_forestry.where(array_forestry.data >= forestry_threshold)
             coords_mask_forestry = mask_forestry[mask_forestry.notnull()].locations.values.tolist()
-            coords_mask_forestry_round = [(round(lon, 2), round(lat, 2)) for (lon, lat) in coords_mask_forestry]
-
-            coords_to_remove = list(set(start_coordinates).intersection(set(coords_mask_forestry_round)))
+            coords_mask = [(round(lon, 2), round(lat, 2)) for (lon, lat) in coords_mask_forestry]
+            coords_to_remove = sorted(list(set(start_coordinates).intersection(set(coords_mask))))
 
         elif which == 'water_mask':
 
+            watermask_threshold = 0.4
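+            # Cells whose land-sea mask (lsm) share falls below this threshold are treated as predominantly water and removed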
             array_watermask = dataset['lsm']
 
-            mask_watermask = array_watermask.where(array_watermask.data < 0.4)
+            mask_watermask = array_watermask.where(array_watermask.data < watermask_threshold)
             coords_mask_watermask = mask_watermask[mask_watermask.notnull()].locations.values.tolist()
-
-            coords_mask_rounded = [(round(lon, 2), round(lat, 2)) for (lon, lat) in coords_mask_watermask]
-            coords_to_remove = list(set(start_coordinates).intersection(set(coords_mask_rounded)))
+            coords_mask = [(round(lon, 2), round(lat, 2)) for (lon, lat) in coords_mask_watermask]
+            coords_to_remove = sorted(list(set(start_coordinates).intersection(set(coords_mask))))
 
         elif which == 'bathymetry':
 
-            depth_threshold_low = tech_dict['depth_threshold_low']
-            depth_threshold_high = tech_dict['depth_threshold_high']
+            depth_threshold_low = tech_params['depth_threshold_low']
+            depth_threshold_high = tech_params['depth_threshold_high']
 
             array_watermask = dataset['lsm']
-            # Careful with this one because max depth is 999.
+            # max depth is 999.
             array_bathymetry = dataset['wmb'].fillna(0.)
 
             mask_offshore = array_bathymetry.where(((array_bathymetry.data < depth_threshold_low) |
                                                     (array_bathymetry.data > depth_threshold_high)) |
                                                     (array_watermask.data > 0.1))
             coords_mask_offshore = mask_offshore[mask_offshore.notnull()].locations.values.tolist()
-            coords_mask_rounded = [(round(lon, 2), round(lat, 2)) for (lon, lat) in coords_mask_offshore]
-
-            coords_to_remove = list(set(start_coordinates).intersection(set(coords_mask_rounded)))
+            coords_mask = [(round(lon, 2), round(lat, 2)) for (lon, lat) in coords_mask_offshore]
+            coords_to_remove = sorted(list(set(start_coordinates).intersection(set(coords_mask))))
 
-    elif which == 'population':
+    elif which == 'population_density':
 
-        dataset = xr.open_dataset(join(path_population_data, filename))
+        population_fn = 'gpw_v4_population_density_adjusted_rev11_0.5.nc'
+        population_path = join(data_path, 'input_data/population_density', population_fn)
+        dataset = xr.open_dataset(population_path)
 
         varname = [item for item in dataset.data_vars][0]
         dataset = dataset.rename({varname: 'data'})
         # The value of 5 for "raster" fetches data for the latest estimate available in the dataset, that is, 2020.
         data_pop = dataset.sel(raster=5)
 
-        array_pop_density = data_pop['data'].interp(longitude=sorted(list(set([item[0] for item in start_coordinates]))),
-                                                    latitude=sorted(list(set([item[1] for item in start_coordinates])))[::-1],
-                                                    method='nearest').fillna(0.)
+        array_pop_density = \
+            data_pop['data'].interp(longitude=sorted(list(set([item[0] for item in start_coordinates]))),
+                                    latitude=sorted(list(set([item[1] for item in start_coordinates])))[::-1],
+                                    method='nearest').fillna(0.)
         # Temporary, to reduce the size of this ds, which is anyway read in each iteration.
         min_lon, max_lon, min_lat, max_lat = -11., 32., 35., 80.
         mask_lon = (array_pop_density.longitude >= min_lon) & (array_pop_density.longitude <= max_lon)
@@ -283,83 +282,33 @@ def filter_locations_by_layer(tech_dict, regions,
 
         pop_ds = array_pop_density.where(mask_lon & mask_lat, drop=True).stack(locations=('longitude', 'latitude'))
 
-        population_density_threshold_low = tech_dict['population_density_threshold_low']
-        population_density_threshold_high = tech_dict['population_density_threshold_high']
+        population_density_threshold_low = tech_params['population_density_threshold_low']
+        population_density_threshold_high = tech_params['population_density_threshold_high']
 
         mask_population = pop_ds.where((pop_ds.data < population_density_threshold_low) |
                                        (pop_ds.data > population_density_threshold_high))
         coords_mask_population = mask_population[mask_population.notnull()].locations.values.tolist()
-        coords_to_remove = list(set(start_coordinates).intersection(set(coords_mask_population)))
+        coords_mask = [(round(lon, 2), round(lat, 2)) for (lon, lat) in coords_mask_population]
+        coords_to_remove = sorted(list(set(start_coordinates).intersection(set(coords_mask))))
 
     else:
-
         raise ValueError(' Layer {} is not available.'.format(str(which)))
 
     return coords_to_remove
 
 
-def return_filtered_coordinates(dataset, spatial_resolution, technologies, regions,
-                                path_land_data, path_resource_data, path_shapefile_data, path_population_data,
-                                resource_quality_layer=True, population_density_layer=True, protected_areas_layer=False,
-                                orography_layer=True, forestry_layer=True, water_mask_layer=True, bathymetry_layer=True,
-                                latitude_layer=True, distance_layer=True):
+def return_filtered_coordinates(dataset, model_params, tech_params):
     """
     Returns the set of potential deployment locations for each region and available technology.
 
     Parameters
     ----------
-    coordinates_in_region : list
-        List of coordinate pairs in the region of interest.
-    spatial_resolution : float
-        Spatial resolution of the resource data.
-    technologies : list
-        List of available technologies.
-    regions : list
-        List of regions.
-    path_land_data : str
-
-    path_resource_data : str
-
-    path_shapefile_data : str
-
-    path_population_data : str
-
-    resource_quality_layer : bool
-        "True" if the layer to be applied, "False" otherwise. If taken into account,
-        it discards points in coordinates_in_region based on the average resource
-        quality over the available time horizon. Resource quality threshold defined
-        in the config_tech.yaml file.
-    population_density_layer : bool
-        "True" if the layer to be applied, "False" otherwise. If taken into account,
-        it discards points in coordinates_in_region based on the population density.
-        Population density threshold defined in the config_tech.yaml file per each
-        available technology.
-    protected_areas_layer : bool
-        "True" if the layer to be applied, "False" otherwise. If taken into account,
-        it discards points in coordinates_in_region based on the existance of protected
-        areas in their vicinity. Distance threshold, as well as classes of areas are
-         defined in the config_tech.yaml file.
-    orography_layer : bool
-        "True" if the layer to be applied, "False" otherwise. If taken into account,
-        it discards points in coordinates_in_region based on their altitude and terrain
-        slope. Both thresholds defined in the config_tech.yaml file for each individual
-        technology.
-    forestry_layer : bool
-        "True" if the layer to be applied, "False" otherwise. If taken into account,
-        it discards points in coordinates_in_region based on its forest cover share.
-        Forest share threshold above which technologies are not built are defined
-        in the config_tech.yaml file.
-    water_mask_layer : bool
-        "True" if the layer to be applied, "False" otherwise. If taken into account,
-        it discards points in coordinates_in_region based on the water coverage share.
-        Threshold defined in the config_tech.yaml file.
-    bathymetry_layer : bool
-        "True" if the layer to be applied, "False" otherwise. If taken into account,
-        (valid for offshore technologies) it discards points in coordinates_in_region
-        based on the water depth. Associated threshold defined in the config_tech.yaml
-        file for offshore and floating wind, respectively.
-    latitude_layer: bool
-    distance_layer: bool
+    dataset: xr.Dataset
+        Resource dataset.
+    model_params: dict
+        Model parameters.
+    tech_params: dict
+        Technology parameters.
 
     Returns
     -------
@@ -367,173 +316,51 @@ def return_filtered_coordinates(dataset, spatial_resolution, technologies, regio
         Dict object storing potential locations sets per region and technology.
 
     """
-    tech_config = read_inputs('../config_techs.yml')
+    technologies = model_params['technologies']
+    regions = model_params['regions']
     output_dict = {region: {tech: None for tech in technologies} for region in regions}
-    final_coordinates = {key: None for key in technologies}
+    coordinates_dict = {key: None for key in technologies}
 
-    region_shape = union_regions(regions, path_shapefile_data, which='both')
+    region_shape = union_regions(regions, model_params['data_path'], which='both')
+    coordinates = return_coordinates_from_shapefiles(dataset, region_shape)
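+    # Keep both the original coordinates and their 2-decimal rounded counterparts used by the filtering layers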
+    start_coordinates = {(lon, lat): None for (lon, lat) in coordinates}
+    for (lon, lat) in start_coordinates:
+        start_coordinates[(lon, lat)] = (round(lon, 2), round(lat, 2))
 
     for tech in technologies:
+        coords_to_remove = []
+        tech_dict = tech_params[tech]
 
-        tech_dict = tech_config[tech]
-        start_coordinates = return_coordinates_from_shapefiles_light(dataset, region_shape)
-        start_coordinates_dict = {k: None for k in start_coordinates}
-        for k in start_coordinates_dict.keys():
-            start_coordinates_dict[k] = (round(k[0], 2), round(k[1], 2))
-
-        if resource_quality_layer:
-
-            coords_to_remove_resource = \
-                filter_locations_by_layer(tech_dict, regions, start_coordinates, spatial_resolution,
-                                          path_land_data, path_resource_data, path_population_data, path_shapefile_data,
-                                          which='resource')
-        else:
-            coords_to_remove_resource = []
-
-        if latitude_layer:
-
-            coords_to_remove_latitude = \
-                filter_locations_by_layer(tech_dict, regions, start_coordinates, spatial_resolution,
-                                          path_land_data, path_resource_data, path_population_data, path_shapefile_data,
-                                          which='latitude')
-        else:
-            coords_to_remove_latitude = []
-
-        if tech_dict['deployment'] in ['onshore', 'utility', 'residential']:
-
-            if population_density_layer:
-                filename = 'gpw_v4_population_density_adjusted_rev11_0.5.nc'
-                coords_to_remove_population = \
-                    filter_locations_by_layer(tech_dict, regions, start_coordinates, spatial_resolution,
-                                              path_land_data, path_resource_data, path_population_data,
-                                              path_shapefile_data,
-                                              which='population', filename=filename)
-
-            else:
-                coords_to_remove_population = []
-
-            if protected_areas_layer:
-                filename = 'WDPA_Feb2019-shapefile-points.shp'
-                coords_to_remove_protected_areas = \
-                    filter_locations_by_layer(tech_dict, regions, start_coordinates, spatial_resolution,
-                                              path_land_data, path_resource_data, path_population_data,
-                                              path_shapefile_data,
-                                              which='protected_areas', filename=filename)
-            else:
-                coords_to_remove_protected_areas = []
-
-            if orography_layer:
-                filename = 'ERA5_orography_characteristics_20181231_' + str(spatial_resolution) + '.nc'
-                coords_to_remove_orography = \
-                    filter_locations_by_layer(tech_dict, regions, start_coordinates, spatial_resolution,
-                                              path_land_data, path_resource_data, path_population_data,
-                                              path_shapefile_data,
-                                              which='orography', filename=filename)
-            else:
-                coords_to_remove_orography = []
-
-            if forestry_layer:
-                filename = 'ERA5_surface_characteristics_20181231_' + str(spatial_resolution) + '.nc'
-                coords_to_remove_forestry = \
-                    filter_locations_by_layer(tech_dict, regions, start_coordinates, spatial_resolution,
-                                              path_land_data, path_resource_data, path_population_data,
-                                              path_shapefile_data,
-                                              which='forestry', filename=filename)
-            else:
-                coords_to_remove_forestry = []
-
-            if water_mask_layer:
-                filename = 'ERA5_surface_characteristics_20181231_' + str(spatial_resolution) + '.nc'
-                coords_to_remove_water = \
-                    filter_locations_by_layer(tech_dict, regions, start_coordinates, spatial_resolution,
-                                              path_land_data, path_resource_data, path_population_data,
-                                              path_shapefile_data,
-                                              which='water_mask', filename=filename)
-
-            else:
-                coords_to_remove_water = []
-
-            list_coords_to_remove = [coords_to_remove_resource,
-                                     coords_to_remove_latitude,
-                                     coords_to_remove_population,
-                                     coords_to_remove_protected_areas,
-                                     coords_to_remove_orography,
-                                     coords_to_remove_forestry,
-                                     coords_to_remove_water]
-            coords_to_remove = set().union(*list_coords_to_remove)
-
-            # Set difference between "global" coordinates and the sets computed in this function.
-            updated_coordinates = set(start_coordinates_dict.values()) - coords_to_remove
-
-        elif tech_dict['deployment'] in ['offshore', 'floating']:
-
-            if bathymetry_layer:
-                filename = 'ERA5_surface_characteristics_20181231_' + str(spatial_resolution) + '.nc'
-                coords_to_remove_bathymetry = \
-                    filter_locations_by_layer(tech_dict, regions, start_coordinates, spatial_resolution,
-                                              path_land_data, path_resource_data, path_population_data,
-                                              path_shapefile_data,
-                                              which='bathymetry', filename=filename)
-            else:
-                coords_to_remove_bathymetry = []
-
-            if distance_layer:
-
-                coords_to_remove_distance = \
-                    filter_locations_by_layer(tech_dict, regions, start_coordinates, spatial_resolution,
-                                              path_land_data, path_resource_data, path_population_data,
-                                              path_shapefile_data,
-                                              which='distance')
-
-            else:
-
-                coords_to_remove_distance = []
-
-            list_coords_to_remove = [coords_to_remove_resource,
-                                     coords_to_remove_bathymetry,
-                                     coords_to_remove_distance,
-                                     coords_to_remove_latitude]
-            coords_to_remove = set().union(*list_coords_to_remove)
-            updated_coordinates = set(start_coordinates_dict.values()) - coords_to_remove
-
-        final_coordinates[tech] = [key for key, value in start_coordinates_dict.items() if value in updated_coordinates]
-
-        # if len(final_coordinates[tech]) > 0:
-        #    from src.helpers import plot_basemap
-        #    plot_basemap(final_coordinates[tech], title=tech)
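+        # Apply each filter layer configured for this technology and collect the coordinates to discard.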
+        for layer in tech_dict['filters']:
+            to_remove_from_filter = filter_locations_by_layer(regions, list(start_coordinates.values()),
+                                                              model_params, tech_dict, which=layer)
+            coords_to_remove.extend(to_remove_from_filter)
 
-    for tech in technologies:
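+        # Map the retained coordinate values back to their original keys in start_coordinates.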
+        original_coordinates_list = []
+        start_coordinates_reversed = dict(map(reversed, start_coordinates.items()))
+        for item in set(start_coordinates.values()).difference(set(coords_to_remove)):
+            original_coordinates_list.append(start_coordinates_reversed[item])
+        coordinates_dict[tech] = original_coordinates_list
 
-        list_points_unique = []
+        unique_list_of_points = []
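+        # Assign each retained site to a single region: sites already attributed to a previous region are skipped.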
         for region in regions:
 
-            shape_region = union_regions([region], path_shapefile_data, which='both')
-            points_in_region = return_coordinates_from_shapefiles_light(dataset, shape_region)
+            shape_region = union_regions([region], model_params['data_path'], which='both')
+            points_in_region = return_coordinates_from_shapefiles(dataset, shape_region)
 
-            points_to_take = list(set(final_coordinates[tech]).intersection(set(points_in_region)))
-            output_dict[region][tech] = [p for p in points_to_take if p not in list_points_unique]
-            list_points_unique.extend(points_to_take)
+            points_to_keep = list(set(coordinates_dict[tech]).intersection(set(points_in_region)))
+            output_dict[region][tech] = [p for p in points_to_keep if p not in unique_list_of_points]
+            unique_list_of_points.extend(points_to_keep)
 
-            if len(points_to_take) > 0:
-                print(region, tech, len(points_to_take))
+            if len(output_dict[region][tech]) > 0:
+                print(f"{len(output_dict[region][tech])} {tech} sites in {region}.")
             else:
-                print('{}, {} has no point.'.format(region, tech))
+                print(f"No {tech} sites in {region}.")
 
     for key, value in output_dict.items():
         output_dict[key] = {k: v for k, v in output_dict[key].items() if len(v) > 0}
 
-    result_dict = {r: {t: [] for t in technologies} for r in regions}
-    for region, tech in return_dict_keys(output_dict):
-        added_items = []
-        coords_region_tech = output_dict[region][tech]
-        for item in coords_region_tech:
-            if item in added_items:
-                continue
-            else:
-                result_dict[region][tech].append(item)
-                added_items.append(item)
-
-    return result_dict
+    return output_dict
 
 
 def selected_data(dataset, input_dict, time_slice):
@@ -582,7 +409,7 @@ def selected_data(dataset, input_dict, time_slice):
     return output_dict
 
 
-def return_output(input_dict, path_to_transfer_function, smooth_wind_power_curve=True):
+def return_output(input_dict, data_path, smooth_wind_power_curve=True):
     """
     Applies transfer function to raw resource data.
 
@@ -590,7 +417,7 @@ def return_output(input_dict, path_to_transfer_function, smooth_wind_power_curve
     ----------
     input_dict : dict
         Dict object storing raw resource data.
-    path_to_transfer_function : str
+    data_path : str
-        Relative path to transfer function data.
+        Relative path to the data folder from which the transfer function files are read.
     smooth_wind_power_curve : boolean
         If "True", the transfer function of wind assets replicates the one of a wind farm,
@@ -608,8 +435,10 @@ def return_output(input_dict, path_to_transfer_function, smooth_wind_power_curve
     output_dict = deepcopy(input_dict)
     tech_dict = read_inputs('../config_techs.yml')
 
-    data_converter_wind = read_csv(join(path_to_transfer_function, 'data_wind_turbines.csv'), sep=';', index_col=0)
-    data_converter_solar = read_csv(join(path_to_transfer_function, 'data_solar_modules.csv'), sep=';', index_col=0)
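+    # Characteristics of the available wind turbine and solar module converters (transfer functions).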
+    wind_data_path = join(data_path, 'input_data/transfer_functions', 'data_wind_turbines.csv')
+    data_converter_wind = read_csv(wind_data_path, sep=';', index_col=0)
+    solar_data_path = join(data_path, 'input_data/transfer_functions', 'data_solar_modules.csv')
+    data_converter_solar = read_csv(solar_data_path, sep=';', index_col=0)
 
     for region, tech in key_list:
 
@@ -666,12 +495,12 @@ def return_output(input_dict, path_to_transfer_function, smooth_wind_power_curve
 
                     if smooth_wind_power_curve:
                         # Windpowerlib function here:
-                        # https://windpowerlib.readthedocs.io/en/latest/temp/windpowerlib.power_curves.smooth_power_curve.html
-                        capacity_factor_farm = power_curves.smooth_power_curve(Series(wind_speed_references),
-                                                                               Series(capacity_factor_references_pu),
-                                                                               standard_deviation_method='turbulence_intensity',
-                                                                               turbulence_intensity=float(
-                                                                                   ti.mean().values))
+                        # windpowerlib.readthedocs.io/en/latest/temp/windpowerlib.power_curves.smooth_power_curve.html
+                        capacity_factor_farm = \
+                            power_curves.smooth_power_curve(Series(wind_speed_references),
+                                                            Series(capacity_factor_references_pu),
+                                                            standard_deviation_method='turbulence_intensity',
+                                                            turbulence_intensity=float(ti.mean().values))
 
                         power_output = da.map_blocks(interp, wind_data,
                                                      capacity_factor_farm['wind_speed'].values,
@@ -707,10 +536,10 @@ def return_output(input_dict, path_to_transfer_function, smooth_wind_power_curve
             # Homer equation here:
             # https://www.homerenergy.com/products/pro/docs/latest/how_homer_calculates_the_pv_array_power_output.html
             # https://enphase.com/sites/default/files/Enphase_PVWatts_Derate_Guide_ModSolar_06-2014.pdf
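+            # i.e., P = f * (G / G_ref) * (1 + k_P / 100 * (T - T_ref)), with G the irradiance and T the temperature series.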
-            power_output = (float(data_converter_solar.loc['f', converter]) * \
-                            (irradiance / float(data_converter_solar.loc['G_ref', converter])) * \
-                            (1. + float(data_converter_solar.loc['k_P [%/C]', converter]) / 100. * \
-                             (temperature - float(data_converter_solar.loc['t_ref', converter]))))
+            power_output = (float(data_converter_solar.loc['f', converter]) *
+                            (irradiance / float(data_converter_solar.loc['G_ref', converter])) *
+                            (1. + float(data_converter_solar.loc['k_P [%/C]', converter]) / 100. *
+                            (temperature - float(data_converter_solar.loc['t_ref', converter]))))
 
             output_array = xr.DataArray(power_output, coords=coordinates, dims=dimensions)
 
@@ -719,15 +548,17 @@ def return_output(input_dict, path_to_transfer_function, smooth_wind_power_curve
 
         output_array = output_array.where(output_array > 0.01, other=0.0)
         output_dict[region][tech] = output_array.reindex_like(input_dict[region][tech])
-        # output_array.mean(dim='time').unstack('locations').plot(x='longitude', y='latitude')
-        # plt.show()
 
     return output_dict
 
+
 ##############################################################################
-def resource_quality_mapping(input_dict, delta, measure):
-    key_list = return_dict_keys(input_dict)
+def resource_quality_mapping(input_dict, siting_params):
+
+    delta = siting_params['delta']
+    measure = siting_params['smooth_measure']
 
+    key_list = return_dict_keys(input_dict)
     output_dict = deepcopy(input_dict)
 
     for region, tech in key_list:
@@ -757,11 +588,16 @@ def resource_quality_mapping(input_dict, delta, measure):
     return output_dict
 
 
-def critical_window_mapping(input_dict,
-                            alpha, delta,
-                            regions, date_slice, path_load_data, norm_type):
-    key_list = return_dict_keys(input_dict)
+def critical_window_mapping(input_dict, model_params):
+
+    regions = model_params['regions']
+    date_slice = model_params['time_slice']
+    alpha = model_params['siting_params']['alpha']
+    delta = model_params['siting_params']['delta']
+    norm_type = model_params['siting_params']['norm_type']
+    path_load_data = join(model_params['data_path'], 'input_data/load_data')
 
+    key_list = return_dict_keys(input_dict)
     output_dict = deepcopy(input_dict)
 
     if alpha == 'load_central':
@@ -791,16 +627,15 @@ def critical_window_mapping(input_dict,
     return output_dict
 
 
-def critical_data_position_mapping(input_dict, coordinates_data):
+def sites_position_mapping(input_dict):
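+    # Assign each (tech, location) pair a sequential index, used as its column position in the criticality matrix.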
 
     key_list = return_dict_keys(input_dict)
     locations_list = []
     for region, tech in key_list:
         locations_list.extend([(tech, loc) for loc in input_dict[region][tech].locations.values.flatten()])
-        coordinates_data[region][tech] = input_dict[region][tech].locations.values.flatten()
     locations_dict = dict(zip(locations_list, list(arange(len(locations_list)))))
 
-    return locations_dict, coordinates_data
+    return locations_dict
 
 ##########################################################
 
@@ -828,30 +663,19 @@ def retrieve_location_dict(x_values, model_parameters, site_positions):
     return output_dict
 
 
-def retrieve_index_dict(model_parameters, coordinate_dict):
+def retrieve_index_dict(deployment_vector, coordinate_dict):
 
-    d = model_parameters['deployment_vector']
-    if isinstance(d[list(d.keys())[0]], int):
-        dict_deployment = d
-        n = sum(dict_deployment[item] for item in dict_deployment)
-        partitions = [item for item in d]
-        if model_parameters['deployment_constraint'] == 'country':
-            indices = get_partition_index(coordinate_dict, d, capacity_split='per_country')
-        elif model_parameters['deployment_constraint'] == 'tech':
-            indices = get_partition_index(coordinate_dict, d, capacity_split='per_tech')
-    else:
-        dict_deployment = concatenate_dict_keys(d)
-        n = sum(dict_deployment[item] for item in dict_deployment)
-        partitions = [item for item in dict_deployment]
-        indices = concatenate_dict_keys(get_partition_index(coordinate_dict, d, capacity_split='per_country_and_tech'))
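+    # Flatten the per-region, per-technology deployment targets into (region, tech) keys and get the matching site index partitions.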
+    dict_deployment = concatenate_dict_keys(deployment_vector)
+    n = sum(dict_deployment[item] for item in dict_deployment)
+    partitions = [item for item in dict_deployment]
+    indices = concatenate_dict_keys(get_partition_index(coordinate_dict))
 
     return n, dict_deployment, partitions, indices
 
 
-def retrieve_site_data(model_parameters, coordinates_dict, output_data, criticality_data, location_mapping, c,
-                       site_coordinates, objective, output_folder):
+def retrieve_site_data(model_parameters, deployment_dict, coordinates_dict, output_data, criticality_data,
+                       location_mapping, c, site_coordinates, objective, output_folder, benchmarks=True):
 
-    deployment_dict = model_parameters['deployment_vector']
     output_by_tech = collapse_dict_region_level(output_data)
     time_slice = model_parameters['time_slice']
     time_dt = date_range(start=time_slice[0], end=time_slice[1], freq='H')
@@ -860,6 +684,16 @@ def retrieve_site_data(model_parameters, coordinates_dict, output_data, critical
         _, index = unique(output_by_tech[tech].locations, return_index=True)
         output_by_tech[tech] = output_by_tech[tech].isel(locations=index)
 
+    # Init coordinate set.
+    tech_dict = {key: [] for key in list(output_by_tech.keys())}
+    for tech in tech_dict:
+        for region in coordinates_dict.keys():
+            for t in coordinates_dict[region].keys():
+                if t == tech:
+                    tech_dict[tech].extend(sorted(coordinates_dict[region][t], key=lambda x: (x[0], x[1])))
+
+    pickle.dump(tech_dict, open(join(output_folder, 'init_coordinates_dict.p'), 'wb'))
+
     # Retrieve complementary sites
     comp_site_data = {key: None for key in site_coordinates}
     for item in site_coordinates:
@@ -872,111 +706,96 @@ def retrieve_site_data(model_parameters, coordinates_dict, output_data, critical
     reform = {(outerKey, innerKey): values for outerKey, innerDict in comp_site_data.items() for innerKey, values in
               innerDict.items()}
     comp_site_data_df = DataFrame(reform, index=time_dt)
-
-    if model_parameters['solution_method']['RAND']['set']:
-        name = 'rand_site_data.p'
-    else:
-        name = 'comp_site_data.p'
-    pickle.dump(comp_site_data_df, open(join(output_folder, name), 'wb'))
-    # obj_comp = get_objective_from_mapfile(comp_site_data_df, location_mapping, criticality_data, c)
+    pickle.dump(comp_site_data_df, open(join(output_folder, 'comp_site_data.p'), 'wb'))
+    # The same is done for any other deployment scheme obtained via the Julia heuristics.
 
     with open(join(output_folder, 'objective_comp.txt'), "w") as file:
         print(objective, file=file)
 
-    # Retrieve max sites
-    key_list = return_dict_keys(output_data)
-    output_location = deepcopy(output_data)
+    if benchmarks:
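+        # Benchmark selections: production-maximising and capacity-credit sites, with their objective values for comparison.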
 
-    for region, tech in key_list:
-        n = deployment_dict[region][tech]
-        output_data_sum = output_data[region][tech].sum(dim='time')
-        if n != 0:
-            locs = output_data_sum.argsort()[-n:].values
-            output_location[region][tech] = sorted(output_data_sum.isel(locations=locs).locations.values.flatten())
-        else:
-            output_location[region][tech] = None
-
-    output_location_per_tech = {key: [] for key in model_parameters['technologies']}
-    for region in output_location:
-        for tech in output_location[region]:
-            if output_location[region][tech] is not None:
-                for item in output_location[region][tech]:
-                    output_location_per_tech[tech].append(item)
-
-    max_site_data = {key: None for key in model_parameters['technologies']}
-    for item in model_parameters['technologies']:
-        max_site_data[item] = {key: None for key in output_location_per_tech[item]}
+        # Retrieve max sites
+        key_list = return_dict_keys(output_data)
+        output_location = deepcopy(output_data)
 
-    for tech in max_site_data.keys():
-        for coord in max_site_data[tech].keys():
-            max_site_data[tech][coord] = output_by_tech[tech].sel(locations=coord).values.flatten()
-
-    reform = {(outerKey, innerKey): values for outerKey, innerDict in max_site_data.items() for innerKey, values in
-              sorted(innerDict.items())}
-    max_site_data_df = DataFrame(reform, index=time_dt)
-    pickle.dump(max_site_data_df, open(join(output_folder, 'prod_site_data.p'), 'wb'))
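+        # For each (region, tech), keep the n sites with the largest summed output over the full time horizon.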
+        for region, tech in key_list:
+            n = deployment_dict[region][tech]
+            output_data_sum = output_data[region][tech].sum(dim='time')
+            if n != 0:
+                locs = output_data_sum.argsort()[-n:].values
+                output_location[region][tech] = sorted(output_data_sum.isel(locations=locs).locations.values.flatten())
+            else:
+                output_location[region][tech] = None
 
-    objective_prod = get_objective_from_mapfile(max_site_data_df, location_mapping, criticality_data, c)
-    with open(join(output_folder, 'objective_prod.txt'), "w") as file:
-        print(objective_prod, file=file)
+        output_location_per_tech = {key: [] for key in model_parameters['technologies']}
+        for region in output_location:
+            for tech in output_location[region]:
+                if output_location[region][tech] is not None:
+                    for item in output_location[region][tech]:
+                        output_location_per_tech[tech].append(item)
 
-    # Capacity credit sites.
+        max_site_data = {key: None for key in model_parameters['technologies']}
+        for item in model_parameters['technologies']:
+            max_site_data[item] = {key: None for key in output_location_per_tech[item]}
 
-    load_data = read_csv(join(model_parameters['path_load_data'], 'load_2009_2018.csv'), index_col=0)
-    load_data.index = to_datetime(load_data.index)
-    load_data = load_data[(load_data.index > time_slice[0]) & (load_data.index < time_slice[1])]
+        for tech in max_site_data.keys():
+            for coord in max_site_data[tech].keys():
+                max_site_data[tech][coord] = output_by_tech[tech].sel(locations=coord).values.flatten()
 
-    df_list = []
-    no_index = 0.05 * len(load_data.index)
-    for country in deployment_dict:
+        reform = {(outerKey, innerKey): values for outerKey, innerDict in max_site_data.items() for innerKey, values in
+                  sorted(innerDict.items())}
+        max_site_data_df = DataFrame(reform, index=time_dt)
+        pickle.dump(max_site_data_df, open(join(output_folder, 'prod_site_data.p'), 'wb'))
 
-        if country in load_data.columns:
-            load_country = load_data.loc[:, country]
-        else:
-            countries = return_region_divisions([country], model_parameters['path_shapefile_data'])
-            load_country = load_data.loc[:, countries].sum(axis=1)
-        load_country_max = load_country.nlargest(int(no_index)).index
+        objective_prod = get_objective_from_mapfile(max_site_data_df, location_mapping, criticality_data, c)
+        with open(join(output_folder, 'objective_prod.txt'), "w") as file:
+            print(objective_prod, file=file)
 
-        for tech in model_parameters['technologies']:
+        # Capacity credit sites.
 
-            country_data = output_data[country][tech]
-            country_data_avg_at_load_max = country_data.sel(time=load_country_max).mean(dim='time')
-            locs = country_data_avg_at_load_max.argsort()[-deployment_dict[country][tech]:].values
-            country_sites = country_data_avg_at_load_max.isel(locations=locs).locations.values.flatten()
+        load_data_fn = join(model_parameters['data_path'], 'input_data/load_data', 'load_2009_2018.csv')
+        load_data = read_csv(load_data_fn, index_col=0)
+        load_data.index = to_datetime(load_data.index)
+        load_data = load_data[(load_data.index > time_slice[0]) & (load_data.index < time_slice[1])]
 
-            xarray_data = country_data.sel(locations=country_sites)
-            df_data = xarray_data.to_pandas()
-            col_list_updated = [(tech, item) for item in df_data.columns]
-            df_data.columns = MultiIndex.from_tuples(col_list_updated)
-            df_list.append(df_data)
+        df_list = []
+        no_index = 0.05 * len(load_data.index)
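+        # Sites are ranked by their mean output over the 5% highest-load hours of each country.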
+        for country in deployment_dict:
 
-    capv_site_data_df = concat(df_list, axis=1)
-    pickle.dump(capv_site_data_df, open(join(output_folder, 'capv_site_data.p'), 'wb'))
+            if country in load_data.columns:
+                load_country = load_data.loc[:, country]
+            else:
+                countries = return_region_divisions([country], model_parameters['data_path'])
+                load_country = load_data.loc[:, countries].sum(axis=1)
+            load_country_max = load_country.nlargest(int(no_index)).index
 
-    objective_capv = get_objective_from_mapfile(capv_site_data_df, location_mapping, criticality_data, c)
-    with open(join(output_folder, 'objective_capv.txt'), "w") as file:
-        print(objective_capv, file=file)
+            for tech in model_parameters['technologies']:
 
-    # Init coordinate set.
+                country_data = output_data[country][tech]
+                country_data_avg_at_load_max = country_data.sel(time=load_country_max).mean(dim='time')
+                locs = country_data_avg_at_load_max.argsort()[-deployment_dict[country][tech]:].values
+                country_sites = country_data_avg_at_load_max.isel(locations=locs).locations.values.flatten()
 
-    tech_dict = {key: [] for key in list(comp_site_data.keys())}
-    for tech in tech_dict:
-        for region in coordinates_dict.keys():
-            for t in coordinates_dict[region].keys():
-                if t == tech:
-                    tech_dict[tech].extend(sorted(coordinates_dict[region][t], key=lambda x: (x[0], x[1])))
+                xarray_data = country_data.sel(locations=country_sites)
+                df_data = xarray_data.to_pandas()
+                col_list_updated = [(tech, item) for item in df_data.columns]
+                df_data.columns = MultiIndex.from_tuples(col_list_updated)
+                df_list.append(df_data)
 
-    pickle.dump(tech_dict, open(join(output_folder, 'init_coordinates_dict.p'), 'wb'))
+        capv_site_data_df = concat(df_list, axis=1)
+        pickle.dump(capv_site_data_df, open(join(output_folder, 'capv_site_data.p'), 'wb'))
 
-    return output_location
+        objective_capv = get_objective_from_mapfile(capv_site_data_df, location_mapping, criticality_data, c)
+        with open(join(output_folder, 'objective_capv.txt'), "w") as file:
+            print(objective_capv, file=file)
 
 
-def get_objective_from_mapfile(df_sites, mapping_file, D, c):
+def get_objective_from_mapfile(df_sites, mapping_file, criticality_data, c):
 
     sites = [item for item in df_sites.columns]
     positions_in_matrix = [mapping_file[s] for s in sites]
 
-    xs = zeros(shape=D.shape[1])
+    xs = zeros(shape=criticality_data.shape[1])
     xs[positions_in_matrix] = 1
 
-    return (D.dot(xs) >= c).astype(int).sum()
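+    # Count the time windows in which the criticality indicators of the selected sites sum to at least c.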
+    return (criticality_data.dot(xs) >= c).astype(int).sum()
diff --git a/src_acquisition/data_retrieval.py b/src_acquisition/data_retrieval.py
deleted file mode 100644
index abc0f7ed714990604047849a03b034c1057ccd78..0000000000000000000000000000000000000000
--- a/src_acquisition/data_retrieval.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import cdsapi
-import os
-
-regions = {'EU': '75/-20/30/40'}
-
-years = ['2017', '2018']
-months = ['01','02','03','04','05','06','07','08','09','10','11','12']
-
-spatial_resolution = 0.28
-
-for region in regions.keys():
-    directory = '../input_data/resource_data/' + str(spatial_resolution) + '/'
-    if not os.path.exists(directory):
-        os.makedirs(directory)
-c = cdsapi.Client()
-for key, value in regions.items():
-    for year in years:
-        for month in months:
-                c.retrieve(
-                    'reanalysis-era5-single-levels',
-                    {
-                        'variable':['100m_u_component_of_wind','100m_v_component_of_wind',
-                                    '2m_temperature', 'surface_solar_radiation_downwards', 'forecast_surface_roughness'],
-                        'product_type':'reanalysis',
-                        'area': str(value),
-                        'grid': str(spatial_resolution)+'/'+str(spatial_resolution),
-                        'year':year,
-                        'month':month,
-                        'day':[ '01','02','03','04','05','06','07','08','09','10','11','12','13','14','15',
-                                '16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31'],
-                        'time': ['00:00', '01:00', '02:00','03:00', '04:00', '05:00','06:00', '07:00', '08:00',
-                                '09:00', '10:00', '11:00','12:00', '13:00', '14:00','15:00', '16:00', '17:00',
-                                '18:00', '19:00', '20:00','21:00', '22:00', '23:00'],
-                        'format':'netcdf'
-                    },
-                    f"{directory}/{region}_{year}_{month}.nc")
-
-
-
-# c = cdsapi.Client()
-# c.retrieve(
-#     'reanalysis-era5-single-levels',
-#     {
-#         'variable':['low_vegetation_cover','high_vegetation_cover','land_sea_mask', 'model_bathymetry', 'orography', 'sea_ice_cover'],
-#         'product_type':'reanalysis',
-#         'grid': str(spatial_resolution)+'/'+str(spatial_resolution),
-#         'year':'2017',
-#         'month':'12',
-#         'day':'31',
-#         'time':'00:00',
-#         'format':'netcdf'
-#     },
-#     '../input_data/land_mask/'+'ERA5_surface_characteristics_20181231_'+str(spatial_resolution)+'.nc')
-
-# c.retrieve(
-#     'reanalysis-era5-single-levels',
-#     {
-#         'product_type':'reanalysis',
-#         'variable':[
-#             'orography','slope_of_sub_gridscale_orography'
-#         ],
-#         'grid': str(spatial_resolution)+'/'+str(spatial_resolution),
-#         'year':'2017',
-#         'month':'12',
-#         'day':'31',
-#         'time':'00:00',
-#         'format':'netcdf'
-#     },
-#     '../input_data/land_mask/'+'ERA5_orography_characteristics_20181231_'+str(spatial_resolution)+'.nc')
diff --git a/src_postprocessing/output.py b/src_postprocessing/output.py
deleted file mode 100644
index 51c7b8b492c43864a39e4dcc6f2bf39fb14278b2..0000000000000000000000000000000000000000
--- a/src_postprocessing/output.py
+++ /dev/null
@@ -1,877 +0,0 @@
-from os.path import abspath
-import pickle
-from shapely.geometry import Polygon
-from copy import deepcopy
-from itertools import cycle, islice
-from os.path import abspath
-from os.path import join
-from random import randint
-
-import cartopy.crs as ccrs
-import cartopy.feature as cfeature
-import matplotlib.dates as mdates
-import matplotlib.pyplot as plt
-import geopandas as gpd
-import numpy as np
-import pandas as pd
-from xarray import concat
-import os
-
-
-# class Output(object):
-#
-#
-#     def __init__(self, path):
-#
-#         self.output_folder_path = abspath(join('../output_data/', path))
-#         self.parameters_yaml = read_inputs_plotting(self.output_folder_path)
-#         self.outputs_pickle = read_output(path)
-#
-#     def return_numerics(self, choice, ndiff=1, firm_threshold=0.3):
-#         """Function returning statistical measures associatd with the aggregation
-#         of different resource time series.
-#
-#             Parameters:
-#
-#             ------------
-#
-#             choice : list
-#                 The time series aggregation to assess.
-#                     "opt" - the (optimal) selection associated with the suggested siting problem
-#                     "max" - the selection of n locations maximizing production
-#                     "rand" - the selection of n random sites
-#
-#             ndiff : int
-#                 Defines the nth difference to be computed within time series.
-#
-#             firm_threshold : float
-#                 Defines the threshold that defines the "firmness" of time series
-#
-#             Returns:
-#
-#             ------------
-#
-#             result_dict : dict
-#                 Dictionary containing various indexed indicators to be plotted.
-#         """
-#
-#         print('Problem: {}'.format(self.parameters_yaml['main_problem'], end=', '))
-#         print('Objective: {}'.format(self.parameters_yaml['main_objective'], end=', '))
-#         print('Spatial resolution: {}'.format(self.parameters_yaml['spatial_resolution'], end=', '))
-#         print('Time horizon: {}'.format(self.parameters_yaml['time_slice'], end=', '))
-#         print('Measure: {}'.format(self.parameters_yaml['resource_quality_measure'], end=', '))
-#         print('alpha: {}'.format(self.parameters_yaml['alpha_rule'], end=', '))
-#         print('delta: {}'.format(self.parameters_yaml['delta'], end=', '))
-#         print('beta: {}'.format(self.parameters_yaml['beta']))
-#
-#         path_resource_data = self.parameters_yaml['path_resource_data'] + str(self.parameters_yaml['spatial_resolution']) + '/'
-#         path_transfer_function_data = self.parameters_yaml['path_transfer_function_data']
-#         path_load_data = self.parameters_yaml['path_load_data']
-#
-#         horizon = self.parameters_yaml['time_slice']
-#         delta = self.parameters_yaml['delta']
-#         technologies = self.parameters_yaml['technologies']
-#         regions = self.parameters_yaml['regions']
-#         no_sites = self.parameters_yaml['cardinality_constraint']
-#
-#         price_ts = pd.read_csv('../input_data/el_price/elix_2014_2018.csv', index_col=0, sep=';')
-#         price_ts = price_ts['price']
-#         length_timeseries = self.outputs_pickle['capacity_factors_dict'][technologies[0]].shape[0]
-#
-#         if length_timeseries <= price_ts.size:
-#             price_ts = price_ts[:length_timeseries]
-#         else:
-#             el_ts_multiplier = int(ceil(length_timeseries / price_ts.size))
-#             price_ts = tile(price_ts, el_ts_multiplier)
-#             price_ts = price_ts[:length_timeseries]
-#
-#         load_ts = retrieve_load_data(path_load_data, horizon, delta, regions,
-#                                        alpha_plan='centralized', alpha_load_norm='min')
-#
-#         signal_dict = dict.fromkeys(choice, None)
-#         firmness_dict = dict.fromkeys(choice, None)
-#         difference_dict = dict.fromkeys(choice, None)
-#         result_dict = dict.fromkeys(['signal', 'difference', 'firmness'], None)
-#
-#
-#         database = read_database(path_resource_data)
-#         global_coordinates = get_global_coordinates(database, self.parameters_yaml['spatial_resolution'],
-#                                                     self.parameters_yaml['population_density_threshold'],
-#                                                     self.parameters_yaml['protected_areas_selection'],
-#                                                     self.parameters_yaml['protected_areas_threshold'],
-#                                                     self.parameters_yaml['altitude_threshold'],
-#                                                     self.parameters_yaml['forestry_threshold'],
-#                                                     self.parameters_yaml['depth_threshold'],
-#                                                     self.parameters_yaml['path_population_density_data'],
-#                                                     self.parameters_yaml['path_protected_areas_data'],
-#                                                     self.parameters_yaml['path_orogrpahy_data'],
-#                                                     self.parameters_yaml['path_landseamask'],
-#                                                     population_density_layer=self.parameters_yaml['population_density_layer'],
-#                                                     protected_areas_layer=self.parameters_yaml['protected_areas_layer'],
-#                                                     orography_layer=self.parameters_yaml['orography_layer'],
-#                                                     forestry_layer=self.parameters_yaml['forestry_layer'],
-#                                                     bathymetry_layer=self.parameters_yaml['bathymetry_layer'])
-#
-#         for c in choice:
-#
-#             if c == 'COMP':
-#
-#                 array_list = []
-#                 for tech in self.outputs_pickle['optimal_location_dict'].keys():
-#                     array_per_tech = array(self.outputs_pickle['capacity_factors_dict'][tech].sel(
-#                         locations=self.outputs_pickle['optimal_location_dict'][tech]).values).sum(axis=1)
-#                     array_list.append(array_per_tech)
-#
-#             elif c == 'RAND':
-#
-#                 region_coordinates = return_coordinates_from_countries(regions, global_coordinates, add_offshore=True)
-#                 truncated_data = selected_data(database, region_coordinates, horizon)
-#                 output_data = return_output(truncated_data, technologies, path_transfer_function_data)
-#
-#                 no_coordinates = sum(fromiter((len(lst) for lst in region_coordinates.values()), dtype=int))
-#
-#                 output = []
-#                 for item in output_data.keys():
-#                     output.append(output_data[item])
-#
-#                 output_overall = concat(output, dim='locations')
-#
-#                 score_init = 0.
-#                 for i in range(10000):
-#
-#                     location_list = []
-#                     ts_list = []
-#
-#                     idx = [randint(0, no_coordinates*len(technologies) - 1) for x in range(sum(no_sites))]
-#                     for loc in idx:
-#                         location_list.append(output_overall.isel(locations=loc).locations.values.flatten()[0])
-#                         ts_list.append(output_overall.isel(locations=loc).values)
-#
-#                     score = array(ts_list).sum()
-#
-#                     if score > score_init:
-#                         score_init = score
-#                         ts_incumbent = ts_list
-#
-#                 array_list = ts_incumbent
-#
-#             elif c == 'PROD':
-#
-#                 suboptimal_dict = dict.fromkeys(self.parameters_yaml['regions'], None)
-#                 suboptimal_dict_ts = deepcopy(suboptimal_dict)
-#
-#                 if len(no_sites) == 1:
-#
-#                     location_list = []
-#                     ts_list = []
-#                     truncated_data_list = []
-#                     output_data_list = []
-#
-#                     for key in suboptimal_dict.keys():
-#
-#                         region_coordinates = return_coordinates_from_countries(key, global_coordinates,
-#                                                                                add_offshore=self.parameters_yaml[
-#                                                                                    'add_offshore'])
-#                         truncated_data = selected_data(database, region_coordinates, horizon)
-#
-#                         for k in technologies:
-#
-#                             tech = []
-#                             tech.append(k)
-#
-#                             output_data = return_output(truncated_data, tech, path_transfer_function_data)[k]
-#                             output_data_list.append(output_data)
-#
-#                             truncated_data_sum = output_data.sum(dim='time')
-#                             truncated_data_list.append(truncated_data_sum)
-#
-#                     truncated_data_concat = concat(truncated_data_list, dim='locations')
-#                     output_data_concat = concat(output_data_list, dim='locations')
-#
-#                     tdata = truncated_data_concat.argsort()[-no_sites[0]:]
-#
-#                     for loc in tdata.values:
-#                         location_list.append(output_data_concat.isel(locations=loc).locations.values.flatten()[0])
-#                         ts_list.append(output_data_concat.isel(locations=loc).values)
-#
-#                     array_list = ts_list
-#
-#                 else:
-#
-#                     idx = 0
-#
-#                     for key in suboptimal_dict.keys():
-#
-#
-#                         location_list = []
-#                         ts_list = []
-#                         output_data_list = []
-#                         truncated_data_list_per_region = []
-#
-#                         region_coordinates = return_coordinates_from_countries(key, global_coordinates,
-#                                                                                add_offshore=self.parameters_yaml[
-#                                                                                    'add_offshore'])
-#                         truncated_data = selected_data(database, region_coordinates, horizon)
-#
-#                         for k in technologies:
-#
-#                             tech = []
-#                             tech.append(k)
-#
-#                             output_data = return_output(truncated_data, tech, path_transfer_function_data)[k]
-#                             output_data_list.append(output_data)
-#
-#                             truncated_data_sum = output_data.sum(dim='time')
-#                             truncated_data_list_per_region.append(truncated_data_sum)
-#
-#                         truncated_data_concat_per_region = concat(truncated_data_list_per_region, dim='locations')
-#                         output_data_concat = concat(output_data_list, dim='locations')
-#
-#                         tdata = truncated_data_concat_per_region.argsort()[-no_sites[idx]:]
-#
-#                         for loc in tdata.values:
-#                             location_list.append(output_data_concat.isel(locations=loc).locations.values.flatten()[0])
-#                             ts_list.append(output_data_concat.isel(locations=loc).values)
-#
-#                         idx += 1
-#
-#                         suboptimal_dict[key] = location_list
-#                         suboptimal_dict_ts[key] = ts_list
-#
-#                     array_list = []
-#                     for region in suboptimal_dict_ts.keys():
-#                         array_per_tech = array(suboptimal_dict_ts[region]).sum(axis=0)
-#                         array_list.append(array_per_tech)
-#
-#             elif c == 'NSEA':
-#
-#                 location_list = []
-#                 ts_list = []
-#
-#                 region_coordinates = return_coordinates('NSea', global_coordinates)
-#                 truncated_data = selected_data(database, region_coordinates, horizon)
-#                 output_data = return_output(truncated_data, technologies, path_transfer_function_data)
-#
-#                 truncated_data_sum = output_data['wind_aerodyn'].sum(dim='time')
-#
-#                 tdata = truncated_data_sum.argsort()[-no_sites[0]:]
-#
-#                 for loc in tdata.values:
-#                     location_list.append(output_data['wind_aerodyn'].isel(locations=loc).locations.values.flatten()[0])
-#                     ts_list.append(output_data['wind_aerodyn'].isel(locations=loc).values)
-#
-#                 array_list = ts_list
-#
-#             array_sum = pd.Series(data=array(array_list).sum(axis=0))
-#             difference = array_sum.diff(periods=ndiff).dropna()
-#             firmness = assess_firmness(array_sum, firm_threshold * sum(self.parameters_yaml['cardinality_constraint']))
-#
-#
-#
-#
-#             print('-------------------------------------')
-#             print('NUMERICAL RESULTS FOR THE {} SET OF SITES.'.format(str(c)))
-#
-#             print('Variance of time series: {}'.format(round(array_sum.var(), 4)))
-#             print('Mean of time series: {}'.format(round(array_sum.mean(), 4)))
-#             print('Mean +/- std of time series: {}'.format(round(array_sum.mean() - array_sum.std(), 4)))
-#             print('p1, p5, p10 of time series: {}, {}, {}'.format(round(array_sum.quantile(q=0.01), 4),
-#                                                                   round(array_sum.quantile(q=0.05), 4),
-#                                                                   round(array_sum.quantile(q=0.1), 4)))
-#             print('{} difference count within +/- 1%, 5% of total output: {}, {}'.format(ndiff, difference.between(
-#                 left=-0.01 * sum(self.parameters_yaml['cardinality_constraint']),
-#                 right=0.01 * sum(self.parameters_yaml['cardinality_constraint'])).sum(), difference.between(
-#                 left=-0.05 * sum(self.parameters_yaml['cardinality_constraint']),
-#                 right=0.05 * sum(self.parameters_yaml['cardinality_constraint'])).sum()))
-#             print('Total estimated revenue: {}'.format(round(price_ts * array_sum).sum(), 4))
-#             print('Estimated low-yield (10, 20, 30%) revenue : {}, {}, {}'.format(
-#                 round(clip_revenue(array_sum, price_ts, 0.1), 4),
-#                 round(clip_revenue(array_sum, price_ts, 0.2), 4),
-#                 round(clip_revenue(array_sum, price_ts, 0.3), 4)))
-#             print('Estimated capacity credit for top (1, 5, 10%) peak demand hours (valid for centralized planning only): {}, {}, {}'.format(
-#                 round(assess_capacity_credit(load_ts, array_sum, sum(self.parameters_yaml['cardinality_constraint']), 0.99), 4),
-#                 round(assess_capacity_credit(load_ts, array_sum, sum(self.parameters_yaml['cardinality_constraint']), 0.95), 4),
-#                 round(assess_capacity_credit(load_ts, array_sum, sum(self.parameters_yaml['cardinality_constraint']), 0.90), 4))
-#             )
-#
-#             signal_dict[c] = array_sum
-#             difference_dict[c] = difference
-#             firmness_dict[c] = firmness
-#
-#         result_dict['signal'] = signal_dict
-#         result_dict['difference'] = difference_dict
-#         result_dict['firmness'] = firmness_dict
-#         result_dict['no_sites'] = sum(no_sites)
-#
-#         return result_dict
-#
-#
-#
-#
-#
-#
-#     def plot_numerics(self, result_dict, plot_cdf=False, plot_boxplot=True, plot_signal=False, plot_cdfirm=True, signal_range=[0, 8760]):
-#
-#         """Plotting different results.
-#
-#             Parameters:
-#
-#             ------------
-#
-#             result_dict : dict
-#                 Dictionary containing various indexed indicators to be plotted.
-#
-#             plot_cdf : boolean
-#                 Plot the cdf in its own frame.
-#
-#             boxplot : boolean
-#                 Plot boxplot in its own frame.
-#
-#
-#             plot_signal : boolean
-#                 Plot the resource time series in its own frame.
-#
-#             signal_range : list
-#                 List of integers defining range of signal to be displayed.
-#
-#             one_plot : boolean
-#                 Plot all subplots in one frame.
-#
-#         """
-#
-#         if plot_cdf == True:
-#
-#             colors = islice(cycle(('royalblue', 'crimson', 'forestgreen', 'goldenrod')), 0, len(result_dict['signal'].keys()))
-#             plt.clf()
-#             ax = plt.subplot(111)
-#
-#             for c in list({str(j) for i in result_dict.values() for j in i}):
-#                 ax.hist(result_dict['signal'][c], bins=1000, density=True, cumulative=True, label=str(c), color=next(colors),
-#                          histtype='step', alpha=0.8, linewidth=1.0)
-#
-#             ax.set_xlabel('Aggregated output [-]')
-#             ax.set_xticks(array([0., 10., 15., 20.]))
-#             ax.set_ylabel('Probability [-]')
-#             ax.set_yticks(arange(0.2, 1.1, step=0.2))
-#
-#             ax.legend(fontsize='medium', frameon=False, bbox_to_anchor=(0, 1.02, 1, 0.2), loc="lower left",
-#                        mode="expand", borderaxespad=0, ncol=len(result_dict['signal'].keys()))
-#
-#             plt.savefig('cdf.pdf', dpi=200, bbox_inches='tight')
-#             plt.show()
-#
-#
-#         if plot_signal == True:
-#             colors = islice(cycle(('royalblue', 'crimson', 'forestgreen', 'goldenrod')), 0, len(result_dict['signal'].keys()))
-#             plt.clf()
-#             for c in result_dict['signal'].keys():
-#
-#                 plt.plot(range(signal_range[0], signal_range[1]), result_dict['signal'][c][signal_range[0]:signal_range[1]], label=str(c), color=next(colors))
-#             plt.legend(loc='best')
-#             plt.show()
-#
-#
-#         if plot_cdfirm == True:
-#
-#             colors = islice(cycle(('royalblue', 'crimson', 'forestgreen', 'goldenrod')), 0, len(result_dict['signal'].keys()))
-#             linestyles = islice(cycle(('-', '--', '-.', ':')), 0, len(result_dict['signal'].keys()))
-#
-#             plt.clf()
-#
-#             fig = plt.figure(figsize=(5, 7))
-#
-#             ax1 = fig.add_subplot(211)
-#             ax1.set_xlabel('Aggregated output [-]', fontsize=10)
-#             ax1.set_ylabel('Probability [-]', fontsize=10)
-#             ax1.set_yticks(arange(0.2, 1.1, step=0.2))
-#
-#             ax2 = fig.add_subplot(212)
-#             ax2.set_xlabel('Firm window length [h]', fontsize=10)
-#             ax2.set_ylabel('Occurrences [-]', fontsize=10)
-#             ax2.set_yscale('log', nonposy='clip')
-#
-#             for c in result_dict['signal'].keys():
-#
-#                 color = next(colors)
-#                 linestyle = next(linestyles)
-#
-#                 ax1.hist(result_dict['signal'][c], bins=1000, density=True, cumulative=True, label=str(c), color=color,
-#                          histtype='step', alpha=1.0, linewidth=1.0, linestyle=linestyle)
-#                 ax2.hist(result_dict['firmness'][c], bins=50, label=str(c), color=color, cumulative=False, alpha=1.0,
-#                          histtype='step', linewidth=1.0, linestyle=linestyle)
-#
-#             ax1.legend(fontsize='medium', frameon=False, bbox_to_anchor=(0, 1.02, 1, 0.2), loc="lower left",
-#                        mode="expand", borderaxespad=0, ncol=len(result_dict['signal'].keys()))
-#
-#             fig.tight_layout()
-#             fig.savefig(abspath(join(self.output_folder_path,
-#                                  'numerics_plot.pdf')), bbox_inches='tight', dpi=300)
-#
-#
-#         if plot_boxplot == True:
-#
-#             df = pd.DataFrame(columns=list(result_dict['signal'].keys()))
-#
-#             for c in df.columns:
-#                 df[str(c)] = result_dict['signal'][c]
-#
-#             datetime_idx = pd.date_range('2008-01-01', periods=df.shape[0], freq='H')
-#             df.index = datetime_idx
-#             hour = df.index.hour
-#
-#             df_filtered = df.iloc[(hour == 6) | (hour == 9) | (hour == 12) | (hour == 15) | (hour == 18) | (hour == 21)]
-#
-#             fig, axs = plt.subplots(1, len(df.columns), figsize=(13,3), facecolor='w', edgecolor='k')
-#             fig.subplots_adjust(wspace=.0001)
-#
-#             axs = axs.ravel()
-#
-#             for i, name in enumerate(df.columns):
-#                 axs[i].title.set_text(str(name))
-#                 axs[i].set_xlabel('Hour of the day')
-#                 axs[i].tick_params(axis='y', which='both', length=0)
-#                 axs[i].xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
-#                 axs[i].set_ylim([-1, float(result_dict['no_sites'])+1])
-#
-#                 if i == 0:
-#                     axs[i].set_ylabel('Aggregated output [-]')
-#
-#                 if (i != 0) & (i != len(df.columns) - 1):
-#                     axs[i].set_yticklabels([])
-#                 if i == len(df.columns) - 1:
-#                     axs[i].yaxis.tick_right()
-#                     axs[i].yaxis.set_label_position("right")
-#                     axs[i].set_ylabel('Aggregated output [-]')
-#                 df_filtered.set_index(df_filtered.index.hour, append=True)[name].unstack().plot.box(ax=axs[i], sym='+',
-#                                                                                                     color=dict(
-#                                                                                                         boxes='royalblue',
-#                                                                                                         whiskers='royalblue',
-#                                                                                                         medians='forestgreen',
-#                                                                                                         caps='royalblue'),
-#                                                                                                     boxprops=dict(
-#                                                                                                         linestyle='-',
-#                                                                                                         linewidth=1.5),
-#                                                                                                     flierprops=dict(
-#                                                                                                         linewidth=1.5,
-#                                                                                                         markeredgecolor='crimson',
-#                                                                                                         color='crimson'),
-#                                                                                                     medianprops=dict(
-#                                                                                                         linestyle='-',
-#                                                                                                         linewidth=1.5),
-#                                                                                                     whiskerprops=dict(
-#                                                                                                         linestyle='-',
-#                                                                                                         linewidth=1.5),
-#                                                                                                     capprops=dict(
-#                                                                                                         linestyle='-',
-#                                                                                                         linewidth=1.5)
-#                                                                                                     )
-#                 axs[i].yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
-#             fig.tight_layout()
-#             fig.savefig(abspath(join(self.output_folder_path,
-#                                      'boxplot.pdf')), bbox_inches='tight', dpi=300)
-#
-#
-#
-#
-#
-#
-#
-#
-#
-#
-#
-#
-#
-#
-#     def optimal_locations_plot(self):
-#
-#         """Plotting the optimal locations."""
-#
-#         plt.clf()
-#
-#         base = plot_basemap(self.outputs_pickle['coordinates_dict'])
-#         map = base['basemap']
-#
-#         map.scatter(base['lons'], base['lats'], transform=base['projection'], marker='o', color='darkgrey', s=base['width']/1e7, zorder=2, alpha=1.0)
-#
-#         tech_list = list(self.outputs_pickle['optimal_location_dict'].keys())
-#         tech_set = list(chain.from_iterable(combinations(tech_list, n) for n in range(1, len(tech_list)+1)))
-#         locations_plot = dict.fromkeys(tech_set, None)
-#
-#         for key in locations_plot.keys():
-#             set_list = []
-#
-#             for k in key:
-#                 set_list.append(set(self.outputs_pickle['optimal_location_dict'][k]))
-#             locations_plot[key] = set.intersection(*set_list)
-#
-#         for key in locations_plot.keys():
-#             proxy = set()
-#             init = locations_plot[key]
-#             subkeys = [x for x in tech_set if (x != key and len(x) > len(key))]
-#
-#             if len(subkeys) > 0:
-#
-#                 for k in subkeys:
-#                     if proxy == set():
-#                         proxy = locations_plot[key].difference(locations_plot[k])
-#                     else:
-#                         proxy = proxy.difference(locations_plot[k])
-#                 locations_plot[key] = list(proxy)
-#
-#             else:
-#                 locations_plot[key] = list(init)
-#
-#         markers = islice(cycle(('.')), 0, len(locations_plot.keys()))
-#         colors = islice(cycle(('royalblue','crimson','forestgreen','goldenrod')), 0, len(locations_plot.keys()))
-#         for key in locations_plot.keys():
-#
-#             longitudes = [i[0] for i in locations_plot[key]]
-#             latitudes = [i[1] for i in locations_plot[key]]
-#             map.scatter(longitudes, latitudes, transform=base['projection'], marker=next(markers), color=next(colors),
-#                         s=base['width']/(1e5), zorder=3, alpha=0.9, label=str(key))
-#
-#
-#
-#         L = plt.legend(loc='lower left', fontsize='medium')
-#         L.get_texts()[0].set_text('Wind')
-#         L.get_texts()[1].set_text('PV')
-#         L.get_texts()[2].set_text('Wind & PV')
-#
-#         plt.savefig(abspath(join(self.output_folder_path,
-#                                  'optimal_deployment.pdf')),
-#                                  bbox_inches='tight', dpi=300)
-#
-#
-#
-#
-#
-#
-#     def retrieve_max_locations(self):
-#
-#         path_resource_data = self.parameters_yaml['path_resource_data'] + str(self.parameters_yaml['spatial_resolution']) + '/'
-#         path_transfer_function_data = self.parameters_yaml['path_transfer_function_data']
-#
-#         horizon = self.parameters_yaml['time_slice']
-#         technologies = self.parameters_yaml['technologies']
-#         no_sites = self.parameters_yaml['cardinality_constraint']
-#
-#         database = read_database(path_resource_data)
-#         global_coordinates = get_global_coordinates(database, self.parameters_yaml['spatial_resolution'],
-#                                                     self.parameters_yaml['population_density_threshold'],
-#                                                     self.parameters_yaml['protected_areas_selection'],
-#                                                     self.parameters_yaml['protected_areas_threshold'],
-#                                                     self.parameters_yaml['altitude_threshold'],
-#                                                     self.parameters_yaml['forestry_threshold'],
-#                                                     self.parameters_yaml['depth_threshold'],
-#                                                     self.parameters_yaml['path_population_density_data'],
-#                                                     self.parameters_yaml['path_protected_areas_data'],
-#                                                     self.parameters_yaml['path_orogrpahy_data'],
-#                                                     self.parameters_yaml['path_landseamask'],
-#                                                     population_density_layer=self.parameters_yaml[
-#                                                         'population_density_layer'],
-#                                                     protected_areas_layer=self.parameters_yaml['protected_areas_layer'],
-#                                                     orography_layer=self.parameters_yaml['orography_layer'],
-#                                                     forestry_layer=self.parameters_yaml['forestry_layer'],
-#                                                     bathymetry_layer=self.parameters_yaml['bathymetry_layer'])
-#
-#         suboptimal_dict = dict.fromkeys(self.parameters_yaml['regions'], None)
-#
-#         location_list = []
-#         truncated_data_list = []
-#         output_data_list = []
-#
-#         if len(no_sites) == 1:
-#
-#             for key in suboptimal_dict.keys():
-#
-#                 region_coordinates = return_coordinates_from_countries(key, global_coordinates, add_offshore=True)
-#                 truncated_data = selected_data(database, region_coordinates, horizon)
-#
-#                 for k in technologies:
-#                     tech = []
-#                     tech.append(k)
-#
-#                     output_data = return_output(truncated_data, tech, path_transfer_function_data)[k]
-#                     truncated_data_sum = output_data.sum(dim='time')
-#
-#                     truncated_data_list.append(truncated_data_sum)
-#                     output_data_list.append(output_data)
-#
-#             truncated_data_concat = concat(truncated_data_list, dim='locations')
-#             output_data_concat = concat(output_data_list, dim='locations')
-#
-#             tdata = truncated_data_concat.argsort()[-no_sites[0]:]
-#
-#             for loc in tdata.values:
-#                 location_list.append(output_data_concat.isel(locations=loc).locations.values.flatten()[0])
-#
-#         else:
-#
-#             raise ValueError(' Method not ready yet for partitioned problem.')
-#
-#         return location_list
-#
-#
-#
-#
-#
-#     def max_locations_plot(self, max_locations):
-#
-#         """Plotting the optimal vs max. locations."""
-#
-#         plt.clf()
-#
-#         base_max = plot_basemap(self.outputs_pickle['coordinates_dict'])
-#         map_max = base_max['basemap']
-#
-#         map_max.scatter(base_max['lons'], base_max['lats'], transform=base_max['projection'], marker='o', color='darkgrey', s=base_max['width']/1e7, zorder=2, alpha=1.0)
-#
-#         longitudes = [i[0] for i in max_locations]
-#         latitudes = [i[1] for i in max_locations]
-#         map_max.scatter(longitudes, latitudes, transform=base_max['projection'], marker='.', color='royalblue',
-#                         s=base_max['width']/(1e5), zorder=3, alpha=0.9, label='Wind')
-#
-#         plt.savefig(abspath(join(self.output_folder_path,
-#                                  'suboptimal_deployment_'+str('&'.join(tuple(self.outputs_pickle['coordinates_dict'].keys())))+'.pdf')),
-#                                  bbox_inches='tight', dpi=300)
-#
-#
-#
-#
-#
-#
-#     def optimal_locations_plot_heatmaps(self):
-#
-#         """Plotting the optimal location heatmaps."""
-#
-#         if (self.parameters_yaml['problem'] == 'Covering' and self.parameters_yaml['objective'] == 'budget') or \
-#                 (self.parameters_yaml['problem'] == 'Load' and self.parameters_yaml['objective'] == 'following'):
-#
-#             for it in self.outputs_pickle['deployed_capacities_dict'].keys():
-#
-#                 plt.clf()
-#
-#                 base = plot_basemap(self.outputs_pickle['coordinates_dict'])
-#                 map = base['basemap']
-#
-#                 location_and_capacity_list = []
-#                 print([val for vals in self.outputs_pickle['coordinates_dict'].values() for val in vals])
-#                 # for idx, location in enumerate(list(set([val for vals in self.outputs_pickle['coordinates_dict'].values() for val in vals]))):
-#                 for idx, location in enumerate([val for vals in self.outputs_pickle['coordinates_dict'].values() for val in vals]):
-#                     l = list(location)
-#                     l.append(self.outputs_pickle['deployed_capacities_dict'][it][idx])
-#                     location_and_capacity_list.append(l)
-#                 print(location_and_capacity_list)
-#                 df = pd.DataFrame(location_and_capacity_list, columns=['lon', 'lat', 'cap'])
-#
-#                 pl = map.scatter(df['lon'].values, df['lat'].values, transform=base['projection'], c=df['cap'].values, marker='s', s=base['width']/1e6, cmap=plt.cm.Reds, zorder=2)
-#
-#                 cbar = plt.colorbar(pl, ax=map, orientation= 'horizontal', pad=0.1, fraction=0.04, aspect=28)
-#                 cbar.set_label("GW", fontsize='8')
-#                 cbar.outline.set_visible(False)
-#                 cbar.ax.tick_params(labelsize='x-small')
-#
-#                 plt.savefig(abspath(join(self.output_folder_path, 'capacity_heatmap_'+str(it)+'.pdf')), bbox_inches='tight', dpi=300)
-#
-#         else:
-#             raise TypeError('WARNING! No such plotting capabilities for a basic deployment problem.')
-
-def distsphere(lat1, long1, lat2, long2):
-    """Calculates distance between two points on a sphere.
-
-    Parameters:
-
-    ------------
-
-    lat1, long1, lat2, long2 : float
-        Geographical coordinates of the two points (decimal degrees).
-
-    Returns:
-
-    ------------
-
-    arc : float
-        Distance between points in radians.
-
-    """
-
-    # Convert latitude and longitude to
-    # spherical coordinates in radians.
-    degrees_to_radians = np.pi / 180.0
-
-    # phi = 90 - latitude
-    phi1 = (90.0 - lat1) * degrees_to_radians
-    phi2 = (90.0 - lat2) * degrees_to_radians
-
-    # theta = longitude
-    theta1 = long1 * degrees_to_radians
-    theta2 = long2 * degrees_to_radians
-
-    # Compute spherical distance from spherical coordinates.
-    cosine = (np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2) + np.cos(phi1) * np.cos(phi2))
-    arc = np.arccos(cosine)
-
-    # Remember to multiply arc by the radius of the earth!
-    return arc
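-
-# Minimal, hypothetical usage sketch (coordinates are illustrative):
-#   arc = distsphere(50.85, 4.35, 48.86, 2.35)  # e.g. Brussels to Paris, returned in radians
-#   distance_km = arc * 6371.0                  # multiply by the Earth's mean radius to get km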
-
-def update_latitude(lat1, arc):
-    """Helper function that adjusts the central latitude position.
-
-    Parameters:
-
-    ------------
-
-    lat1 : float
-        Latitude of the reference point (decimal degrees).
-
-    arc : float
-        Arc length (in radians) by which the latitude is shifted northwards.
-
-    Returns:
-
-    ------------
-
-    lat2 : float
-        Adjusted latitude (decimal degrees).
-
-    """
-
-    degrees_to_radians = np.pi / 180.0
-    lat2 = (arc - ((90 - lat1) * degrees_to_radians)) * (1. / degrees_to_radians) + 90
-    return lat2
-
-
-
-
-
-def centerMap(lons, lats):
-    """Returns elements of the Basemap plot (center latitude and longitude,
-    height and width of the map).
-
-    Parameters:
-
-    ------------
-
-    lons : list
-
-    lats : list
-
-
-
-    Returns:
-
-    ------------
-
-    lon0, lat0, minlon, maxlon, minlat, maxlat, mapH, mapW : float
-
-    """
-    # Assumes -90 < lat < 90 and -180 < lon < 180, and that
-    # latitude and longitude are given in decimal degrees
-    earthRadius = 6378100.0  # earth's radius in meters
-
-    lon0 = ((max(lons) - min(lons)) / 2) + min(lons)
-
-    b = distsphere(max(lats), min(lons), max(lats), max(lons)) * earthRadius / 2
-    c = distsphere(max(lats), min(lons), min(lats), lon0) * earthRadius
-
-    # use the Pythagorean theorem to determine the height of the plot
-    mapH = np.sqrt(c ** 2 - b ** 2)
-    mapW = distsphere(min(lats), min(lons), min(lats), max(lons)) * earthRadius
-
-    arcCenter = (mapH / 2) / earthRadius
-    lat0 = update_latitude(min(lats), arcCenter)
-
-    minlon = min(lons) - 1
-    maxlon = max(lons) + 1
-    minlat = min(lats) - 1
-    maxlat = max(lats) + 1
-
-    return lon0, lat0, minlon, maxlon, minlat, maxlat, mapH, mapW
-
-def plot_basemap(coordinate_list):
-
-    longitudes = [i[0] for i in coordinate_list]
-    latitudes = [i[1] for i in coordinate_list]
-
-    lon0, lat0, minlon, maxlon, minlat, maxlat, mapH, mapW = centerMap(longitudes, latitudes)
-
-    land_50m = cfeature.NaturalEarthFeature('physical', 'land', '50m',
-                                            edgecolor='darkgrey',
-                                            facecolor=cfeature.COLORS['land_alt1'])
-
-    path_to_shapefile = '../input_data/shapefiles/EEZ_land_union/EEZ_Land_v3_202030.shp'
-    shapes = gpd.read_file(path_to_shapefile)
-    shapes = shapes[shapes['UNION'].isin(['Germany', 'France', 'Spain', 'United Kingdom', 'Greece',
-                                          'Norway', 'Denmark', 'Sweden', 'Finland', 'Latvia',
-                                          'Lithuania', 'Estonia', 'Portugal', 'Italy', 'Ireland',
-                                          'Poland', 'Netherlands', 'Belgium'])]
-
-    shapes_bound = gpd.GeoDataFrame(geometry=shapes.boundary)
-    shapes_bound['continent'] = 'Europe'
-    shapes_union = shapes_bound.dissolve(by='continent')
-
-    proj = ccrs.PlateCarree()
-    plt.figure(figsize=(10, 6))
-
-    ax = plt.axes(projection=proj)
-    ax.set_extent([-18., 37., 30., 75.], proj)
-
-    ax.add_geometries(shapes_union.geometry, facecolor='none', edgecolor='darkgrey', crs=proj, linewidth=0.5)
-
-    ax.add_feature(land_50m, linewidth=0.5)
-    ax.add_feature(cfeature.OCEAN, facecolor='white')
-    ax.add_feature(cfeature.LAKES, facecolor='white')
-    ax.add_feature(cfeature.BORDERS.with_scale('50m'), edgecolor='darkgrey', linewidth=0.5)
-
-    ax.outline_patch.set_edgecolor('white')
-
-    return {'basemap': ax,
-            'projection': proj,
-            'lons': longitudes,
-            'lats': latitudes,
-            'width': mapW}
-
-
-
-def locations_plot():
-
-    path_to_folder = f'../output_data/book_1y_n92_k1_max_c1_BB'
-    cases = ['prod', 'comp']
-    colors = {'wind_onshore': 'dodgerblue',
-              'solar_utility': 'darkred'}
-
-    for case in cases:
-
-        plt.clf()
-
-        fn = join(path_to_folder, f"{case}_site_data.p")
-        file = pickle.load(open(fn, 'rb'))
-        techs = set([i[0] for i in file.columns])
-
-        all_locs_fn = join(path_to_folder, 'init_coordinates_dict.p')
-        all_locs = pickle.load(open(all_locs_fn, 'rb'))['wind_onshore']
-
-        base = plot_basemap(list(set(all_locs)))
-        map = base['basemap']
-
-        for tech in techs:
-            chosen_sites = [i[1] for i in file.columns if i[0] == tech]
-            map.scatter([i[0] for i in chosen_sites], [i[1] for i in chosen_sites], transform=base['projection'],
-                        marker='o', color=colors[tech],
-                        s=base['width'] / (1*1e6), zorder=5, alpha=1.0)
-
-        # import matplotlib.patches as mpatches
-        #
-        # red_patch = mpatches.Patch(color='darkred', label='PROD')
-        # blue_patch = mpatches.Patch(color='dodgerblue', label='COMP')
-        # green_patch = mpatches.Patch(color='forestgreen', label='PROD & COMP')
-        #
-        # plt.legend(handles=[red_patch, blue_patch, green_patch], loc='best', fontsize='large')
-
-        plt.title('')
-        # plt.savefig(abspath(join('../output_data/', f'depl_{run}.pdf')), bbox_inches='tight', dpi=300)
-        plt.show()
\ No newline at end of file
diff --git a/src_postprocessing/output_tools.py b/src_postprocessing/output_tools.py
deleted file mode 100644
index 78026ec794c6811063e1a4ec0cebc4352dcab24f..0000000000000000000000000000000000000000
--- a/src_postprocessing/output_tools.py
+++ /dev/null
@@ -1,746 +0,0 @@
-from numpy import sin, cos, pi, arccos
-import numpy as np
-import pickle
-import pandas as pd
-import yaml
-from os.path import join
-from ast import literal_eval
-from matplotlib.path import Path
-import cartopy.crs as ccrs
-import cartopy.feature as cfeature
-from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
-import matplotlib.pyplot as plt
-import matplotlib.ticker as mticker
-
-
-
-def _rect_inter_inner(x1,x2):
-    n1=x1.shape[0]-1
-    n2=x2.shape[0]-1
-    X1=np.c_[x1[:-1],x1[1:]]
-    X2=np.c_[x2[:-1],x2[1:]]
-    S1=np.tile(X1.min(axis=1),(n2,1)).T
-    S2=np.tile(X2.max(axis=1),(n1,1))
-    S3=np.tile(X1.max(axis=1),(n2,1)).T
-    S4=np.tile(X2.min(axis=1),(n1,1))
-    return S1,S2,S3,S4
-
-def _rectangle_intersection_(x1,y1,x2,y2):
-    S1,S2,S3,S4=_rect_inter_inner(x1,x2)
-    S5,S6,S7,S8=_rect_inter_inner(y1,y2)
-
-    C1=np.less_equal(S1,S2)
-    C2=np.greater_equal(S3,S4)
-    C3=np.less_equal(S5,S6)
-    C4=np.greater_equal(S7,S8)
-
-    ii,jj=np.nonzero(C1 & C2 & C3 & C4)
-    return ii,jj
-
-def intersection(x1,y1,x2,y2):
-    """
-INTERSECTIONS Intersections of curves.
-   Computes the (x,y) locations where two curves intersect.  The curves
-   can be broken with NaNs or have vertical segments.
-usage:
-x,y=intersection(x1,y1,x2,y2)
-    Example:
-    a, b = 1, 2
-    phi = np.linspace(3, 10, 100)
-    x1 = a*phi - b*np.sin(phi)
-    y1 = a - b*np.cos(phi)
-    x2=phi
-    y2=np.sin(phi)+2
-    x,y=intersection(x1,y1,x2,y2)
-    plt.plot(x1,y1,c='r')
-    plt.plot(x2,y2,c='g')
-    plt.plot(x,y,'*k')
-    plt.show()
-    """
-    ii,jj=_rectangle_intersection_(x1,y1,x2,y2)
-    n=len(ii)
-
-    dxy1=np.diff(np.c_[x1,y1],axis=0)
-    dxy2=np.diff(np.c_[x2,y2],axis=0)
-
-    T=np.zeros((4,n))
-    AA=np.zeros((4,4,n))
-    AA[0:2,2,:]=-1
-    AA[2:4,3,:]=-1
-    AA[0::2,0,:]=dxy1[ii,:].T
-    AA[1::2,1,:]=dxy2[jj,:].T
-
-    BB=np.zeros((4,n))
-    BB[0,:]=-x1[ii].ravel()
-    BB[1,:]=-x2[jj].ravel()
-    BB[2,:]=-y1[ii].ravel()
-    BB[3,:]=-y2[jj].ravel()
-
-    for i in range(n):
-        try:
-            T[:,i]=np.linalg.solve(AA[:,:,i],BB[:,i])
-        except np.linalg.LinAlgError:
-            # Singular system (parallel segments): no intersection for this pair.
-            T[:,i]=np.nan
-
-
-    in_range= (T[0,:] >=0) & (T[1,:] >=0) & (T[0,:] <=1) & (T[1,:] <=1)
-
-    xy0=T[2:,in_range]
-    xy0=xy0.T
-    return xy0[:,0],xy0[:,1]
-
-
-def clip_revenue(ts, el_price, ceiling):
-    """Computes revenues associated with some synthetic timeseries.
-
-    Parameters:
-
-    ------------
-
-    ts : TimeSeries
-        Electricity generation time series.
-
-    el_price : TimeSeries
-        Electricity price time series
-
-    ceiling : float
-        Quantile of the generation time series above which values are clipped (set to zero).
-
-    Returns:
-
-    ------------
-
-    revenue : float
-        Total revenue over the time horizon.
-
-    """
-
-    ts_clip = ts.where(ts <= np.quantile(ts, ceiling), 0.)
-    revenue = (ts_clip * el_price).sum()
-
-    return revenue
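-
-# Hypothetical usage sketch (ts and el_price assumed to be pandas Series on the same hourly index):
-#   revenue = clip_revenue(ts, el_price, ceiling=0.95)
-#   # generation values above the 95th percentile of ts are zeroed before summing ts * el_price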
-
-
-def assess_firmness(ts, threshold):
-    """Function assessing time series "firmness".
-
-    Parameters:
-
-    ------------
-
-    ts : TimeSeries
-        Electricity generation time series.
-
-    threshold : float
-        Capacity factor value compared to which the firmness of the
-        time series is assessed.
-
-    Returns:
-
-    ------------
-
-    sequences : list
-        List of integers representing the lengths of time windows with
-        non-interrupted capacity factor values above "threshold".
-
-    """
-
-    # Replace all values smaller than the threshold with 0.
-    mask = np.where(ts >= threshold, ts, 0)
-    # Retrieve the indices of non-zeros from the time series.
-    no_zeros = np.nonzero(mask != 0)[0]
-    # Determine the length of the consecutive non-zero instances.
-    sequences = [len(i) for i in np.split(no_zeros, np.where(np.diff(no_zeros) != 1)[0]+1)]
-
-    return sequences
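-
-# Illustrative example (assumed input array):
-#   assess_firmness(np.array([0.1, 0.6, 0.7, 0.2, 0.8, 0.9, 0.9]), threshold=0.5)
-#   # -> [2, 3], i.e. two uninterrupted windows (of 2 and 3 steps) at or above the threshold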
-
-
-def assess_capacity_credit(ts_load, ts_gen, no_deployments, threshold):
-    """Estimates a capacity credit proxy: the average per-site generation over
-    the hours whose load lies at or above the given quantile of the load series."""
-
-    ts_load_array = ts_load.values
-    ts_load_mask = np.where(ts_load_array >= np.quantile(ts_load_array, threshold), 1., 0.)
-    ts_load_mask = pd.Series(data = ts_load_mask)
-    ts_gen_mean = ts_gen / no_deployments
-    proxy = ts_load_mask * ts_gen_mean
-    proxy_nonzero = proxy.iloc[proxy.to_numpy().nonzero()[0]]
-
-    return proxy_nonzero.mean()
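-
-# Hypothetical usage sketch (ts_load and ts_gen assumed to be pandas Series over the same horizon):
-#   cc = assess_capacity_credit(ts_load, ts_gen, no_deployments=10, threshold=0.99)
-#   # average per-site generation over the hours whose load lies in the top 1%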
-
-
-def distsphere(lat1, long1, lat2, long2):
-    """Calculates distance between two points on a sphere.
-
-    Parameters:
-
-    ------------
-
-    lat1, long1, lat2, long2 : float
-        Geographical coordinates of the two points (decimal degrees).
-
-    Returns:
-
-    ------------
-
-    arc : float
-        Distance between points in radians.
-
-    """
-
-    # Convert latitude and longitude to
-    # spherical coordinates in radians.
-    degrees_to_radians = pi / 180.0
-
-    # phi = 90 - latitude
-    phi1 = (90.0 - lat1) * degrees_to_radians
-    phi2 = (90.0 - lat2) * degrees_to_radians
-
-    # theta = longitude
-    theta1 = long1 * degrees_to_radians
-    theta2 = long2 * degrees_to_radians
-
-    # Compute spherical distance from spherical coordinates.
-    cosine = (sin(phi1) * sin(phi2) * cos(theta1 - theta2) + cos(phi1) * cos(phi2))
-    arc = arccos(cosine)
-
-    # Remember to multiply arc by the radius of the earth!
-    return arc
-
-
-
-
-
-def update_latitude(lat1, arc):
-    """Helper function that adjusts the central latitude position.
-
-    Parameters:
-
-    ------------
-
-    lat1 : float
-        Latitude of the reference point (decimal degrees).
-
-    arc : float
-        Arc length (in radians) by which the latitude is shifted northwards.
-
-    Returns:
-
-    ------------
-
-    lat2 : float
-        Adjusted latitude (decimal degrees).
-
-    """
-
-    degrees_to_radians = pi / 180.0
-    lat2 = (arc - ((90 - lat1) * degrees_to_radians)) * (1. / degrees_to_radians) + 90
-    return lat2
-
-
-
-
-
-def centerMap(lons, lats):
-    """Returns elements of the Basemap plot (center latitude and longitude,
-    height and width of the map).
-
-    Parameters:
-
-    ------------
-
-    lons : list
-
-    lats : list
-
-
-
-    Returns:
-
-    ------------
-
-    lon0, lat0, minlon, maxlon, minlat, maxlat, mapH, mapW : float
-
-    """
-    # Assumes -90 < lat < 90 and -180 < lon < 180, and that
-    # latitude and longitude are given in decimal degrees
-    earthRadius = 6378100.0  # earth's radius in meters
-
-    lon0 = ((max(lons) - min(lons)) / 2) + min(lons)
-
-    b = distsphere(max(lats), min(lons), max(lats), max(lons)) * earthRadius / 2
-    c = distsphere(max(lats), min(lons), min(lats), lon0) * earthRadius
-
-    # use the Pythagorean theorem to determine the height of the plot
-    mapH = np.sqrt(c ** 2 - b ** 2)
-    mapW = distsphere(min(lats), min(lons), min(lats), max(lons)) * earthRadius
-
-    arcCenter = (mapH / 2) / earthRadius
-    lat0 = update_latitude(min(lats), arcCenter)
-
-    minlon = min(lons) - 1
-    maxlon = max(lons) + 1
-    minlat = min(lats) - 1
-    maxlat = max(lats) + 1
-
-    return lon0, lat0, minlon, maxlon, minlat, maxlat, mapH, mapW
-
-
-
-def return_coordinates(regions, global_coordinates):
-    """Returns coordinate pairs associated with a given region. If the region
-       is not pre-defined, the user is requested to input a series of tuples
-       representing the vertices of a polygon defining the area of interest.
-
-    Parameters:
-
-    ------------
-
-    regions : str/list
-        Region for which coordinate pairs are extracted.
-
-    global_coordinates : list
-        List of all available coordinates.
-
-    Returns:
-
-    ------------
-
-    coordinates_dict : dictionary
-        (Key, value) pairs of coordinates associated to each input region.
-
-    """
-
-    def get_points(polygon):
-
-        """Return list of points inside a polygon.
-
-        Parameters:
-
-        ------------
-
-        polygon : list
-            List of tuples defining a polygon.
-
-
-        Returns:
-
-        ------------
-
-        location_list : list
-            List with all elements inside the polygon.
-
-        """
-        # Creates a polygon path based on points given as input.
-        p = Path(polygon)
-        # Checks which points in 'global_coordinates' are within the polygon.
-        # Returns True/False
-        masked_list = p.contains_points(global_coordinates)
-        # Returns the location for which the above is True.
-        location_list = [global_coordinates[x] for x in np.where(masked_list == 1)[0]]
-
-        return location_list
-
-
-
-
-
-
-    def get_coordinates_within_polygon(region):
-
-        """Return list of points inside a polygon.
-
-        Parameters:
-
-        ------------
-
-        region : str
-            Region of interest.
-
-
-
-        Returns:
-
-        ------------
-
-        location_list : list
-            Coordinate pairs associated with the input region.
-
-        """
-
-        # A couple regions defined hereafter.
-        if region == 'EU':
-            polygon = [(-9.07, 36.97), (-9.62, 38.72), (-8.97, 41.11), (-9.48, 43.11),
-                       (-7.78, 43.9), (-1.88, 43.75), (-1.61, 46.13),
-                       (-2.79, 47.27), (-4.95, 47.96), (-5.02, 48.82), (-10.94, 52.21), (-7.30, 58.53), (-2.22, 59.09), (4.81, 62.17),
-                       (10.50, 65.00), (31.91, 65.00), (27.68, 60.30), (28.41, 56.26), (23.71, 53.77), (24.22, 50.57), (22.28, 48.39),
-                       (26.73, 48.43), (28.10, 46.91), (28.25, 45.50),
-                       (29.92, 45.48), (28.17, 41.97), (25.89, 40.67),
-                       (24.09, 39.95), (24.68, 36.70), (21.64, 36.55), (19.24, 40.39),
-                       (19.13, 41.74), (13.44, 45.14), (12.54, 44.91),
-                       (18.71, 40.01), (15.05, 36.46), (12.13, 37.97), (15.33, 38.52),
-                       (14.98, 40.02), (12.25, 41.39), (10.20, 42.88),
-                       (9.01, 44.16), (6.51, 42.91), (3.72, 43.05), (3.17, 41.60),
-                       (0.64, 40.35), (0.37, 38.67), (-0.59, 37.53), (-2.06, 36.54),
-                       (-5.61, 35.94)]
-
-        elif region == 'ContEU':
-            polygon = [(-9.07, 36.97), (-9.62, 38.72), (-8.97, 41.11), (-9.48, 43.11),
-                       (-7.78, 43.9), (-1.88, 43.75), (-1.61, 46.13),
-                       (-2.79, 47.27), (-4.95, 47.96), (-5.02, 48.82),
-                       (-1.82, 49.83), (2.35, 51.33), (3.14, 53.48), (7.91, 54.22),
-                       (7.73, 57.22), (10.61, 57.99), (11.15, 56.42),
-                       (10.89, 54.65), (13.42, 54.81), (18.41, 55.18), (19.37, 54.46),
-                       (23.25, 54.36), (24.10, 50.44), (22.29, 48.41),
-                       (24.91, 47.87), (26.73, 48.43), (28.10, 46.91), (28.25, 45.50),
-                       (29.92, 45.48), (28.17, 41.97), (25.89, 40.67),
-                       (24.09, 39.95), (24.68, 36.70), (21.64, 36.55), (19.24, 40.39),
-                       (19.13, 41.74), (13.44, 45.14), (12.54, 44.91),
-                       (18.71, 40.01), (15.05, 36.46), (12.13, 37.97), (15.33, 38.52),
-                       (14.98, 40.02), (12.25, 41.39), (10.20, 42.88),
-                       (9.01, 44.16), (6.51, 42.91), (3.72, 43.05), (3.17, 41.60),
-                       (0.64, 40.35), (0.37, 38.67), (-0.59, 37.53), (-2.06, 36.54),
-                       (-5.61, 35.94)]
-
-        elif region == 'CWE':
-            polygon = [(-1.81, 43.43), (-1.45, 46.04), (-4.98, 48.31),
-                       (-1.86, 49.72), (2.36, 51.10), (4.65, 53.06),
-                       (8.33, 54.87), (14.08, 54.59), (14.83, 51.04),
-                       (12.02, 50.22), (13.65, 48.8), (12.9, 47.66),
-                       (7.41, 47.62), (5.83, 46.27), (7.77, 43.66),
-                       (6.36, 43.09), (3.90, 43.21), (3.2, 42.41)]
-
-        elif region == 'FR':
-            polygon = [(-1.81, 43.43), (-1.45, 46.04), (-4.98, 48.31),
-                       (-1.86, 49.72), (2.36, 51.10), (4.47, 49.85),
-                       (7.94, 48.97), (7.41, 47.62), (5.83, 46.27),
-                       (7.77, 43.66), (6.36, 43.09), (3.90, 43.21), (3.2, 42.41)]
-
-        elif region == 'DE':
-            polygon = [(6.85, 53.82), (7.02, 52.04), (6.15, 50.89),
-                       (6.68, 49.30), (7.94, 48.97), (7.41, 47.62),
-                       (12.9, 47.66), (13.65, 48.8), (12.02, 50.22),
-                       (14.83, 51.04), (14.08, 54.59), (8.33, 54.87)]
-
-        elif region == 'BL':
-            polygon = [(2.36, 51.10), (4.65, 53.06), (6.85, 53.82),
-                       (7.02, 52.04), (6.15, 50.89), (6.68, 49.30)]
-
-        elif region == 'GR':
-            polygon = [(-52.1, 63.1), (-40.5, 62.9), (-42.5, 59.1), (-48.5, 60.3)]
-
-        elif region == 'IC':
-            polygon = [(-23.1, 63.7), (-18.9, 63.2), (-12.5, 65.1),
-                       (-16.1, 66.9), (-23.1, 66.6), (-24.6, 64.8)]
-
-        elif region == 'NA':
-            polygon = [(-20.05, 21.51), (-6.33, 35.88), (-5.11, 35.92), (-1.77, 35.6),
-                       (3.58, 37.64), (11.23, 37.57), (12.48, 34.28), (32.15, 32.04),
-                       (38.07, 21.76)]
-
-        elif region == 'ME':
-            polygon = [(36.0, 36.4), (34.2, 27.7), (44.4, 11.4), (61.7, 19.8), (59.6, 37.8), (35.9, 36.6)]
-
-        elif region == 'NSea':
-            polygon = [(-1.67, 57.65), (-2.55, 56.12), (1.85, 52.77), (2.54, 51.25), (3.66, 51.86), (4.66, 53.17), (8.71, 53.96), (5.01, 59.20)]
-
-        else:
-            print(' Region {} is not currently available.'.format(str(region)))
-            polygon = list(literal_eval(input('Please define boundary polygon (as series of coordinate pairs): ')))
-
-        if len(polygon) < 3:
-            raise ValueError(' Not much of a polygon to build with fewer than 3 vertices.')
-        for item in polygon:
-            if not isinstance(item, tuple):
-                raise TypeError(' Check the type of the elements within the list. Should be tuples.')
-            for i in item:
-                if not isinstance(i, float):
-                    raise TypeError(' Check the type of the elements within the tuples. Should be floats.')
-
-        coordinates_local = get_points(polygon)
-        if len(coordinates_local) == 0:
-            raise ValueError(' No available locations inside the given polygon.')
-
-        return coordinates_local
-
-    if isinstance(regions, str):
-
-        coordinates_region = get_coordinates_within_polygon(regions)
-        coordinates_dict = {str(regions): coordinates_region}
-
-    # This part comes handy if you want to assess criticality on an area
-    # containing two or more regions given as a list.
-    elif isinstance(regions, list):
-
-        coordinates_dict = dict.fromkeys(regions, None)
-
-        for subregion in regions:
-            coordinates_subregion = get_coordinates_within_polygon(subregion)
-            coordinates_subregion_sorted = sorted(coordinates_subregion,
-                                                  key=lambda x: (x[0], x[1]))
-            coordinates_dict[subregion] = coordinates_subregion_sorted
-
-
-    return coordinates_dict
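-
-# Hypothetical usage sketch (global_coordinates assumed to be a list of (lon, lat) tuples):
-#   coordinates_dict = return_coordinates('FR', global_coordinates)
-#   coordinates_dict['FR']  # -> (lon, lat) pairs falling inside the pre-defined France polygon
-#   return_coordinates(['FR', 'DE'], global_coordinates)  # one sorted list of pairs per region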
-
-
-def polygon_contains_point(point_list, region_list):
-
-    """Return list of points inside a polygon.
-
-    Parameters:
-
-    ------------
-
-
-    Returns:
-
-    ------------
-
-
-    """
-
-    region_count = dict.fromkeys(region_list, 0)
-
-    for region in region_list:
-
-        count = 0
-
-        for point in point_list:
-
-            # A couple regions defined hereafter.
-            if region == 'ContEU':
-                polygon = [(-9.07, 36.97), (-9.62, 38.72), (-8.97, 41.11), (-9.48, 43.11), (-7.78, 43.9),
-                           (-1.88, 43.75), (-1.61, 46.13),
-                           (-2.79, 47.27), (-4.95, 47.96), (-5.02, 48.82), (-1.82, 49.83), (2.35, 51.33), (3.14, 53.48),
-                           (7.91, 54.22),
-                           (7.73, 57.22), (10.61, 57.99), (11.15, 56.42), (10.89, 54.65), (13.42, 54.81),
-                           (18.41, 55.18), (19.37, 54.46),
-                           (23.25, 54.36), (24.10, 50.44), (22.29, 48.41), (24.91, 47.87), (26.73, 48.43),
-                           (28.10, 46.91), (28.25, 45.50),
-                           (29.92, 45.48), (28.17, 41.97), (25.89, 40.67), (24.09, 39.95), (24.68, 36.70),
-                           (21.64, 36.55), (19.24, 40.39),
-                           (19.13, 41.74), (13.44, 45.14), (12.54, 44.91), (18.71, 40.01), (15.05, 36.46),
-                           (12.13, 37.97), (15.33, 38.52),
-                           (14.98, 40.02), (12.25, 41.39), (10.20, 42.88), (9.01, 44.16), (6.51, 42.91), (3.72, 43.05),
-                           (3.17, 41.60),
-                           (0.64, 40.35), (0.37, 38.67), (-0.59, 37.53), (-2.06, 36.54), (-5.61, 35.94)]
-
-            elif region == 'WestEU':
-                polygon = [(-1.81, 43.43), (-1.45, 46.04), (-4.98, 48.31), (-1.86, 49.72), (2.36, 51.10), (4.65, 53.06),
-                           (8.33, 54.87), (14.08, 54.59), (14.83, 51.04), (12.02, 50.22), (13.9, 48.7), (12.7, 48.12),
-                           (12.9, 47.66),
-                           (7.41, 47.62), (5.83, 46.27), (7.55, 43.74), (6.36, 43.09), (3.90, 43.21), (3.2, 42.41)]
-
-            elif region == 'CentralEU':
-                polygon = [(6.7, 53.6), (6.8, 45.9), (16.2, 46.8), (18.8, 49.6), (15.3, 51.2), (14.3, 54.0),
-                           (9.1, 55.0)]
-
-            elif region == 'SouthEU':
-                polygon = [(-9.82, 43.45), (3.68, 42.35), (6.62, 43.07), (7.55, 43.74), (6.85, 45.96), (12.12, 47.03),
-                           (13.64, 46.48),
-                           (13.51, 45.65), (12.35, 45.23), (13.64, 43.60), (16.19, 41.85), (18.57, 40.06),
-                           (15.11, 36.51),
-                           (12.21, 37.87), (15.23, 38.35), (15.61, 39.85), (12.09, 41.67), (8.79, 44.35), (7.55, 43.74),
-                           (6.84, 42.87), (3.68, 42.35), (-1.4, 36.7), (-5.6, 36.0), (-9.63, 37.0)]
-
-            elif region == 'NorthEU':
-                polygon = [(28.1, 60.2), (28.7, 70.0), (17.2, 70.0), (-10.0, 54.2), (-10.0, 51.4), (-5.5, 49.9),
-                           (1.5, 51.1)]
-
-            elif region == 'EastEU':
-                polygon = [(14.41, 54.71), (23.68, 54.43), (23.03, 48.37), (26.61, 48.33), (30.06, 44.96),
-                           (28.19, 41.92),
-                           (22.84, 36.03), (19.74, 39.72), (19.47, 41.69), (13.53, 45.71), (13.9, 48.7), (12.7, 48.12),
-                           (12.9, 47.66),
-                           (13.65, 48.8), (14.83, 51.04)]
-
-            elif region == 'CWE':
-                polygon = [(-1.81, 43.43), (-1.45, 46.04), (-4.98, 48.31), (-1.86, 49.72), (2.36, 51.10), (4.65, 53.06),
-                           (8.33, 54.87), (14.08, 54.59), (14.83, 51.04), (12.02, 50.22), (13.65, 48.8), (12.9, 47.66),
-                           (7.41, 47.62), (5.83, 46.27), (7.77, 43.66), (6.36, 43.09), (3.90, 43.21), (3.2, 42.41)]
-
-            elif region == 'FR':
-                polygon = [(-1.81, 43.43), (-1.45, 46.04), (-4.98, 48.31), (-1.86, 49.72), (2.36, 51.10), (4.47, 49.85),
-                           (7.94, 48.97),
-                           (7.41, 47.62), (5.83, 46.27), (7.77, 43.66), (6.36, 43.09), (3.90, 43.21), (3.2, 42.41)]
-
-            elif region == 'DE':
-                polygon = [(6.85, 53.82), (7.02, 52.04), (6.15, 50.89), (6.68, 49.30), (7.94, 48.97), (7.41, 47.62),
-                           (12.9, 47.66), (13.65, 48.8), (12.02, 50.22), (14.83, 51.04), (14.08, 54.59), (8.33, 54.87)]
-
-            elif region == 'BL':
-                polygon = [(2.36, 51.10), (4.65, 53.06), (6.85, 53.82), (7.02, 52.04), (6.15, 50.89), (6.68, 49.30)]
-
-            elif region == 'EastUS':
-                polygon = [(-67.1, 44.7), (-81.3, 30.0), (-88.4, 30.3), (-87.6, 41.6)]
-
-            elif region == 'CentralUS':
-                polygon = [(-87.6, 41.6), (-95.0, 48.7), (-109.4, 48.7), (-109.2, 31.5), (-90.0, 29.3)]
-
-            elif region == 'WestUS':
-                polygon = [(-109.4, 48.7), (-109.2, 31.5), (-117.5, 32.4), (-125.0, 39.7), (-125.0, 48.7)]
-
-            elif region == 'ToyUS':
-                polygon = [(-114.0, 38.5), (-114.0, 39.5), (-110., 40.)]
-
-            elif region == 'GR':
-                polygon = [(-52.1, 63.1), (-40.5, 62.9), (-42.5, 59.1), (-48.5, 60.3)]
-
-            elif region == 'IC':
-                polygon = [(-23.1, 63.7), (-18.9, 63.2), (-12.5, 65.1), (-16.1, 66.9), (-23.1, 66.6), (-24.6, 64.8)]
-
-            elif region == 'NA':
-                polygon = [(-20.05, 21.51), (-6.33, 35.88), (-5.11, 35.92), (-1.77, 35.6), (3.58, 37.64),
-                           (11.23, 37.57), (12.48, 34.28), (32.15, 32.04), (38.07, 21.76)]
-
-
-            else:
-                print('Region', region, 'is not currently available.')
-                polygon = list(literal_eval(input('Please define boundary polygon (as series of coordinate pairs): ')))
-
-            if len(polygon) < 3:
-                raise ValueError('Not much of a polygon to build with fewer than 3 vertices.')
-            for item in polygon:
-                if not isinstance(item, tuple):
-                    raise ValueError('Check the type of the elements within the list. Should be tuples.')
-                for i in item:
-                    if not isinstance(i, float):
-                        raise ValueError('Check the type of the elements within the tuples. Should be floats.')
-
-            path = Path(polygon)
-            inside = int(path.contains_point(point))
-            count += inside
-
-        region_count[region] = count
-
-    return region_count
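-
-# Hypothetical usage sketch (points given as (lon, lat) tuples):
-#   polygon_contains_point([(2.35, 48.86), (13.40, 52.52)], ['FR', 'DE'])
-#   # counts how many of the two points fall inside each region's polygon, e.g. {'FR': 1, 'DE': 1}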
-
-
-
-
-def plot_basemap(coordinate_dict):
-    """Creates the base of the plot functions.
-
-    Parameters:
-
-    ------------
-
-    coordinate_dict : dict
-        Dictionary containing coordinate pairs within regions of interest.
-
-    Returns:
-
-    ------------
-
-    dict
-        Dictionary containing various elements of the plot.
-
-    """
-
-    coordinate_list = list(set([val for vals in coordinate_dict.values() for val in vals]))
-
-    longitudes = [i[0] for i in coordinate_list]
-    latitudes = [i[1] for i in coordinate_list]
-
-    lon0, lat0, minlon, maxlon, minlat, maxlat, mapH, mapW = centerMap(longitudes, latitudes)
-
-    land_50m = cfeature.NaturalEarthFeature('physical', 'land', '50m',
-                                            edgecolor='darkgrey',
-                                            facecolor=cfeature.COLORS['land_alt1'])
-
-    proj = ccrs.PlateCarree()
-    plt.figure(figsize=(10, 6))
-
-    ax = plt.axes(projection=proj)
-    ax.set_extent([minlon, maxlon, minlat, maxlat], proj)
-
-    ax.add_feature(land_50m, linewidth=0.5)
-    ax.add_feature(cfeature.OCEAN, facecolor='white')
-    ax.add_feature(cfeature.LAKES, facecolor='white')
-    ax.add_feature(cfeature.BORDERS.with_scale('50m'), edgecolor='darkgrey', linewidth=0.5)
-
-    gl = ax.gridlines(crs=proj, draw_labels=True, linewidth=0.5, color='gray', alpha=0.3, linestyle='--')
-    gl.xlabels_top = False
-    gl.ylabels_left = False
-    gl.xlabels_bottom = True
-    gl.xlocator = mticker.FixedLocator(np.arange(np.floor(minlon), np.ceil(maxlon+10), 5))
-    gl.ylocator = mticker.FixedLocator(np.arange(np.floor(minlat)-1, np.ceil(maxlat+10), 5))
-    gl.xformatter = LONGITUDE_FORMATTER
-    gl.yformatter = LATITUDE_FORMATTER
-
-    gl.xlabel_style = {'size': 6, 'color': 'gray'}
-    gl.ylabel_style = {'size': 6, 'color': 'gray'}
-
-    ax.outline_patch.set_edgecolor('white')
-
-    return {'basemap': ax,
-            'projection': proj,
-            'lons': longitudes,
-            'lats': latitudes,
-            'width': mapW}
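-
-# Hypothetical usage sketch (coordinates_dict as produced by return_coordinates above):
-#   base = plot_basemap(coordinates_dict)
-#   base['basemap'].scatter(base['lons'], base['lats'], transform=base['projection'],
-#                           marker='.', s=base['width'] / 1e5, zorder=3)
-#   plt.savefig('deployment_map.pdf', bbox_inches='tight', dpi=300)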
-
-
-
-
-
-
-def read_inputs_plotting(output_path):
-    """Reads parameter file for plotting purposes.
-
-    Parameters:
-
-    ------------
-
-    output_path : str
-        Path towards output data.
-
-    Returns:
-
-    ------------
-
-    data : dict
-        Dictionary containing run parameters.
-
-    """
-
-    path_to_input = join(output_path, 'parameters.yml')
-
-    with open(path_to_input) as infile:
-        data = yaml.safe_load(infile)
-
-    return data
-
-
-
-
-
-def read_output(run_name):
-    """Reads outputs for a given run.
-
-    Parameters:
-
-    ------------
-
-    run_name : str
-        The name of the run (given by the function init_folder in tools.py).
-
-    Returns:
-
-    ------------
-
-    output_pickle : dict
-        Dict-like structure containing various relevant data structures.
-
-    """
-
-    path_to_file = join('../output_data/', run_name, 'output_model.p')
-    output_pickle = pickle.load(open(path_to_file, 'rb'))
-
-    return output_pickle
\ No newline at end of file