Commit 0277154d authored by David Radu

adding capacity-based criticality definition; plugging in capacities, instead of cardinalities

parent 90896847
@@ -7,18 +7,21 @@ spatial_resolution: 0.25
# Start time and end time of the analysis.
time_slice: ['2018-01-01T00:00', '2018-01-31T23:00']
# Technologies to deploy.
regions: ['BE', 'LT', 'LV', 'EE', 'HR']
#regions: ['GB', 'NL', 'FR', 'DE', 'DK', 'NO', 'PL', 'IE', 'IT', 'SE', 'FI', 'ES', 'GR', 'PT', 'BE', 'LT', 'LV', 'EE', 'HR']
regions: ['BE', 'NL', 'LT', 'LV', 'EE', 'HR']
technologies: ['wind_offshore']
deployments: [[3], [4], [3], [2], [1]]
#deployments: [[80], [60], [57], [36], [35], [30], [28], [22], [20], [20], [15], [13], [10], [9], [6], [4], [3], [1], [1]]
deployments: [[6], [60], [4], [3], [1], [1]]
siting_params:
smooth_measure: 'mean' # median, percentiles
# Defines how \alpha is considered in space and time.
alpha: 'load_central' # load_central, load_partition
# Normalization procedures (detailed in tools.py). (min, max)
norm_type: 'max' # max, min
alpha:
method: 'load' # 'load'
coverage: 'system' # 'partition'
smoothing: 'mean' # 'median'
norm: 'min' # 'max'
# Time-window length used to compute the criticality indicator. Integer value.
delta: 1 # \in \mathbb{N}
delta: 3 # \in \mathbb{N}
# Threshold
c: 1 # < n, \in \mathbb{N}
# Solution method: BB or HEU or RAND or GRED.
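Note on the siting parameters above: delta fixes the length (in hours) of the time windows over which capacity factors are smoothed, and c is the coverage threshold used by the solution methods. A minimal sketch of how the two combine downstream, with illustrative names that are not part of the repository:

# Illustrative sketch: the criticality matrix D (windows x locations, 0/1), a candidate
# deployment x (0/1 per location) and the threshold c yield the objective maximised by the heuristics.
import numpy as np

def covered_windows(D: np.ndarray, x: np.ndarray, c: int) -> int:
    # a window counts as covered when at least c of the selected sites are flagged in it
    return int(np.sum(D @ x >= c))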
@@ -19,6 +19,9 @@ wind_onshore:
terrain_slope_threshold: 0.03
forestry_ratio_threshold: 0.8
latitude_threshold: 65.
legacy_min: 0.1
power_density: 5. # MW/sqkm
land_utilization_factor: 0.5
wind_offshore:
where: 'offshore'
@@ -43,6 +46,8 @@ wind_offshore:
distance_threshold_min: 22.2
distance_threshold_max: 222.0 # 111.
legacy_min: 0.1
power_density: 6. # MW/sqkm
land_utilization_factor: 0.5
wind_floating:
where: 'offshore'
@@ -66,6 +71,9 @@ wind_floating:
latitude_threshold: 65.
distance_threshold_min: 23.
distance_threshold_max: 180.
legacy_min: 0.1
power_density: 5. # MW/sqkm
land_utilization_factor: 0.5
pv_utility:
where: 'onshore'
@@ -84,6 +92,9 @@ pv_utility:
terrain_slope_threshold: 0.03
forestry_ratio_threshold: 0.8
latitude_threshold: 65.
legacy_min: 0.1
power_density: 5. # MW/sqkm
land_utilization_factor: 0.5
#TODO: fix pv_residential filters
pv_residential:
@@ -103,3 +114,6 @@ pv_residential:
terrain_slope_threshold: 1.
forestry_ratio_threshold: 1.
latitude_threshold: 65.
legacy_min: 0.1
power_density: 5. # MW/sqkm
land_utilization_factor: 0.5
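For reference, the two parameters added to every technology above act multiplicatively: the installable capacity per square kilometre of eligible area is power_density times land_utilization_factor. A quick check with the values from this file:

# Usable capacity density [MW/km2] = power_density * land_utilization_factor
densities = {'wind_offshore': 6. * 0.5, 'wind_onshore': 5. * 0.5, 'pv_utility': 5. * 0.5}
print(densities)  # {'wind_offshore': 3.0, 'wind_onshore': 2.5, 'pv_utility': 2.5}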
@@ -6,12 +6,17 @@ import pycountry as pyc
import xarray as xr
import yaml
from geopandas import read_file, GeoSeries
from numpy import hstack, arange, dtype, array, timedelta64, nan, sum
from pandas import read_csv, to_datetime, Series, notnull
from numpy import hstack, arange, dtype, array, timedelta64, nan, sum, deg2rad, sin, cos, arccos, ceil
from pandas import read_csv, to_datetime, Series, notnull, MultiIndex
from shapely import prepared
from shapely.geometry import Point
from shapely.ops import unary_union
from xarray import concat
import geopy
import logging
logging.basicConfig(level=logging.INFO, format=f"%(levelname)s %(asctime)s - %(message)s", datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
def chunk_split(l, n):
@@ -343,9 +348,83 @@ def return_coordinates_from_shapefiles(resource_dataset, shapefiles_region):
return coordinates_in_region
def retrieve_load_data_partitions(data_path, date_slice, alpha, delta, regions, norm_type):
def capacity_to_cardinality(dataset, model_config, tech_config, site_coordinates_dict, legacy_coordinates_dict,
reference_lat=55, reference_lon=10, lat_dist_per_deg=111):
spatial_resolution = model_config['spatial_resolution']
deployment_dict = get_deployment_vector(model_config['regions'],
model_config['technologies'],
model_config['deployments'])
lon1 = deg2rad(reference_lon)
lon2 = deg2rad(reference_lon+spatial_resolution)
reference_lat = deg2rad(reference_lat)
dist_lat = lat_dist_per_deg*spatial_resolution
dist_lon = arccos(sin(reference_lat)*sin(reference_lat)+cos(reference_lat)*cos(reference_lat)*cos(lon2-lon1))*6371
cell_area = dist_lat*dist_lon
cardinality_dict = deepcopy(deployment_dict)
adj_cardinality_dict = deepcopy(deployment_dict)
legacy_dict = deepcopy(deployment_dict)
key_list = return_dict_keys(deployment_dict)
for region, tech in key_list:
tech_potential = tech_config[tech]['power_density'] * tech_config[tech]['land_utilization_factor'] * cell_area
cardinality_dict[region][tech] = int(ceil(deployment_dict[region][tech]/tech_potential*1e3))
assert alpha in ['load_central', 'load_partition'], f"Criticality definition {alpha} not available."
shape_region = union_regions([region], model_config['data_path'], which=tech_config[tech]['where'])
points_in_region = return_coordinates_from_shapefiles(dataset, shape_region)
legacy_dict[region][tech] = list(set(legacy_coordinates_dict[tech]).intersection(set(points_in_region)))
adj_cardinality_dict[region][tech] = min(len(site_coordinates_dict[region][tech]),
max(len(legacy_dict[region][tech]), cardinality_dict[region][tech]))
logger.info(f"Picking {adj_cardinality_dict[region][tech]} {tech} sites in {region} out of "
f"{len(site_coordinates_dict[region][tech])} candidate ones.")
return adj_cardinality_dict
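To make the capacity-to-cardinality conversion above easier to follow, here is a self-contained sketch (function and argument names are illustrative only): the regional GW target is divided by the installable potential of a single grid cell, whose area follows from the spherical law of cosines at a reference latitude.

# Illustrative sketch of the conversion performed by capacity_to_cardinality.
from math import ceil, radians, sin, cos, acos

def cells_needed(capacity_gw, power_density_mw_per_km2, utilization, spatial_resolution,
                 ref_lat=55., km_per_deg_lat=111., earth_radius_km=6371.):
    # cell height: each degree of latitude spans roughly 111 km
    dist_lat = km_per_deg_lat * spatial_resolution
    # cell width at the reference latitude, via the spherical law of cosines
    lat = radians(ref_lat)
    dlon = radians(spatial_resolution)
    dist_lon = acos(sin(lat) ** 2 + cos(lat) ** 2 * cos(dlon)) * earth_radius_km
    cell_potential_mw = power_density_mw_per_km2 * utilization * dist_lat * dist_lon
    return ceil(capacity_gw * 1e3 / cell_potential_mw)

# e.g. cells_needed(6., 6., 0.5, 0.25) -> 5 cells for a 6 GW offshore target,
# which is then clipped between the number of legacy sites and of candidate sites.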
def get_potential_per_site(input_dict, tech_parameters, spatial_resolution):
"""Compute cell potential of candidate siting locations based on reanalysis grids with different resolutions.
Parameters
----------
input_dict: dict
tech_parameters: dict
spatial_resolution: float
Returns
-------
output_dict: dict
"""
key_list = return_dict_keys(input_dict)
output_dict = deepcopy(input_dict)
for region, tech in key_list:
tech_dict = tech_parameters[tech]
locations = MultiIndex.from_tuples(input_dict[region][tech].locations.values, names=('longitude', 'latitude'))
potentials = Series(0., index=locations)
for (lon, lat) in potentials.index:
lat_south = (lon, lat - spatial_resolution / 2.)
lat_north = (lon, lat + spatial_resolution / 2.)
lon_west = (lon - spatial_resolution / 2., lat)
lon_east = (lon + spatial_resolution / 2., lat)
dist_lat = geopy.distance.distance(geopy.distance.lonlat(*lat_south), geopy.distance.lonlat(*lat_north)).km
dist_lon = geopy.distance.distance(geopy.distance.lonlat(*lon_west), geopy.distance.lonlat(*lon_east)).km
# 1e-3 converts from MW to GW
potentials[(lon, lat)] = \
dist_lat * dist_lon * tech_dict['power_density'] * tech_dict['land_utilization_factor'] * 1e-3
output_dict[region][tech] = xr.DataArray.from_series(potentials).stack(locations=('longitude', 'latitude'))\
.dropna(dim='locations').reindex_like(input_dict[region][tech])
return output_dict
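Stripped of the xarray bookkeeping, the per-cell computation inside get_potential_per_site boils down to the following sketch, reusing the same geopy calls (geopy.distance.lonlat expects longitude first):

import geopy.distance

def cell_potential_gw(lon, lat, spatial_resolution, power_density_mw_per_km2, utilization):
    half = spatial_resolution / 2.
    dist_lat = geopy.distance.distance(geopy.distance.lonlat(lon, lat - half),
                                       geopy.distance.lonlat(lon, lat + half)).km
    dist_lon = geopy.distance.distance(geopy.distance.lonlat(lon - half, lat),
                                       geopy.distance.lonlat(lon + half, lat)).km
    # 1e-3 converts MW to GW, as in the function above
    return dist_lat * dist_lon * power_density_mw_per_km2 * utilization * 1e-3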
def smooth_load_data(data_path, regions, date_slice, delta):
load_data_fn = join(data_path, 'input/load_data', 'load_entsoe_2006_2020_full.csv')
load_data = read_csv(load_data_fn, index_col=0)
@@ -353,26 +432,29 @@ def retrieve_load_data_partitions(data_path, date_slice, alpha, delta, regions,
load_data_sliced = load_data.loc[date_slice[0]:date_slice[1]]
regions_list = return_region_divisions(regions, data_path)
load_vector = load_data_sliced[regions_list]
load_vector_rolling = load_vector.rolling(window=delta, center=True).mean().dropna()
if alpha == 'load_central':
load_vector = load_data_sliced[regions_list].sum(axis=1)
elif alpha == 'load_partition':
load_vector = load_data_sliced[regions_list]
return load_vector_rolling
load_vector_norm = return_filtered_and_normed(load_vector, delta, norm_type)
def norm_load_by_load(load_vector_rolling, norm):
if norm == 'min':
load_vector_norm = (load_vector_rolling - load_vector_rolling.min()) / \
(load_vector_rolling.max() - load_vector_rolling.min())
else:
load_vector_norm = load_vector_rolling.divide(load_vector_rolling.max())
return load_vector_norm
def return_filtered_and_normed(signal, delta, norm_type='min'):
def norm_load_by_deployments(load_vector_rolling, deployments):
l_smooth = signal.rolling(window=delta, center=True).mean().dropna()
if norm_type == 'min':
l_norm = (l_smooth - l_smooth.min()) / (l_smooth.max() - l_smooth.min())
else:
l_norm = l_smooth / l_smooth.max()
load_vector_norm = load_vector_rolling.divide(deployments)
return l_norm.values
return load_vector_norm
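A small numeric illustration of the two normalisation helpers above (numbers are made up):

import pandas as pd

load = pd.Series([40., 55., 70.])  # smoothed load, e.g. in GW
print(((load - load.min()) / (load.max() - load.min())).tolist())  # 'min' norm -> [0.0, 0.5, 1.0]
print((load / load.max()).tolist())                                # 'max' norm -> approx. [0.57, 0.79, 1.0]
print((load / 75.).tolist())                                       # deployment norm, 75 GW target -> approx. [0.53, 0.73, 0.93]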
def filter_onshore_offshore_locations(coordinates_in_region, data_path, spatial_resolution, tech_dict, tech):
@@ -26,7 +26,7 @@ using Distributions
function simulated_annealing_local_search_partition(D::Array{Float64, 2}, c::Float64, n::Vector{Int64}, N::Int64, I::Int64, E::Int64, T_init::Float64, x_init::Array{Float64, 1}, locations_regions_mapping::Dict{Int64, Int64}, legacy_locations::Vector{Int64})
W, L = size(D)
P = maximum(values(locations_regions_mapping))
R = maximum(values(locations_regions_mapping))
# Pre-allocate lower bound vector
obj = Vector{Int64}(undef, I)
@@ -38,28 +38,28 @@ function simulated_annealing_local_search_partition(D::Array{Float64, 2}, c::Flo
ind_ones2zeros_tmp = Vector{Int64}(undef, N)
ind_zeros2ones_tmp = Vector{Int64}(undef, N)
regions = [i for i in 1:P]
sample_count_per_region = Vector{Int64}(undef, P)
init_sample_count_per_region = zeros(Int64, P)
ind_samples_per_region_tmp = Vector{Int64}(undef, P+1)
ind_samples_per_region_candidate = Vector{Int64}(undef, P+1)
locations_count_per_region = zeros(Int64, P)
legacy_locations_count_per_region = zeros(Int64, P)
index_range_per_region = Vector{Int64}(undef, P+1)
@inbounds for i = 1:L
if i in legacy_locations
legacy_locations_count_per_region[locations_regions_mapping[i]] += 1
regions = [i for i in 1:R]
sample_count_per_region = Vector{Int64}(undef, R)
init_sample_count_per_region = zeros(Int64, R)
ind_samples_per_region_tmp = Vector{Int64}(undef, R+1)
ind_samples_per_region_candidate = Vector{Int64}(undef, R+1)
locations_count_per_region = zeros(Int64, R)
legacy_locations_count_per_region = zeros(Int64, R)
index_range_per_region = Vector{Int64}(undef, R+1)
@inbounds for l = 1:L
if l in legacy_locations
legacy_locations_count_per_region[locations_regions_mapping[l]] += 1
end
locations_count_per_region[locations_regions_mapping[i]] += 1
locations_count_per_region[locations_regions_mapping[l]] += 1
end
ind_ones_incumbent = Dict([(r, Vector{Int64}(undef, n[r]-legacy_locations_count_per_region[r])) for r in regions])
ind_zeros_incumbent = Dict([(r, Vector{Int64}(undef, locations_count_per_region[r]-n[r])) for r in regions])
index_range_per_region[1] = 1
@inbounds for j = 1:P
index_range_per_region[j+1] = index_range_per_region[j] + locations_count_per_region[j]
@inbounds for r = 1:R
index_range_per_region[r+1] = index_range_per_region[r] + locations_count_per_region[r]
end
# Pre-allocate y-related arrays
@@ -70,21 +70,21 @@ function simulated_annealing_local_search_partition(D::Array{Float64, 2}, c::Flo
Dx_tmp = Array{Float64}(undef, W, 1)
# Initialise
ind_ones, counter_ones = findall(x_init .== 1.), zeros(Int64, P)
ind_ones, counter_ones = findall(x_init .== 1.), zeros(Int64, R)
Dx_incumbent .= sum(view(D, :, ind_ones), dims=2)[:,1]
filter!(a -> !(a in legacy_locations), ind_ones)
@inbounds for ind in ind_ones
p = locations_regions_mapping[ind]
counter_ones[p] += 1
ind_ones_incumbent[p][counter_ones[p]] = ind
r = locations_regions_mapping[ind]
counter_ones[r] += 1
ind_ones_incumbent[r][counter_ones[r]] = ind
end
y_incumbent .= Dx_incumbent .>= c
ind_zeros, counter_zeros = findall(x_init .== 0.), zeros(Int64, P)
ind_zeros, counter_zeros = findall(x_init .== 0.), zeros(Int64, R)
for ind in ind_zeros
p = locations_regions_mapping[ind]
counter_zeros[p] += 1
ind_zeros_incumbent[p][counter_zeros[p]] = ind
r = locations_regions_mapping[ind]
counter_zeros[r] += 1
ind_zeros_incumbent[r][counter_zeros[r]] = ind
end
ind_samples_per_region_tmp[1] = 1
@@ -96,22 +96,24 @@ function simulated_annealing_local_search_partition(D::Array{Float64, 2}, c::Flo
# Sample from neighbourhood
sample_count_per_region .= init_sample_count_per_region
@inbounds while sum(sample_count_per_region) < N
p = sample(regions)
if (sample_count_per_region[p] < n[p] - legacy_locations_count_per_region[p]) && (sample_count_per_region[p] < locations_count_per_region[p] - n[p] + legacy_locations_count_per_region[p])
sample_count_per_region[p] += 1
r = sample(regions)
if (sample_count_per_region[r] < n[r] - legacy_locations_count_per_region[r]) && (sample_count_per_region[r] < locations_count_per_region[r] - n[r])
sample_count_per_region[r] += 1
end
end
@inbounds for i = 1:P
ind_samples_per_region_tmp[i+1] = ind_samples_per_region_tmp[i] + sample_count_per_region[i]
if sample_count_per_region[i] != 0
view(ind_ones2zeros_tmp, ind_samples_per_region_tmp[i]:(ind_samples_per_region_tmp[i+1]-1)) .= sample(ind_ones_incumbent[i], sample_count_per_region[i], replace=false)
view(ind_zeros2ones_tmp, ind_samples_per_region_tmp[i]:(ind_samples_per_region_tmp[i+1]-1)) .= sample(ind_zeros_incumbent[i], sample_count_per_region[i], replace=false)
@inbounds for r = 1:R
ind_samples_per_region_tmp[r+1] = ind_samples_per_region_tmp[r] + sample_count_per_region[r]
if sample_count_per_region[r] != 0
view(ind_ones2zeros_tmp, ind_samples_per_region_tmp[r]:(ind_samples_per_region_tmp[r+1]-1)) .= sample(ind_ones_incumbent[r], sample_count_per_region[r], replace=false)
view(ind_zeros2ones_tmp, ind_samples_per_region_tmp[r]:(ind_samples_per_region_tmp[r+1]-1)) .= sample(ind_zeros_incumbent[r], sample_count_per_region[r], replace=false)
end
end
# Compute y and associated objective value
Dx_tmp .= Dx_incumbent .+ sum(view(D, :, ind_zeros2ones_tmp), dims = 2) .- sum(view(D, :, ind_ones2zeros_tmp), dims = 2)
for j = 1:N
Dx_tmp .= Dx_incumbent .+ view(D, :, ind_zeros2ones_tmp[j]) .- view(D, :, ind_ones2zeros_tmp[j])
end
y_tmp .= Dx_tmp .>= c
# Update objective difference
@@ -126,11 +128,13 @@ function simulated_annealing_local_search_partition(D::Array{Float64, 2}, c::Flo
end
end
if delta_candidate > 0
@inbounds for i = 1:P
ind_ones_incumbent[i] .= union(setdiff(ind_ones_incumbent[i], view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1))), view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1)))
ind_zeros_incumbent[i] .= union(setdiff(ind_zeros_incumbent[i], view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1))), view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1)))
@inbounds for r = 1:R
ind_ones_incumbent[r] .= union(setdiff(ind_ones_incumbent[r], view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[r]:(ind_samples_per_region_candidate[r+1]-1))), view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[r]:(ind_samples_per_region_candidate[r+1]-1)))
ind_zeros_incumbent[r] .= union(setdiff(ind_zeros_incumbent[r], view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[r]:(ind_samples_per_region_candidate[r+1]-1))), view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[r]:(ind_samples_per_region_candidate[r+1]-1)))
end
for j = 1:N
Dx_tmp .= Dx_incumbent .+ view(D, :, ind_zeros2ones_tmp[j]) .- view(D, :, ind_ones2zeros_tmp[j])
end
Dx_incumbent .= Dx_incumbent .+ sum(view(D, :, ind_zeros2ones_candidate), dims = 2) .- sum(view(D, :, ind_ones2zeros_candidate), dims = 2)
y_incumbent .= Dx_incumbent .>= c
else
T = T_init * exp(-10*i/I)
@@ -138,18 +142,20 @@ function simulated_annealing_local_search_partition(D::Array{Float64, 2}, c::Flo
d = Binomial(1, p)
b = rand(d)
if b == 1
@inbounds for i = 1:P
ind_ones_incumbent[i] .= union(setdiff(ind_ones_incumbent[i], view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1))), view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1)))
ind_zeros_incumbent[i] .= union(setdiff(ind_zeros_incumbent[i], view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1))), view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[i]:(ind_samples_per_region_candidate[i+1]-1)))
@inbounds for r = 1:R
ind_ones_incumbent[r] .= union(setdiff(ind_ones_incumbent[r], view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[r]:(ind_samples_per_region_candidate[r+1]-1))), view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[r]:(ind_samples_per_region_candidate[r+1]-1)))
ind_zeros_incumbent[r] .= union(setdiff(ind_zeros_incumbent[r], view(ind_zeros2ones_candidate, ind_samples_per_region_candidate[r]:(ind_samples_per_region_candidate[r+1]-1))), view(ind_ones2zeros_candidate, ind_samples_per_region_candidate[r]:(ind_samples_per_region_candidate[r+1]-1)))
end
for j = 1:N
Dx_tmp .= Dx_incumbent .+ view(D, :, ind_zeros2ones_tmp[j]) .- view(D, :, ind_ones2zeros_tmp[j])
end
Dx_incumbent .= Dx_incumbent .+ sum(view(D, :, ind_zeros2ones_candidate), dims = 2) .- sum(view(D, :, ind_ones2zeros_candidate), dims = 2)
y_incumbent .= Dx_incumbent .>= c
end
end
end
@inbounds for i in 1:P
x_incumbent[ind_ones_incumbent[i]] .= 1.
x_incumbent[ind_zeros_incumbent[i]] .= 0.
@inbounds for r in 1:R
x_incumbent[ind_ones_incumbent[r]] .= 1.
x_incumbent[ind_zeros_incumbent[r]] .= 0.
end
LB = sum(y_incumbent)
return x_incumbent, LB, obj
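For readers skimming the Julia changes, the move-acceptance logic in the loop above can be summarised as follows. The exact acceptance probability p is defined outside this hunk, so the Metropolis-style form below is an assumption; the temperature schedule is the one visible in the code.

# Assumed sketch of the simulated-annealing acceptance step (written in Python for readability).
import math, random

def accept(delta_candidate, i, n_iterations, t_init):
    if delta_candidate > 0:  # improving swaps are always kept
        return True
    t = t_init * math.exp(-10 * i / n_iterations)            # temperature schedule from the Julia code
    return random.random() < math.exp(delta_candidate / t)   # assumed Metropolis-style rule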
@@ -3,7 +3,8 @@ import julia
from os.path import join
from numpy import argmax
from helpers import read_inputs, init_folder, xarray_to_ndarray, generate_jl_input, get_deployment_vector
from helpers import read_inputs, init_folder, xarray_to_ndarray, generate_jl_input, \
get_potential_per_site, capacity_to_cardinality
from tools import read_database, return_filtered_coordinates, selected_data, return_output, resource_quality_mapping, \
critical_window_mapping, sites_position_mapping, retrieve_location_dict, retrieve_site_data
@@ -22,20 +22,21 @@ if __name__ == '__main__':
data_path = model_parameters['data_path']
spatial_resolution = model_parameters['spatial_resolution']
time_horizon = model_parameters['time_slice']
deployment_dict = get_deployment_vector(model_parameters['regions'],
model_parameters['technologies'],
model_parameters['deployments'])
database = read_database(data_path, spatial_resolution)
site_coordinates, legacy_coordinates = return_filtered_coordinates(database, model_parameters, tech_parameters)
truncated_data = selected_data(database, site_coordinates, time_horizon)
capacity_factors_data = return_output(truncated_data, data_path)
time_windows_data = resource_quality_mapping(capacity_factors_data, siting_parameters)
criticality_data = xarray_to_ndarray(critical_window_mapping(time_windows_data, model_parameters))
site_positions = sites_position_mapping(time_windows_data)
deployment_dict = capacity_to_cardinality(database, model_parameters, tech_parameters,
site_coordinates, legacy_coordinates)
site_potential_data = get_potential_per_site(time_windows_data, tech_parameters, spatial_resolution)
criticality_data = xarray_to_ndarray(critical_window_mapping(time_windows_data, site_potential_data,
deployment_dict, model_parameters))
jl_dict = generate_jl_input(deployment_dict, site_coordinates, site_positions, legacy_coordinates)
logger.info('Data pre-processing finished. Opening Julia instance.')
@@ -86,7 +88,7 @@ if __name__ == '__main__':
jl_locations_vector = jl_sel[jl_objective_pick, :]
locations_dict = retrieve_location_dict(jl_locations_vector, model_parameters, site_positions)
retrieve_site_data(model_parameters, capacity_factors_data, criticality_data,
retrieve_site_data(model_parameters, capacity_factors_data, criticality_data, deployment_dict,
site_positions, locations_dict, legacy_coordinates, output_folder, benchmark='PROD')
logger.info(f"Results written to {output_folder}")
@@ -19,7 +19,11 @@ from windpowerlib import power_curves, wind_speed
from helpers import filter_onshore_offshore_locations, union_regions, return_coordinates_from_shapefiles, \
concatenate_dict_keys, return_dict_keys, chunk_split, collapse_dict_region_level, read_inputs, \
retrieve_load_data_partitions, get_partition_index, return_region_divisions, get_deployment_vector
smooth_load_data, get_partition_index, return_region_divisions, norm_load_by_deployments, norm_load_by_load
import logging
logging.basicConfig(level=logging.INFO, format=f"%(levelname)s %(asctime)s - %(message)s", datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
def read_database(data_path, spatial_resolution):
@@ -331,9 +335,6 @@ def return_filtered_coordinates(dataset, model_params, tech_params):
"""
technologies = model_params['technologies']
regions = model_params['regions']
deployment_dict = get_deployment_vector(model_params['regions'],
model_params['technologies'],
model_params['deployments'])
output_dict = {region: {tech: None for tech in technologies} for region in regions}
coordinates_dict = {key: None for key in technologies}
@@ -372,20 +373,10 @@ def return_filtered_coordinates(dataset, model_params, tech_params):
shape_region = union_regions([region], model_params['data_path'], which=tech_dict['where'])
points_in_region = return_coordinates_from_shapefiles(dataset, shape_region)
legacy_in_region = list(set(legacy_dict[tech]).intersection(set(points_in_region)))
assert len(legacy_in_region) <= deployment_dict[region][tech], \
f"More legacy sites ({len(legacy_in_region)}) than desired deployments " \
f"({deployment_dict[region][tech]}) in {region}. Revise assumptions."
points_to_keep = list(set(coordinates_dict[tech]).intersection(set(points_in_region)))
output_dict[region][tech] = [p for p in points_to_keep if p not in unique_list_of_points]
unique_list_of_points.extend(points_to_keep)
assert deployment_dict[region][tech] <= len(output_dict[region][tech]), \
f"Not enough candidate {tech} sites ({len(output_dict[region][tech])}) " \
f"for the desired deployments ({deployment_dict[region][tech]}) in {region}. Revise assumptions."
# print(f"{len(output_dict[region][tech])} {tech} sites in {region}.")
for key, value in output_dict.items():
output_dict[key] = {k: v for k, v in output_dict[key].items() if len(v) > 0}
@@ -580,7 +571,7 @@ def return_output(input_dict, data_path, smooth_wind_power_curve=True):
def resource_quality_mapping(input_dict, siting_params):
delta = siting_params['delta']
measure = siting_params['smooth_measure']
measure = siting_params['alpha']['smoothing']
assert measure in ['mean', 'median'], f"Measure {measure} not available."
@@ -610,40 +601,68 @@ def resource_quality_mapping(input_dict, siting_params):
return output_dict
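The effect of the smoothing step above on toy numbers, assuming a centred rolling window like the one used for the load data (the 'median' measure would take the window median instead):

import pandas as pd

cf = pd.Series([0.2, 0.5, 0.8, 0.1])  # hourly capacity factors
print(cf.rolling(window=3, center=True).mean().dropna().tolist())  # delta = 3 -> approx. [0.5, 0.467]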
def critical_window_mapping(input_dict, model_params):
def critical_window_mapping(time_windows_dict, potentials_dict, deployments_dict, model_params):
regions = model_params['regions']
date_slice = model_params['time_slice']
alpha = model_params['siting_params']['alpha']
delta = model_params['siting_params']['delta']
norm_type = model_params['siting_params']['norm_type']
data_path = model_params['data_path']
key_list = return_dict_keys(input_dict)
output_dict = deepcopy(input_dict)
key_list = return_dict_keys(time_windows_dict)
output_dict = deepcopy(time_windows_dict)
assert alpha in ['load_central', 'load_partition'], f"Criticality definition {alpha} not available."
assert alpha['method'] in ['load', 'potential'], f"Criticality definition based on {alpha['method']} not available."
assert alpha['coverage'] in ['partition', 'system'], f"Criticality coverage {alpha['coverage']} not available."
assert alpha['norm'] in ['min', 'max'], f"Norm {alpha['norm']} not available."
if alpha == 'load_central':
load_ds = smooth_load_data(data_path, regions, date_slice, delta)
l_norm = retrieve_load_data_partitions(data_path, date_slice, alpha, delta, regions, norm_type)
# Flip axes
alpha_reference = l_norm[:, newaxis]
if alpha['coverage'] == 'system':
for region, tech in key_list:
critical_windows = (input_dict[region][tech] > alpha_reference).astype(int)
output_dict[region][tech] = critical_windows
load_ds_system = load_ds.sum(axis=1)
if alpha['method'] == 'potential':
deployments = sum(deployments_dict[key][subkey] for key in deployments_dict
for subkey in deployments_dict[key])
l_norm = norm_load_by_deployments(load_ds_system, deployments)
# Flip axes
l_norm = l_norm.values[:, newaxis]
for region, tech in key_list:
measure = time_windows_dict[region][tech] * potentials_dict[region][tech]
output_dict[region][tech] = (measure > l_norm).astype(int)
elif alpha == 'load_partition':
else:
l_norm = norm_load_by_load(load_ds_system, alpha['norm'])
# Flip axes
l_norm = l_norm.values[:, newaxis]
for region, tech in key_list:
output_dict[region][tech] = (time_windows_dict[region][tech] > l_norm).astype(int)
elif alpha['coverage'] == 'partition':
for region, tech in key_list:
l_norm = retrieve_load_data_partitions(data_path, date_slice, alpha, delta, region, norm_type)
# Flip axes.
alpha_reference = l_norm[:, newaxis]
# Select region of interest within the dict value with 'tech' key.
critical_windows = (input_dict[region][tech] > alpha_reference).astype(int)
output_dict[region][tech] = critical_windows
load_ds_region = load_ds[region]
if alpha['method'] == 'potential':
deployments = sum(deployments_dict[key][subkey] for key in deployments_dict
for subkey in deployments_dict[key] if key == region)
l_norm = norm_load_by_deployments(load_ds_region, deployments)
# Flipping axes.
l_norm = l_norm.values[:, newaxis]
measure = time_windows_dict[region][tech] * potentials_dict[region][tech]
output_dict[region][tech] = (measure > l_norm).astype(int)
else:
l_norm = norm_load_by_load(load_ds_region, alpha['norm'])
# Flipping axes.
l_norm = l_norm.values[:, newaxis]
output_dict[region][tech] = (time_windows_dict[region][tech] > l_norm).astype(int)
return output_dict
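The two branches of the new criticality definition, side by side on invented numbers (the 'potential' method compares producible power against load scaled by the deployment target; the 'load' method compares raw capacity-factor windows against normalised load):

import numpy as np

windows = np.array([[0.3, 0.6], [0.8, 0.2]])  # smoothed capacity factors, (time windows, sites)
potential = np.array([1.2, 0.9])              # per-site potential, GW
load = np.array([1.0, 1.5])                   # smoothed load, GW
deployments = 2.0                              # total deployment target, GW

print(((windows * potential) > (load / deployments)[:, None]).astype(int))  # 'potential' method
l_norm = (load - load.min()) / (load.max() - load.min())                    # 'min' norm
print((windows > l_norm[:, None]).astype(int))                              # 'load' method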
@@ -693,12 +712,9 @@ def retrieve_index_dict(deployment_vector, coordinate_dict):
return n, dict_deployment, partitions, indices
def retrieve_site_data(model_parameters, capacity_factor_data, criticality_data,
def retrieve_site_data(model_parameters, capacity_factor_data, criticality_data, deployment_dict,
location_mapping, comp_site_coordinates, legacy_sites, output_folder, benchmark):
deployment_dict = get_deployment_vector(model_parameters['regions'],
model_parameters['technologies'],
model_parameters['deployments'])
c = model_parameters['siting_params']['c']
output_by_tech = collapse_dict_region_level(capacity_factor_data)