Commit 2337d30a authored by David Radu

Major update, merging with locals.

parent d705560c
@@ -2,92 +2,63 @@
spatial_resolution: 0.5
# Path towards the various input data.
path_resource_data: '../input_data/resource_data/'
path_transfer_function_data: '../input_data/transfer_functions/'
path_population_density_data: '../input_data/population_density/'
path_protected_areas_data: '../input_data/protected_areas/'
path_landseamask: '../input_data/land_mask/'
path_load_data: '../input_data/load_data/'
path_bus_data: '../input_data/transmission_data/'
path_orography_data: '../input_data/land_mask/'
path_resource_data: '../input_data/resource_data'
path_transfer_function_data: '../input_data/transfer_functions'
path_population_density_data: '../input_data/population_density'
path_protected_areas_data: '../input_data/protected_areas'
path_land_data: '../input_data/land_data'
path_load_data: '../input_data/load_data'
path_transmission_data: '../input_data/transmission_data'
path_potential_data: '../input_data/potentials'
path_legacy_data : '../input_data/legacy'
path_shapefile_data: '../input_data/shapefiles'
# Various data layers to be taken into account in potential site selection.
resource_quality_layer: True
population_density_layer: True
protected_areas_layer: False
bathymetry_layer: True
orography_layer: True
forestry_layer: True
# Threshold above which no generation capacity is deployed.
population_density_threshold: 150.
# Selected protected areas. Check here for details: http://datasets.wri.org/dataset/64b69c0fb0834351bd6c0ceb3744c5ad
protected_areas_selection: ['Ia', 'Ib', 'II', 'Not Applicable']
# Distance threshold from protected areas (nothing is built closer than this value, in km).
protected_areas_threshold: 20.
# Depth threshold for offshore locations.
depth_threshold: 100.
# Altitude threshold for onshore locations.
altitude_threshold: 1500.
# Slope threshold for onshore locations (irrespective of the underlying resource).
slope_threshold: 0.03
# Forestry threshold (as share [0.0 - 1.0] of forestry within the given ERA5 node).
forestry_threshold: 0.8
water_mask_layer: True
latitude_layer: True
legacy_layer: True
distance_layer: True
# Start time and end time of the analysis.
time_slice: ['2013-01-01T00:00', '2013-01-31T23:00']
time_slice: ['2015-01-01T00:00', '2015-01-31T23:00']
# List of regions to be considered in the optimization. Note that a list of pre-defined regions is available
# in tools.py, within the `return_coordinates_from_countries` function. If additional regions are defined,
# an associated load signal should be assigned in the `read_load_data` function.
regions: ['US']
# Whether to consider offshore points associated with these regions.
add_offshore: True
regions: ['DE']
# Technologies to deploy. Must have a 'RESOURCE_CONVERTER' structure. Only two technologies available for now.
technologies: ['wind_aerodyn', 'solar_tallmaxm']
technologies: ['wind_onshore', 'wind_offshore']
# Assessment measure for each time window. Available: mean, median or percentiles.
resource_quality_measure: 'mean'
# Defines how \alpha is considered in space and time. (load_based, uniform, percentile)
alpha_rule: 'time_dependent'
# For the load-dependent \alpha, it sets how regions are aggregated. (centralized, partitioned)
alpha_plan: 'centralized'
smooth_measure: 'mean'
# Defines how \alpha is considered in space and time.
alpha: 'load_central'
# Normalization procedures (detailed in tools.py). (min, max)
alpha_norm: 'min'
# Capacity factor threshold used to compute the criticality indicators. If given as a float, it is uniform across regions; if given as a percentile, it is assigned location-wise.
alpha_numerical: 0.3
norm_type: 'min'
# Time-window length used to compute the criticality indicator. Integer value.
delta: 1
# Geographical coverage threshold used to compute the criticality indicator. Float value between 0.0 and 1.0.
beta: 0.9
beta: 0.8
# Choice of solver. Available: 'gurobi' and 'cplex'.
solver: 'gurobi'
# MIP gap (0.01 = 1%).
mipgap: 0.05
timelimit: 1800
threads: 0
# Type of problem to be solved. Check models.py for a full list.
main_problem: 'Covering'
# Objective of the problem. Check models.py for a full list.
main_objective: 'cardinality_floor'
# Solution method: None, Projection, Lagrange
# Solution method: None, ASM, Warm, Heuristic, etc.
solution_method: 'None'
# Lagrangian subproblem: Inexact/Exact
subgradient_approximation: 'Inexact'
# Number of Lagrangian iterations. Positive integer value.
no_iterations_Lagrange: 1000
# Number of Simulated Annealing iterations. Positive integer value.
no_iterations_SA: 500
# Number of explorations within an epoch. Positive integer value.
no_explorations_SA: 1000
# List of deployments per region (ordered as the 'regions' list above)
cardinality_constraint: [20]
# Capacity budget in GW. Float value.
capacity_constraint: 200.0
# Monetary budget in units. Float value.
cost_budget: 4000.0
# Dict of deployments per partition (ordered as the 'regions' list above)
deployment_vector: {'DE':{'wind_onshore': 10, 'wind_offshore':3}}
# Keeping files at the end of the run.
keep_files: True
# Choosing a low-memory alternative for model construction.
low_memory: False
keep_files: True
\ No newline at end of file
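The `alpha`, `delta` and `beta` entries above jointly define the siting criterion: `alpha` is the capacity factor threshold behind the criticality indicators, `delta` the time-window length, and `beta` the geographical coverage a window must reach. As a minimal sketch of how a criticality matrix consistent with those comments could be computed (hypothetical names, not code from this repository):

# Illustrative sketch only; function and variable names are hypothetical.
import numpy as np

def build_criticality_matrix(cf, alpha=0.3, delta=1):
    """cf: (T, L) array of hourly capacity factors for L candidate locations.
    Entry (w, l) of the result is 1 when the mean capacity factor of
    location l over the delta-hour window starting at w reaches alpha."""
    T, L = cf.shape
    # Rolling window means via a cumulative sum along the time axis.
    cs = np.vstack([np.zeros((1, L)), np.cumsum(cf, axis=0)])
    window_means = (cs[delta:] - cs[:-delta]) / delta
    return (window_means >= alpha).astype(int)

rng = np.random.default_rng(0)
crit = build_criticality_matrix(rng.random((744, 5)), alpha=0.3, delta=1)
print(crit.shape)  # (744, 5) for the 31-day January time_slice above

The covering model then decides, window by window, whether the selected sites provide coverage of at least `beta`.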
# Config file for various conversion technologies.
wind_onshore:
converter_IV: 'V110'
converter_III: 'V90'
converter_II: 'V90'
converter_I: 'V105'
resource: 'wind'
deployment: 'onshore'
resource_threshold: 5. #m/s
population_density_threshold_low: 0.
population_density_threshold_high: 100.
protected_areas_selection: ['Ia', 'Ib', 'II']
protected_areas_distance_threshold: 10.
depth_threshold_low: 0.
depth_threshold_high: 0.
altitude_threshold: 1500.
terrain_slope_threshold: 0.03
forestry_ratio_threshold: 0.7
latitude_threshold: 65.
wind_offshore:
converter_IV: 'V90'
converter_III: 'V90'
converter_II: 'V164'
converter_I: 'V164'
resource: 'wind'
deployment: 'offshore'
resource_threshold: 7. #m/s
population_density_threshold_low: 0.
population_density_threshold_high: 100.
protected_areas_selection: ['Ia', 'Ib', 'II', 'V']
protected_areas_distance_threshold: 5.
depth_threshold_low: 5.
depth_threshold_high: 299.
altitude_threshold: 0.
terrain_slope_threshold: 1.
forestry_ratio_threshold: 1.
latitude_threshold: 65.
distance_threshold_min: 23.
distance_threshold_max: 111.
wind_floating:
converter_IV: 'V90'
converter_III: 'V90'
converter_II: 'V164'
converter_I: 'V164'
resource: 'wind'
deployment: 'floating'
resource_threshold: 9. #m/s
population_density_threshold_low: 0.
population_density_threshold_high: 100.
protected_areas_selection: ['Ia', 'Ib', 'II', 'V']
protected_areas_distance_threshold: 5.
depth_threshold_low: 200.
depth_threshold_high: 990.
altitude_threshold: 0.
terrain_slope_threshold: 1.
forestry_ratio_threshold: 1.
latitude_threshold: 65.
distance_threshold_min: 23.
distance_threshold_max: 180.
solar_utility:
converter: 'DEG15MC'
resource: 'solar'
deployment: 'utility'
resource_threshold: 130. #W/m2
population_density_threshold_low: 0.
population_density_threshold_high: 100.
protected_areas_selection: ['Ia', 'Ib', 'II', 'V']
protected_areas_distance_threshold: 5.
depth_threshold_low: 0.
depth_threshold_high: 0.
altitude_threshold: 2000.
terrain_slope_threshold: 0.05
forestry_ratio_threshold: 0.8
latitude_threshold: 65.
solar_residential:
converter: 'DD06M'
resource: 'solar'
deployment: 'residential'
resource_threshold: 100. #W/m2
population_density_threshold_low: 10.
population_density_threshold_high: 999999.
protected_areas_selection: ['Ia', 'Ib', 'II', 'V']
protected_areas_distance_threshold: 1.
depth_threshold_low: 0.
depth_threshold_high: 0.
altitude_threshold: 3000.
terrain_slope_threshold: 1.
forestry_ratio_threshold: 1.
latitude_threshold: 65.
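Each technology block above is a self-contained set of deployment thresholds. As an illustrative sketch of how one such block could screen candidate sites, assuming per-site attributes in a pandas DataFrame (column and function names are hypothetical, not the repository's):

# Illustrative sketch only; all names are hypothetical.
import pandas as pd

def screen_sites(sites, tech):
    """Keep the sites that satisfy one technology's threshold block."""
    mask = (
        (sites['water_depth'] >= tech['depth_threshold_low'])
        & (sites['water_depth'] <= tech['depth_threshold_high'])
        & (sites['altitude'] <= tech['altitude_threshold'])
        & (sites['terrain_slope'] <= tech['terrain_slope_threshold'])
        & (sites['forestry_ratio'] <= tech['forestry_ratio_threshold'])
        & (sites['latitude'].abs() <= tech['latitude_threshold'])
    )
    return sites[mask]

wind_offshore = {'depth_threshold_low': 5., 'depth_threshold_high': 299.,
                 'altitude_threshold': 0., 'terrain_slope_threshold': 1.,
                 'forestry_ratio_threshold': 1., 'latitude_threshold': 65.}
candidates = pd.DataFrame({'water_depth': [10., 350.], 'altitude': [0., 0.],
                           'terrain_slope': [0., 0.], 'forestry_ratio': [0., 0.],
                           'latitude': [54., 60.]})
print(screen_sites(candidates, wind_offshore))  # keeps only the 10 m deep site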
name: resite
channels:
- conda-forge
- defaults
dependencies:
- bottleneck
- cdsapi
@@ -15,4 +16,7 @@ dependencies:
- pyyaml
- xlrd
- tqdm
- joblib
\ No newline at end of file
- joblib
- geopy
- pypsa
- shapely
\ No newline at end of file
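For reference, an environment specified this way is typically created with `conda env create -f environment.yml` and activated with `conda activate resite`; the newly added `geopy`, `pypsa` and `shapely` entries presumably support the geographic and transmission-data handling introduced in this commit.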
import sys
import models as gg
import tools as tl
from pyomo.opt import SolverFactory
from shutil import copy
from os.path import join
import pickle
from tqdm import tqdm
sys.tracebacklimit = 100
from src.helpers import read_inputs, init_folder, custom_log
from src.models import preprocess_input_data, build_model
from src.tools import new_cost_rule, retrieve_location_dict, retrieve_site_data, retrieve_max_run_criticality, \
retrieve_lower_bound, retrieve_y_idx, build_init_multiplier, retrieve_next_multiplier, retrieve_upper_bound
def main():
parameters = tl.read_inputs('parameters.yml')
parameters = read_inputs('../config_model.yml')
keepfiles = parameters['keep_files']
lowmem = parameters['low_memory']
output_folder = tl.init_folder(keepfiles)
output_folder = init_folder(keepfiles)
copy('../config_model.yml', output_folder)
# Copy parameter file to the output folder for future reference.
copy('parameters.yml', output_folder)
problem = parameters['main_problem']
objective = parameters['main_objective']
solution_method = parameters['solution_method']
iterations_Lagrange = parameters['no_iterations_Lagrange']
iterations_SA = parameters['no_iterations_SA']
explorations_SA = parameters['no_explorations_SA']
subgradient = parameters['subgradient_approximation']
solver = parameters['solver']
MIPGap = parameters['mipgap']
TimeLimit = parameters['timelimit']
Threads = parameters['threads']
# technologies = parameters['technologies']
technologies = parameters['technologies']
input_dict = gg.preprocess_input_data(parameters)
input_dict = preprocess_input_data(parameters)
# Solver options for the MIP problem
opt = SolverFactory(solver)
opt.options['MIPGap'] = MIPGap
opt.options['Threads'] = 0
opt.options['TimeLimit'] = 3600
opt.options['DisplayInterval'] = 600
opt.options['Threads'] = Threads
opt.options['TimeLimit'] = TimeLimit
# Solver options for the relaxations.
opt_relaxation = SolverFactory(solver)
opt_relaxation.options['MIPGap'] = 0.02
# Solver options for the integer Lagrangian
opt_integer = SolverFactory(solver)
opt_integer.options['MIPGap'] = MIPGap
opt_integer.options['Threads'] = 0
opt_integer.options['TimeLimit'] = 18000
opt_integer.options['Threads'] = Threads
opt_integer.options['TimeLimit'] = TimeLimit
opt_integer.options['MIPFocus'] = 3
opt_integer.options['DisplayInterval'] = 600
# Solver options for the iterative procedure
opt_persistent = SolverFactory('gurobi_persistent')
opt_persistent.options['mipgap'] = 0.02
if solution_method == 'None':
if problem == 'Covering':
instance = gg.build_model(input_dict, problem, objective, output_folder,
low_memory=lowmem, write_lp=False)
tl.custom_log(' Sending model to solver.')
results = opt.solve(instance, tee=True, keepfiles=False, report_timing=False,
logfile=join(output_folder, 'solver_log.log'),)
optimal_locations = tl.retrieve_optimal_locations(instance, input_dict['critical_window_matrix'],
technologies, problem)
elif problem == 'Load':
raise ValueError(' This problem should be solved with a solution method.')
else:
raise ValueError(' This problem does not exist.')
elif solution_method == 'Projection':
if problem == 'Covering':
instance = gg.build_model_relaxation(input_dict, formulation='PartialConvex', low_memory=lowmem)
elif problem == 'Load':
instance = gg.build_model(input_dict, problem, objective, output_folder,
low_memory=lowmem)
else:
raise ValueError(' This problem does not exist.')
tl.custom_log(' Solving...')
results = opt.solve(instance, logfile=join(output_folder, 'solver_log.log'),
tee=True, keepfiles=False, report_timing=False)
tl.custom_log(' Relaxation solved and passed to the projection problem.')
tl.retrieve_feasible_solution_projection(input_dict, instance, results, problem)
optimal_locations = tl.retrieve_optimal_locations(instance,
input_dict['critical_window_matrix'],
technologies,
problem)
elif solution_method == 'Lagrange':
# Initialize some list objects to be dumped in the output dict.
# objective_list = []
# gap_list = []
# multiplier_list = []
# Build the PCR to extract the set of ys to dualize.
instance_pcr = gg.build_model_relaxation(input_dict, formulation='PartialConvex', low_memory=lowmem)
tl.custom_log(' Solving the partial convex relaxation...')
results_pcr = opt_relaxation.solve(instance_pcr,
tee=False, keepfiles=False, report_timing=False)
lb = tl.retrieve_lower_bound(input_dict, instance_pcr,
method='SimulatedAnnealing', multiprocess=False,
N = 1, T_init=100., no_iter = iterations_SA, no_epoch = explorations_SA)
# Build sets of constraints to keep and dualize, respectively.
ys_dual, ys_keep = tl.retrieve_y_idx(instance_pcr, share_random_keep=0.2)
# Build the initial multiplier (\lambda_0) by random sampling within a range. Affects the estimation of the first UB.
init_multiplier = tl.build_init_multiplier(ys_dual, range=0.5)
# Build PCLR/ILR within the "persistent" interface.
instance_Lagrange = gg.build_model_relaxation(input_dict, formulation='Lagrangian',
subgradient_method=subgradient,
y_dual=ys_dual, y_keep=ys_keep,
multiplier=init_multiplier, low_memory=lowmem)
opt_persistent.set_instance(instance_Lagrange)
tl.custom_log(' Solving the Lagrangian relaxations...')
# Iteratively solve and post-process data.
for i in tqdm(range(1, iterations_Lagrange + 1), desc='Lagrangean Relaxation Loop'):
results_Lagrange = opt_persistent.solve(tee=False, keepfiles=False, report_timing=False)
# multiplier_list.append(array(list(init_multiplier.values())))
# objective_list.append(tl.retrieve_upper_bound(results_lagrange))
# gap_list.append(round((tl.retrieve_upper_bound(results_lagrange) - lb) / lb * 100., 2))
# Compute next multiplier.
incumbent_multiplier = tl.retrieve_next_multiplier(instance_Lagrange, init_multiplier,
ys_keep, ys_dual, i, iterations_Lagrange,
subgradient, a=0.01, share_of_iter=0.8)
# Replace multiplier, hence the objective.
instance_Lagrange.del_component(instance_Lagrange.objective)
instance_Lagrange.objective = tl.new_cost_rule(instance_Lagrange,
ys_keep, ys_dual,
incumbent_multiplier,
low_memory=lowmem)
opt_persistent.set_objective(instance_Lagrange.objective)
# Assign new value to init_multiplier to use in the iterative multiplier calculation.
init_multiplier = incumbent_multiplier
# Create nd array from list of 1d multipliers from each iteration.
# multiplier_array = tl.concatenate_and_filter_arrays(multiplier_list, ys_keep)
if subgradient == 'Inexact':
# Build and solve ILP with the incumbent_multiplier.
instance_integer = gg.build_model_relaxation(input_dict, formulation='Lagrangian',
subgradient_method='Exact', y_dual=ys_dual, y_keep=ys_keep,
multiplier=incumbent_multiplier, low_memory=lowmem)
tl.custom_log(' Solving the integer Lagrangian problem...')
results = opt_integer.solve(instance_integer, logfile=join(output_folder, 'solver_log.log'),
tee=True, keepfiles=False, report_timing=False)
tl.custom_log(' UB = {}, LB = {}, gap = {}%'.format(tl.retrieve_upper_bound(results),
lb,
round((tl.retrieve_upper_bound(results) - lb) / lb * 100., 2)))
optimal_locations = tl.retrieve_optimal_locations(instance_integer, input_dict['critical_window_matrix'],
technologies, problem)
else:
tl.custom_log(' UB = {}, LB = {}, gap = {}%'.format(tl.retrieve_upper_bound(results_Lagrange),
lb,
round((tl.retrieve_upper_bound(results_Lagrange) - lb) / lb * 100., 2)))
optimal_locations = tl.retrieve_optimal_locations(instance_Lagrange, input_dict['critical_window_matrix'],
technologies, problem)
else:
raise ValueError(' This solution method is not available. Retry.')
output_dict = {k: v for k, v in input_dict.items() if k in ('region_list', 'coordinates_dict', 'technologies',
'capacity_factors_dict', 'critical_window_matrix',
'capacity_potential_per_node')}
output_dict.update({'optimal_location_dict': optimal_locations})
if (problem == 'Covering' and 'capacities' in objective) or (problem == 'Load' and objective == 'following'):
deployed_capacities = tl.retrieve_deployed_capacities(instance, technologies,
input_dict['capacity_potential_per_node'])
output_dict.update({'deployed_capacities_dict': deployed_capacities})
# counter = 0
# for k in instance.W:
# counter += instance.y[k].value
# print ('Time window part is: {}'.format(counter))
# print ('Penalty term part is: {}'.format(round(abs(instance.objective()) - counter, 2)))
pickle.dump(output_dict, open(join(output_folder, 'output_model.p'), 'wb'))
tl.remove_garbage(keepfiles, output_folder)
custom_log(' BB chosen to solve the IP.')
instance, indices = build_model(parameters, input_dict, output_folder, write_lp=False)
custom_log(' Sending model to solver.')
results = opt.solve(instance, tee=True, keepfiles=False, report_timing=False,
logfile=join(output_folder, 'solver_log.log'),)
comp_location_dict = retrieve_location_dict(instance, parameters, input_dict, indices)
max_location_dict = retrieve_site_data(parameters, input_dict, output_folder, comp_location_dict)
no_windows_max = retrieve_max_run_criticality(max_location_dict, input_dict, parameters)
print('Number of non-critical windows for the MAX case: ', no_windows_max)
# elif solution_method == 'ASM':
#
# iterations_Lagrange = 500
# subgradient = 'Inexact'
#
# # Build PCR to extract i) the set of ys to dualize
# instance_pcr = build_model_relaxation(parameters, input_dict, formulation='PartialConvex')
#
# custom_log(' Solving the partial convex relaxation...')
# results_pcr = opt_relaxation.solve(instance_pcr,
# tee=False, keepfiles=False, report_timing=False)
#
# lb = retrieve_lower_bound(input_dict, instance_pcr,
# method='SimulatedAnnealing', multiprocess=False,
# N = 1, T_init=100., no_iter = 100, no_epoch = 500)
#
# # Build sets of constraints to keep and dualize, respectively.
# ys_dual, ys_keep = retrieve_y_idx(instance_pcr, share_random_keep=0.2)
#
# # Build (random sampling within range) initial multiplier (\lambda_0). Affects the estimation of the first UB.
# init_multiplier = build_init_multiplier(ys_dual, range=0.5)
#
# # Build PCLR/ILR within the "persistent" interface.
# instance_Lagrange = build_model_relaxation(input_dict, formulation='Lagrangian',
# subgradient_method=subgradient,
# y_dual=ys_dual, y_keep=ys_keep,
# multiplier=init_multiplier)
# opt_persistent.set_instance(instance_Lagrange)
#
# custom_log(' Solving the Lagrangian relaxations...')
# # Iteratively solve and post-process data.
# for i in tqdm(range(1, iterations_Lagrange + 1), desc='Lagrangean Relaxation Loop'):
#
# results_Lagrange = opt_persistent.solve(tee=False, keepfiles=False, report_timing=False)
#
# # Compute next multiplier.
# incumbent_multiplier = retrieve_next_multiplier(instance_Lagrange, init_multiplier,
# ys_keep, ys_dual, i, iterations_Lagrange,
# subgradient, a=0.01, share_of_iter=0.8)
#
# # Replace multiplier, hence the objective.
# instance_Lagrange.del_component(instance_Lagrange.objective)
# instance_Lagrange.objective = new_cost_rule(instance_Lagrange,
# ys_keep, ys_dual,
# incumbent_multiplier)
# opt_persistent.set_objective(instance_Lagrange.objective)
#
# # Assign new value to init_multiplier to use in the iterative multiplier calculation.
# init_multiplier = incumbent_multiplier
#
# # Create nd array from list of 1d multipliers from each iteration.
# # multiplier_array = concatenate_and_filter_arrays(multiplier_list, ys_keep)
#
# # Build and solve ILP with the incumbent_multiplier.
# instance_integer = build_model_relaxation(input_dict, formulation='Lagrangian',
# subgradient_method='Exact', y_dual=ys_dual, y_keep=ys_keep)
#
# custom_log(' Solving the integer Lagrangian problem...')
# results = opt_integer.solve(instance_integer, logfile=join(output_folder, 'solver_log.log'),
# tee=True, keepfiles=False, report_timing=False)
#
# custom_log(' UB = {}, LB = {}, gap = {}%'.format(retrieve_upper_bound(results),
# lb,
# round((retrieve_upper_bound(results) - lb) / lb * 100., 2)))
# optimal_locations = tl.retrieve_optimal_locations(instance_integer, input_dict['critical_window_matrix'],
# technologies, problem)
#
#
#
# elif solution_method == 'Heuristic':
# #TODO: set this up.
# pass
#
# elif solution_method == 'Warmstart':
# #TODO: set this up.
# pass
#
# else:
#
# raise ValueError(' This solution method is not available. Retry.')
#
#
# output_dict = {k: v for k, v in input_dict.items() if k in ('region_list', 'coordinates_dict', 'technologies',
# 'capacity_factors_dict', 'critical_window_matrix',
# 'capacity_potential_per_node')}
#
# output_dict.update({'optimal_location_dict': optimal_locations})
#
# if (problem == 'Covering' and 'capacities' in objective) or (problem == 'Load' and objective == 'following'):
# deployed_capacities = tl.retrieve_deployed_capacities(instance, technologies,
# input_dict['capacity_potential_per_node'])
# output_dict.update({'deployed_capacities_dict': deployed_capacities})
#
# pickle.dump(output_dict, open(join(output_folder, 'output_model.p'), 'wb'))
#
# tl.remove_garbage(keepfiles, output_folder)
if __name__ == "__main__":
    main()
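In the Lagrangian branch above, `retrieve_next_multiplier` updates the dual multipliers at each iteration before the objective is rebuilt and re-registered with the persistent solver interface. The exact update rule lives in `src/tools.py` and is not shown in this diff; a generic projected-subgradient step of the kind such routines implement, sketched here with hypothetical names and the same `a=0.01` step parameter seen in the call above:

# Textbook projected-subgradient sketch, not the update from src/tools.py.
import numpy as np

def next_multiplier(lmbda, subgrad, iteration, a=0.01):
    """One dual ascent step: move along the subgradient with a diminishing
    step size a / iteration, then project onto the nonnegative orthant."""
    step = a / iteration
    return np.maximum(0.0, lmbda + step * subgrad)

lmbda = np.zeros(4)  # one multiplier per dualized constraint
for iteration in range(1, 6):
    subgrad = np.array([1.0, -0.5, 0.0, 2.0])  # constraint violations at the incumbent
    lmbda = next_multiplier(lmbda, subgrad, iteration)
print(lmbda)

With `subgradient_approximation: 'Inexact'`, the loop finishes by building and solving an exact integer Lagrangian problem with the incumbent multiplier, from which the reported UB/LB gap is computed.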