Commit b508febf authored by David Radu

First commit.

input_data
output_data
resource_data
*.lp
*.log
*.pyc
src/__pycache*
.idea
bottleneck
cdsapi
dask
matplotlib
Pyomo
scipy
toolz
xarray
geopandas
cartopy
pyyaml
xlrd
tqdm
joblib
git+https://github.com/PyPSA/PyPSA.git
import sys
import models as gg
import tools as tl
from pyomo.opt import SolverFactory
from shutil import copy
from os.path import join
import pickle
from tqdm import tqdm
sys.tracebacklimit = 100

def main():
    parameters = tl.read_inputs('parameters.yml')
    keepfiles = parameters['keep_files']
    lowmem = parameters['low_memory']

    output_folder = tl.init_folder(keepfiles, 'O')
    # Copy parameter file to the output folder for future reference.
    copy('parameters.yml', output_folder)

    problem = parameters['main_problem']
    objective = parameters['main_objective']
    solution_method = parameters['solution_method']
    iterations_Lagrange = parameters['no_iterations_Lagrange']
    iterations_SA = parameters['no_iterations_SA']
    explorations_SA = parameters['no_explorations_SA']
    subgradient = parameters['subgradient_approximation']
    solver = parameters['solver']
    MIPGap = parameters['mipgap']
    technologies = parameters['technologies']

    input_dict = gg.preprocess_input_data(parameters)
    if solver == 'gurobi':
        opt = SolverFactory(solver)
        opt.options['MIPGap'] = MIPGap
        opt.options['Threads'] = 0
        opt.options['TimeLimit'] = 3600
        opt.options['DisplayInterval'] = 600
    elif solver == 'cplex':
        opt = SolverFactory(solver)
        opt.options['mipgap'] = MIPGap
        opt.options['threads'] = 0
        opt.options['mip_limits_treememory'] = 1e3

    # Solver options for the relaxations.
    opt_relaxation = SolverFactory(solver)
    opt_relaxation.options['MIPGap'] = 0.02

    # Solver options for the integer Lagrangian.
    opt_integer = SolverFactory(solver)
    opt_integer.options['MIPGap'] = MIPGap
    opt_integer.options['Threads'] = 0
    opt_integer.options['TimeLimit'] = 18000
    opt_integer.options['MIPFocus'] = 3
    opt_integer.options['DisplayInterval'] = 600

    # Solver options for the iterative procedure.
    opt_persistent = SolverFactory('gurobi_persistent')
    opt_persistent.options['mipgap'] = 0.02
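    # NB: the option names on opt_relaxation and opt_integer follow Gurobi's
    # convention (MIPGap, MIPFocus, DisplayInterval); running with
    # solver == 'cplex' would require the CPLEX equivalents, and the
    # persistent interface above is instantiated as Gurobi-only.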
    if solution_method == 'None':
        if problem == 'Covering':
            instance = gg.build_model(input_dict, problem, objective, output_folder,
                                      low_memory=lowmem, write_lp=False)
            tl.custom_log(' Sending model to solver.')
            results = opt.solve(instance, tee=True, keepfiles=False, report_timing=False,
                                logfile=join(output_folder, 'solver_log.log'))
            optimal_locations = tl.retrieve_optimal_locations(instance, input_dict['critical_window_matrix'],
                                                              technologies, problem)
        elif problem == 'Load':
            raise ValueError(' This problem should be solved with a solution method.')
        else:
            raise ValueError(' This problem does not exist.')

    elif solution_method == 'Projection':
        if problem == 'Covering':
            instance = gg.build_model_relaxation(input_dict, formulation='PartialConvex', low_memory=lowmem)
        elif problem == 'Load':
            instance = gg.build_model(input_dict, problem, objective, output_folder,
                                      low_memory=lowmem)
        else:
            raise ValueError(' This problem does not exist.')

        tl.custom_log(' Solving...')
        results = opt.solve(instance, logfile=join(output_folder, 'solver_log.log'),
                            tee=True, keepfiles=False, report_timing=False)
        tl.custom_log(' Relaxation solved and passed to the projection problem.')
        tl.retrieve_feasible_solution_projection(input_dict, instance, results, problem)
        optimal_locations = tl.retrieve_optimal_locations(instance,
                                                          input_dict['critical_window_matrix'],
                                                          technologies,
                                                          problem)
    elif solution_method == 'Lagrange':
        # Initialize some list objects to be dumped in the output dict.
        # objective_list = []
        # gap_list = []
        # multiplier_list = []

        # Build the partial convex relaxation (PCR) to extract the set of ys to dualize.
        instance_pcr = gg.build_model_relaxation(input_dict, formulation='PartialConvex', low_memory=lowmem)
        tl.custom_log(' Solving the partial convex relaxation...')
        results_pcr = opt_relaxation.solve(instance_pcr,
                                           tee=False, keepfiles=False, report_timing=False)
        lb = tl.retrieve_lower_bound(input_dict, instance_pcr,
                                     method='SimulatedAnnealing', multiprocess=False,
                                     N=1, T_init=100., no_iter=iterations_SA, no_epoch=explorations_SA)

        # Build the sets of constraints to dualize and keep, respectively.
        ys_dual, ys_keep = tl.retrieve_y_idx(instance_pcr, share_random_keep=0.2)
        # Build the initial multiplier (\lambda_0) by random sampling within a range.
        # It affects the estimation of the first upper bound.
        init_multiplier = tl.build_init_multiplier(ys_dual, range=0.5)

        # Build the Lagrangian relaxation within the "persistent" solver interface.
        instance_Lagrange = gg.build_model_relaxation(input_dict, formulation='Lagrangian',
                                                      subgradient_method=subgradient,
                                                      y_dual=ys_dual, y_keep=ys_keep,
                                                      multiplier=init_multiplier, low_memory=lowmem)
        opt_persistent.set_instance(instance_Lagrange)
        tl.custom_log(' Solving the Lagrangian relaxations...')

        # Iteratively solve and post-process data.
        for i in tqdm(range(1, iterations_Lagrange + 1), desc='Lagrangian Relaxation Loop'):
            results_Lagrange = opt_persistent.solve(tee=False, keepfiles=False, report_timing=False)
            # multiplier_list.append(array(list(init_multiplier.values())))
            # objective_list.append(tl.retrieve_upper_bound(results_Lagrange))
            # gap_list.append(round((tl.retrieve_upper_bound(results_Lagrange) - lb) / lb * 100., 2))

            # Compute the next multiplier.
            incumbent_multiplier = tl.retrieve_next_multiplier(instance_Lagrange, init_multiplier,
                                                               ys_keep, ys_dual, i, iterations_Lagrange,
                                                               subgradient, a=0.01, share_of_iter=0.8)
            # Replace the multiplier, hence the objective.
            instance_Lagrange.del_component(instance_Lagrange.objective)
            instance_Lagrange.objective = tl.new_cost_rule(instance_Lagrange,
                                                           ys_keep, ys_dual,
                                                           incumbent_multiplier,
                                                           low_memory=lowmem)
            opt_persistent.set_objective(instance_Lagrange.objective)
            # Carry the incumbent over to the next multiplier update.
            init_multiplier = incumbent_multiplier

        # Create an nd-array from the list of 1d multipliers from each iteration.
        # multiplier_array = tl.concatenate_and_filter_arrays(multiplier_list, ys_keep)
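        # For reference, a generic projected-subgradient update takes the form
        #     lambda_{k+1} = max(0, lambda_k + a_k * g_k),
        # with g_k the violation of the dualized constraints at iteration k and
        # a_k a diminishing step size. The exact rule lives in
        # tl.retrieve_next_multiplier (tools.py is collapsed in this commit),
        # parameterized above by a=0.01 and share_of_iter=0.8.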
        if subgradient == 'Inexact':
            # Build and solve the integer problem with the incumbent multiplier.
            instance_integer = gg.build_model_relaxation(input_dict, formulation='Lagrangian',
                                                         subgradient_method='Exact',
                                                         y_dual=ys_dual, y_keep=ys_keep,
                                                         multiplier=incumbent_multiplier, low_memory=lowmem)
            tl.custom_log(' Solving the integer Lagrangian problem...')
            results = opt_integer.solve(instance_integer, logfile=join(output_folder, 'solver_log.log'),
                                        tee=True, keepfiles=False, report_timing=False)
            tl.custom_log(' UB = {}, LB = {}, gap = {}%'.format(
                tl.retrieve_upper_bound(results), lb,
                round((tl.retrieve_upper_bound(results) - lb) / lb * 100., 2)))
            optimal_locations = tl.retrieve_optimal_locations(instance_integer,
                                                              input_dict['critical_window_matrix'],
                                                              technologies, problem)
        else:
            tl.custom_log(' UB = {}, LB = {}, gap = {}%'.format(
                tl.retrieve_upper_bound(results_Lagrange), lb,
                round((tl.retrieve_upper_bound(results_Lagrange) - lb) / lb * 100., 2)))
            optimal_locations = tl.retrieve_optimal_locations(instance_Lagrange,
                                                              input_dict['critical_window_matrix'],
                                                              technologies, problem)

    else:
        raise ValueError(' This solution method is not available. Retry.')
    output_dict = {k: v for k, v in input_dict.items() if k in ('region_list', 'coordinates_dict', 'technologies',
                                                                'capacity_factors_dict', 'critical_window_matrix',
                                                                'capacity_potential_per_node')}
    output_dict.update({'optimal_location_dict': optimal_locations})

    # NB: 'instance' is only defined for the 'None' and 'Projection' methods;
    # entering this branch under 'Lagrange' would raise a NameError.
    if (problem == 'Covering' and 'capacities' in objective) or (problem == 'Load' and objective == 'following'):
        deployed_capacities = tl.retrieve_deployed_capacities(instance, technologies,
                                                              input_dict['capacity_potential_per_node'])
        output_dict.update({'deployed_capacities_dict': deployed_capacities})

    with open(join(output_folder, 'output_model.p'), 'wb') as outfile:
        pickle.dump(output_dict, outfile)

    tl.remove_garbage(keepfiles, output_folder)


if __name__ == "__main__":
    main()
This diff is collapsed.
# Spatial resolution of the reanalysis data.
spatial_resolution: 0.5
# Path towards the resource data. Has to be manually updated for different spatial resolutions.
path_resource_data: '../resource_data/'
# Path towards different transfer functions.
path_transfer_function_data: '../input_data/transfer_functions/'
# Path towards population density data.
path_population_density_data: '../input_data/population_density/'
# Path towards protected areas data.
path_protected_areas_data: '../input_data/protected_areas/'
# Path towards land characteristics data.
path_landseamask: '../input_data/land_mask/'
# Path towards load data.
path_load_data: '../input_data/load_data/'
# Path towards bus data.
path_bus_data: '../input_data/transmission_data/'
# Threshold above which no generation capacity is deployed.
population_density_threshold: 150.
# Selected protected areas. Check here for details: http://datasets.wri.org/dataset/64b69c0fb0834351bd6c0ceb3744c5ad
protected_areas_selection: ['Ia', 'Ib', 'II', 'Not Applicable']
# Distance threshold from protected areas.
protected_areas_threshold: 20.
# Depth threshold.
depth_threshold: 100.
# Start time and end time for slicing the database.
time_slice: ['2017-01-01T00:00', '2017-01-31T23:00']
# List of regions to be considered in the optimization.
regions: ['NA']
# Technologies to deploy. Must be in 'RESOURCE_CONVERTER' structure.
technologies: ['wind_aerodyn']
# Assessment measure for each time window. Available: mean, median or percentiles.
resource_quality_measure: 'mean'
# Defines how \alpha is considered in space and time. (load_based, uniform, percentile)
alpha_rule: 'load_based'
# For the load-dependent \alpha, it sets how regions are aggregated. (centralized, partitioned)
alpha_plan: 'centralized'
# Normalization procedures (detailed in tools.py). (min, max)
alpha_norm: 'min'
# Capacity factor threshold used to compute the criticality indicators. A float applies uniformly across regions; a percentile is assigned location-wise.
alpha_numerical: 0.3
# Time-window length used to compute the criticality indicator.
delta: 24
# Geographical coverage threshold used to compute the criticality indicator.
beta: 0.9
# Choice of solver. Available: 'gurobi' and 'cplex'.
solver: 'gurobi'
# MIP gap --- 0.01 = 1%
mipgap: 0.04
# Type of problem to be solved. Check models.py for a full list.
main_problem: 'Covering'
# Objective of the problem. Check models.py for a full list.
main_objective: 'cardinality_floor'
# Solution method: None, Projection, Lagrange
solution_method: 'None'
# Lagrangian subproblem: Inexact/Exact
subgradient_approximation: 'Inexact'
# Number of Lagrangian iterations
no_iterations_Lagrange: 1000
# Number of Simulated Annealing iterations
no_iterations_SA: 500
# Number of explorations within an epoch
no_explorations_SA: 1000
# List of deployments per region (ordered as the list above)
cardinality_constraint: [50]
# Capacity budget in GW.
capacity_constraint: 200.0
# Monetary budget in units.
cost_budget: 4000.0
# Keeping files at the end of the run
keep_files: True
# Staying away from the PyPSA model formulation
low_memory: False
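The run script reads this file through tl.read_inputs. tools.py is collapsed in this commit, so the following is a minimal sketch of that reader, assuming it mirrors the yaml.safe_load pattern of read_inputs_plotting in the plotting module below:

    import yaml

    def read_inputs(inputs):
        # Assumption: a plain YAML load, as in read_inputs_plotting.
        with open(inputs) as infile:
            data = yaml.safe_load(infile)
        return data

    parameters = read_inputs('parameters.yml')
    parameters['solver']  # -> 'gurobi' with the values above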
This diff is collapsed.
import os

import cdsapi

# regions = {'EU': '70/-10/35/30',
#            'US': '50/-125/30/-70'}
regions = {'NA': '38/-14/28/25',
           'IC': '66/-25/63/-14',
           'GR': '62/-49/59/-42'}
years = ['2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017']
months = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
spatial_resolution = 0.5

# A single client serves all requests.
c = cdsapi.Client()

for region, area in regions.items():
    # One sub-folder per spatial resolution and region.
    directory = '../resource_data/' + str(spatial_resolution) + '/' + region + '/'
    if not os.path.exists(directory):
        os.makedirs(directory)

    for year in years:
        for month in months:
            c.retrieve(
                'reanalysis-era5-single-levels',
                {
                    'variable': ['100m_u_component_of_wind', '100m_v_component_of_wind',
                                 '2m_temperature', 'surface_solar_radiation_downwards'],
                    'product_type': 'reanalysis',
                    'area': area,
                    'grid': str(spatial_resolution) + '/' + str(spatial_resolution),
                    'year': year,
                    'month': month,
                    'day': ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10',
                            '11', '12', '13', '14', '15', '16', '17', '18', '19', '20',
                            '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31'],
                    'time': ['00:00', '01:00', '02:00', '03:00', '04:00', '05:00',
                             '06:00', '07:00', '08:00', '09:00', '10:00', '11:00',
                             '12:00', '13:00', '14:00', '15:00', '16:00', '17:00',
                             '18:00', '19:00', '20:00', '21:00', '22:00', '23:00'],
                    'format': 'netcdf'
                },
                # Save into the per-region folder created above.
                directory + region + '_' + year + '_' + month + '.nc')
# c = cdsapi.Client()
# c.retrieve(
#     'reanalysis-era5-single-levels',
#     {
#         'variable': ['low_vegetation_cover', 'high_vegetation_cover', 'land_sea_mask',
#                      'model_bathymetry', 'orography', 'sea_ice_cover'],
#         'product_type': 'reanalysis',
#         'grid': str(spatial_resolution) + '/' + str(spatial_resolution),
#         'year': '2018',
#         'month': '12',
#         'day': '31',
#         'time': '00:00',
#         'format': 'netcdf'
#     },
#     '../input_data/land_mask/' + 'ERA5_surface_characteristics_20181231_' + str(spatial_resolution) + '.nc')
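# NB: the CDS API client reads credentials from ~/.cdsapirc (url and key);
# the requests above fail without a registered Copernicus account.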
This diff is collapsed.
import pickle
from os.path import join
from ast import literal_eval

# numpy and pandas back the np.* / pd.* calls throughout this module;
# sin, cos, pi and arccos are taken from numpy (scipy's re-exports are deprecated).
import numpy as np
import pandas as pd
from numpy import sin, cos, pi, arccos, arange, sqrt, floor, ceil

import yaml
from matplotlib.path import Path
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
def _rect_inter_inner(x1, x2):
    n1 = x1.shape[0] - 1
    n2 = x2.shape[0] - 1
    X1 = np.c_[x1[:-1], x1[1:]]
    X2 = np.c_[x2[:-1], x2[1:]]
    S1 = np.tile(X1.min(axis=1), (n2, 1)).T
    S2 = np.tile(X2.max(axis=1), (n1, 1))
    S3 = np.tile(X1.max(axis=1), (n2, 1)).T
    S4 = np.tile(X2.min(axis=1), (n1, 1))
    return S1, S2, S3, S4


def _rectangle_intersection_(x1, y1, x2, y2):
    S1, S2, S3, S4 = _rect_inter_inner(x1, x2)
    S5, S6, S7, S8 = _rect_inter_inner(y1, y2)
    C1 = np.less_equal(S1, S2)
    C2 = np.greater_equal(S3, S4)
    C3 = np.less_equal(S5, S6)
    C4 = np.greater_equal(S7, S8)
    ii, jj = np.nonzero(C1 & C2 & C3 & C4)
    return ii, jj
def intersection(x1, y1, x2, y2):
    """INTERSECTIONS Intersections of curves.

    Computes the (x,y) locations where two curves intersect. The curves
    can be broken with NaNs or have vertical segments.

    Usage:
        x, y = intersection(x1, y1, x2, y2)

    Example:
        a, b = 1, 2
        phi = np.linspace(3, 10, 100)
        x1 = a*phi - b*np.sin(phi)
        y1 = a - b*np.cos(phi)
        x2 = phi
        y2 = np.sin(phi) + 2
        x, y = intersection(x1, y1, x2, y2)
        plt.plot(x1, y1, c='r')
        plt.plot(x2, y2, c='g')
        plt.plot(x, y, '*k')
        plt.show()
    """
    ii, jj = _rectangle_intersection_(x1, y1, x2, y2)
    n = len(ii)

    dxy1 = np.diff(np.c_[x1, y1], axis=0)
    dxy2 = np.diff(np.c_[x2, y2], axis=0)

    # For each candidate segment pair, solve the 4x4 linear system locating
    # the intersection point in both segments' parametric coordinates.
    T = np.zeros((4, n))
    AA = np.zeros((4, 4, n))
    AA[0:2, 2, :] = -1
    AA[2:4, 3, :] = -1
    AA[0::2, 0, :] = dxy1[ii, :].T
    AA[1::2, 1, :] = dxy2[jj, :].T

    BB = np.zeros((4, n))
    BB[0, :] = -x1[ii].ravel()
    BB[1, :] = -x2[jj].ravel()
    BB[2, :] = -y1[ii].ravel()
    BB[3, :] = -y2[jj].ravel()

    for i in range(n):
        try:
            T[:, i] = np.linalg.solve(AA[:, :, i], BB[:, i])
        except np.linalg.LinAlgError:
            T[:, i] = np.nan

    in_range = (T[0, :] >= 0) & (T[1, :] >= 0) & (T[0, :] <= 1) & (T[1, :] <= 1)
    xy0 = T[2:, in_range]
    xy0 = xy0.T
    return xy0[:, 0], xy0[:, 1]
def clip_revenue(ts, el_price, ceiling):
    """Computes the revenue associated with a synthetic generation time series.

    Parameters:
    ------------
    ts : TimeSeries
        Electricity generation time series.
    el_price : TimeSeries
        Electricity price time series.
    ceiling : float
        Quantile of the generation time series above which values are
        zeroed out before computing the revenue.

    Returns:
    ------------
    revenue : float
        Total revenue over the horizon.
    """
    ts_clip = ts.where(ts <= np.quantile(ts, ceiling), 0.)
    revenue = (ts_clip * el_price).sum()

    return revenue
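# Usage sketch (hypothetical data): total revenue when generation above the
# 95th-percentile output is zeroed out.
#     ts = pd.Series(np.random.rand(8760))
#     el_price = pd.Series(50. + 10. * np.random.randn(8760))
#     revenue = clip_revenue(ts, el_price, 0.95)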
def assess_firmness(ts, threshold):
    """Function assessing time series "firmness".

    Parameters:
    ------------
    ts : TimeSeries
        Electricity generation time series.
    threshold : float
        Capacity factor value compared to which the firmness of the
        time series is assessed.

    Returns:
    ------------
    sequences : list
        List of integers representing the lengths of time windows with
        non-interrupted capacity factor values above "threshold".
    """
    # Replace all values smaller than the threshold with 0.
    mask = np.where(ts >= threshold, ts, 0)
    # Retrieve the indices of non-zeros from the time series.
    no_zeros = np.nonzero(mask != 0)[0]
    # Determine the lengths of the consecutive non-zero runs.
    sequences = [len(i) for i in np.split(no_zeros, np.where(np.diff(no_zeros) != 1)[0] + 1)]

    return sequences
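# Usage sketch: lengths of all runs with capacity factor at or above 0.3.
#     cf = np.array([0.1, 0.4, 0.5, 0.2, 0.35, 0.4, 0.4, 0.1])
#     assess_firmness(cf, 0.3)  # -> [2, 3]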
def assess_capacity_credit(ts_load, ts_gen, no_deployments, threshold):
    """Proxies capacity credit as the mean per-unit generation over the
    hours where load sits at or above its "threshold" quantile."""
    ts_load_array = ts_load.values
    ts_load_mask = np.where(ts_load_array >= np.quantile(ts_load_array, threshold), 1., 0.)
    ts_load_mask = pd.Series(data=ts_load_mask)

    ts_gen_mean = ts_gen / no_deployments
    proxy = ts_load_mask * ts_gen_mean
    proxy_nonzero = proxy.iloc[proxy.to_numpy().nonzero()[0]]

    return proxy_nonzero.mean()
def distsphere(lat1, long1, lat2, long2):
    """Calculates the distance between two points on a sphere.

    Parameters:
    ------------
    lat1, long1, lat2, long2 : float
        Geographical coordinates of the two points.

    Returns:
    ------------
    arc : float
        Distance between the points in radians.
    """
    # Convert latitude and longitude to
    # spherical coordinates in radians.
    degrees_to_radians = pi / 180.0

    # phi = 90 - latitude
    phi1 = (90.0 - lat1) * degrees_to_radians
    phi2 = (90.0 - lat2) * degrees_to_radians

    # theta = longitude
    theta1 = long1 * degrees_to_radians
    theta2 = long2 * degrees_to_radians

    # Compute the spherical distance from the spherical coordinates.
    cosine = (sin(phi1) * sin(phi2) * cos(theta1 - theta2) + cos(phi1) * cos(phi2))
    arc = arccos(cosine)

    # Remember to multiply arc by the radius of the earth!
    return arc
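# Usage sketch: great-circle distance in km between two (lat, lon) points,
# e.g. Brussels (50.85, 4.35) and Liege (50.63, 5.57); the returned arc is
# multiplied by the Earth's mean radius.
#     arc = distsphere(50.85, 4.35, 50.63, 5.57)
#     distance_km = arc * 6371.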
def update_latitude(lat1, arc):
    """Helper function that adjusts the central latitude position.

    Parameters:
    ------------
    lat1 : float
    arc : float

    Returns:
    ------------
    lat2 : float
    """
    degrees_to_radians = pi / 180.0
    lat2 = (arc - ((90 - lat1) * degrees_to_radians)) * (1. / degrees_to_radians) + 90

    return lat2
def centerMap(lons, lats):
    """Returns elements used to center and size the map plot (center latitude
    and longitude, plot extent, height and width of the map).

    Parameters:
    ------------
    lons : list
    lats : list

    Returns:
    ------------
    lon0, lat0, minlon, maxlon, minlat, maxlat, mapH, mapW : float
    """
    # Assumes -90 < lat < 90 and -180 < lon < 180, with latitude
    # and longitude in decimal degrees.
    earthRadius = 6378100.0  # earth's radius in meters

    lon0 = ((max(lons) - min(lons)) / 2) + min(lons)

    b = distsphere(max(lats), min(lons), max(lats), max(lons)) * earthRadius / 2
    c = distsphere(max(lats), min(lons), min(lats), lon0) * earthRadius

    # Use the Pythagorean theorem to determine the height of the plot.
    mapH = sqrt(c ** 2 - b ** 2)
    mapW = distsphere(min(lats), min(lons), min(lats), max(lons)) * earthRadius

    arcCenter = (mapH / 2) / earthRadius
    lat0 = update_latitude(min(lats), arcCenter)

    minlon = min(lons) - 1
    maxlon = max(lons) + 1
    minlat = min(lats) - 1
    maxlat = max(lats) + 1

    return lon0, lat0, minlon, maxlon, minlat, maxlat, mapH, mapW
def plot_basemap(coordinate_dict):
    """Creates the base of the plot functions.

    Parameters:
    ------------
    coordinate_dict : dict
        Dictionary containing coordinate pairs within regions of interest.

    Returns:
    ------------
    dict
        Dictionary containing various elements of the plot.
    """
    coordinate_list = list(set([val for vals in coordinate_dict.values() for val in vals]))

    longitudes = [i[0] for i in coordinate_list]
    latitudes = [i[1] for i in coordinate_list]

    lon0, lat0, minlon, maxlon, minlat, maxlat, mapH, mapW = centerMap(longitudes, latitudes)

    land_50m = cfeature.NaturalEarthFeature('physical', 'land', '50m',
                                            edgecolor='darkgrey',
                                            facecolor=cfeature.COLORS['land_alt1'])

    proj = ccrs.PlateCarree()
    plt.figure(figsize=(10, 6))

    ax = plt.axes(projection=proj)
    ax.set_extent([minlon, maxlon, minlat, maxlat], proj)

    ax.add_feature(land_50m, linewidth=0.5)
    ax.add_feature(cfeature.OCEAN, facecolor='white')
    ax.add_feature(cfeature.BORDERS.with_scale('50m'), edgecolor='darkgrey', linewidth=0.5)

    gl = ax.gridlines(crs=proj, draw_labels=True, linewidth=0.5, color='gray', alpha=0.3, linestyle='--')
    gl.xlabels_top = False
    gl.ylabels_left = False
    gl.xlabels_bottom = True
    gl.xlocator = mticker.FixedLocator(arange(floor(minlon), ceil(maxlon + 10), 5))
    gl.ylocator = mticker.FixedLocator(arange(floor(minlat) - 1, ceil(maxlat + 10), 5))
    gl.xformatter = LONGITUDE_FORMATTER
    gl.yformatter = LATITUDE_FORMATTER
    gl.xlabel_style = {'size': 6, 'color': 'gray'}
    gl.ylabel_style = {'size': 6, 'color': 'gray'}

    ax.outline_patch.set_edgecolor('white')

    return {'basemap': ax,
            'projection': proj,
            'lons': longitudes,
            'lats': latitudes,
            'width': mapW}
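# Usage sketch (hypothetical coordinates, mapping a region code to a list
# of (lon, lat) pairs):
#     base = plot_basemap({'NA': [(-10., 30.), (20., 35.)]})
#     base['basemap'].scatter([-5.], [32.], transform=base['projection'])
#     plt.show()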
def read_inputs_plotting(output_path):
    """Reads the parameter file for plotting purposes.

    Parameters:
    ------------
    output_path : str
        Path towards the output data.

    Returns:
    ------------
    data : dict
        Dictionary containing run parameters.
    """
    path_to_input = join(output_path, 'parameters.yml')
    with open(path_to_input) as infile:
        data = yaml.safe_load(infile)

    return data
def read_output(run_name):
    """Reads outputs for a given run.

    Parameters:
    ------------
    run_name : str
        The name of the run (given by the function init_folder in tools.py).

    Returns:
    ------------
    output_pickle : dict
        Dict-like structure containing various relevant data structures.
    """
    path_to_file = join('../output_data/', run_name, 'output_model.p')
    with open(path_to_file, 'rb') as infile:
        output_pickle = pickle.load(infile)

    return output_pickle
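# Usage sketch ('<run_name>' stands for whichever folder init_folder created
# under ../output_data/ -- a hypothetical placeholder here):
#     parameters = read_inputs_plotting(join('../output_data/', '<run_name>'))
#     outputs = read_output('<run_name>')
#     locations = outputs['optimal_location_dict']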