diff --git a/examples/OntologySample200m_1turb.yaml b/examples/OntologySample200m_1turb.yaml index 3ff93e9e..6087e3b3 100644 --- a/examples/OntologySample200m_1turb.yaml +++ b/examples/OntologySample200m_1turb.yaml @@ -1295,8 +1295,7 @@ anchor_types: zlug : 10 # embedded depth of padeye [m] suction1: - type : suction_pile + type : suction L : 16.4 # length of pile [m] D : 5.45 # diameter of pile [m] zlug : 9.32 # embedded depth of padeye [m] - diff --git a/famodel/anchors/anchors_famodel/capacity_suction.py b/famodel/anchors/anchors_famodel/capacity_suction.py index d16b75a1..818d9ed1 100644 --- a/famodel/anchors/anchors_famodel/capacity_suction.py +++ b/famodel/anchors/anchors_famodel/capacity_suction.py @@ -40,7 +40,7 @@ def getCapacitySuction(D, L, zlug, H, V, soil_type, gamma, Su0=None, k=None, phi Maximum vertical capacity [kN] ''' - lambdap = L/D; m = 2/3; # Suction pile slenderness ratio + lambdap = L/D; m = 2/3; # Suction pile slenderness ratio t = (6.35 + D*20)/1e3 # Suction pile wall thickness (m), API RP2A-WSD rlug = D/2 # Radial position of the lug thetalug = 5 # Angle of tilt misaligment, default is 5. 
(deg) diff --git a/famodel/irma/action.py b/famodel/irma/action.py new file mode 100644 index 00000000..0e5c07ef --- /dev/null +++ b/famodel/irma/action.py @@ -0,0 +1,2122 @@ +"""Action base class""" + +import numpy as np +import matplotlib.pyplot as plt + +import moorpy as mp +from moorpy.helpers import set_axes_equal +from moorpy import helpers +import yaml +from copy import deepcopy + +#from shapely.geometry import Point, Polygon, LineString +from famodel.mooring.mooring import Mooring +from famodel.platform.platform import Platform +from famodel.anchors.anchor import Anchor +from famodel.mooring.connector import Connector +from famodel.substation.substation import Substation +from famodel.cables.cable import Cable +from famodel.cables.dynamic_cable import DynamicCable +from famodel.cables.static_cable import StaticCable +from famodel.cables.cable_properties import getCableProps, getBuoyProps, loadCableProps,loadBuoyProps +from famodel.cables.components import Joint +from famodel.turbine.turbine import Turbine +from famodel.famodel_base import Node + +# Import select required helper functions +from famodel.helpers import (check_headings, head_adjust, getCableDD, getDynamicCables, + getMoorings, getAnchors, getFromDict, cleanDataTypes, + getStaticCables, getCableDesign, m2nm, loadYAML, + configureAdjuster, route_around_anchors) + + +t2N = 9806.7 # conversion factor from t to N + +def incrementer(text): + ''' + Increments the last integer found in a string. + + Inputs + ------ + `text` : `str` + The input string to increment. + + Returns + ------- + `str` + The incremented string. + ''' + split_text = text.split()[::-1] + for ind, spl in enumerate(split_text): + try: + split_text[ind] = str(int(spl) + 1) + break + except ValueError: + continue + return " ".join(split_text[::-1]) + + +def increment_name(name): + ''' + Increments an end integer after a dash in a name. + + Inputs + ------ + `name` : `str` + The input name string. 
+ + Returns + ------- + `str` + The incremented name string. + ''' + name_parts = name.split(sep='-') + + # if no numeric suffix yet, add one + if len(name_parts) == 1 or not name_parts[-1].isdigit(): + name = name+'-0' + # otherwise there must be a suffix, so increment it + else: + name_parts[-1] = str( 1 + int(name_parts[-1])) + + name = '-'.join(name_parts) # reassemble name string + + return name + + +class Action(): + ''' + An Action is a general representation of a marine operations action + that involves manipulating a system/design/structure using assets/ + equipment. The Action base class contains generic routines and parameters. + Specialized routines for performing each action should be set up in + subclasses. + ''' + + def __init__(self, actionType, name, **kwargs): # allReq, **kwargs): + '''Create an action object... + It must be given a name. + The remaining parameters should correspond to items in the actionType dict... + + Inputs + ---------- + `actionType` : `dict` + Dictionary defining the action type (typically taken from a yaml). + `name` : `string` + A name for the action. It may be appended with numbers if there + are duplicate names. + `allReq` : `dict` + A dicitonary of all possible requirements (capabilities) that is needed + for mapping/assigning requirements to assets. + `kwargs` + Additional arguments may depend on the action type and typically + include a list of FAModel objects that are acted upon, or + a list of dependencies (other action names/objects). 
+ + Returns + ------- + `None` + ''' + + # list of things that will be controlled during this action + self.assetList = [] # list of assigned assets (vessels or ports) required to perform the action + self.requirements = {} # dictionary of requirements (keys) and associated required capabilities + self.objectList = [] # all objects that could be acted on + #self.materialList = [] # all materials that could be acted on + self.dependencies = {} # list of other actions this one depends on + + self.actionType = actionType # <— keep the YAML dict on the instance + #self.allReq = allReq # <— keep the full requirements dict on the instance + + self.type = getFromDict(actionType, 'type', dtype=str) + self.name = name + self.status = 0 # 0, waiting; 1=running; 2=finished + + self.duration = getFromDict(actionType, 'duration', default=0) # this will be overwritten by calcDurationAndCost. TODO: or should it overwrite any duration calculation? + self.cost = 0 # this will be overwritten by calcDurationAndCost + self.ti = 0 # action start time [h?] + self.tf = 0 # action end time [h?] 
+ + self.supported_objects = [] # list of FAModel object types supported by the action + + ''' + # Create a dictionary of supported object types (with empty entries) + if 'objects' in actionType: #objs = getFromDict(actionType, 'objects', shape=-1, default={}) + for obj in actionType['objects']: # go through keys in objects dictionary + self.objectList[obj] = None # make blank entries with the same names + + + # Process objects according to the action type + if 'objects' in kwargs: #objects = getFromDict(kwargs, objects, default=[]) + for obj in kwargs['objects']: + objType = obj.__class__.__name__.lower() + if objType in self.objectList: + self.objectList[objType] = obj + else: + raise Exception(f"Object type '{objType}' is not in the action's supported list.") + ''' + + # Determine requirements based on action type + if 'requirements' in actionType: + self.requirements = actionType['requirements'] # copy over the requirements with zero-valued capability specs + #self.requirements = {req: True for req in actionType['requirements']} # initialize all requirements to True (needed) + self.requirements_met = {req: False for req in actionType['requirements']} # dictionary to track if requirements are met (by assigned assets). Initialized to False. + + # Process objects to be acted upon. NOTE: must occur after requirements and assets placeholders have been assigned. 
+ # make list of supported object type names + if 'objects' in actionType: + if isinstance(actionType['objects'], list): + self.supported_objects = actionType['objects'] + elif isinstance(actionType['objects'], dict): + self.supported_objects = list(actionType['objects'].keys()) + + # Add objects to the action's object list as long as they're supported + if 'objects' in kwargs: + self.assignObjects(kwargs['objects']) + + # Based on the assigned objects, update what requirements/capabilities are needed + self.updateRequirements() + + # Process dependencies + if 'dependencies' in kwargs: + for dep in kwargs['dependencies']: + self.dependencies[dep.name] = dep + + # Process some optional kwargs depending on the action type + + + def dependsOn(self, act, recur=0): + '''Returns True if this action depends on the passed in action. + This looks through all dependencies, not just immediate.''' + + if recur > 10: + print("WARNGING, there seems to be a recursive action dependency...") + breakpoint() + + if act.name in self.dependencies: + return True + else: # Recursive search through dependent tasks + for act2 in self.dependencies.values(): + if act2.dependsOn(act, recur=recur+1): + return True + + return False + + + def updateRequirements(self): + ''' + Updates requirements based on the assigned objects or materials. + Note: any requirements whose values are not set in this method will be + subsequently removed from consideration. + ''' + # RA: let's rethink this function or brainstorm more. + if not self.objectList: + raise Exception("No objects assigned to action; cannot update requirements.") + if not self.requirements: + raise Warning("No requirements defined for action; cannot update requirements.") + return + + ''' + for req in self.requirements.keys(): + # Does this requirement require specific objects or material? 
+ objReq = self.allReq[req]['objects'] + matReq = self.allReq[req]['material'] + if objReq: + for obj in self.objectList: + if obj in self.allReq[req]['objects']: + objType = obj.__class__.__name__.lower() + if matReq: + if objType=='mooring': + for sec in obj.dd['sections']: + if sec['type'] in matReq: + self.requirements[req] = True + break + else: # TODO: need to figure out how to deal with different objects + pass + else: + self.requirements[req] = True + + # If there are no specific object or material requirements, just set to True + if not (objReq or matReq): + self.requirements[req] = True + ''' + + # ----- Fill in required capabilities and their specifications ----- + # Note: this will eventually be populated with calculations for all + # requirement types and capability types, drawing/building from what's + # in getMetrics (which will no longer be used). + + def printNotSupported(st): + '''Prints that a certain thing isn't supported yet in this method.''' + print(f"{st} is not yet in Action.updateRequirements.") + + + # Go through every requirement (each may involve different calculations, even + # if for the same capabilities) + for reqname_full, vals in self.requirements.items(): + + reqname = vals['base'] # name of requirement without direction suffix + req = vals['capabilities'] # should rename this to caps + + + if reqname == 'towing': + + mass = 1 + + for obj in self.objectList: + try: + mass += obj.props['mass'] + except: + pass + + req['bollard_pull']['max_force'] = 0.0001*mass*t2N # <<< can add a better calculation for towing force required + + + elif reqname == 'chain_storage': # Storage specifically for chain + + chain_L = 0 + chain_vol = 0 + + for obj in self.objectList: + if isinstance(obj, Mooring): + for sec in obj.dd['subcomponents']: + if 'L' in sec.keys(): + if 'chain' in sec['type']['material']: # if chain section + chain_vol += sec['L'] * np.pi * (sec['type']['d_nom'] / 2) ** 2 * (2) # volume [m^3] + chain_L += sec['L'] # length [m] + + 
req['chain_locker']['volume'] += chain_vol # <<< replace with proper estimate + req['deck_space']['area'] += chain_L*0.205 # m^2 + + + elif reqname == 'rope_storage': # Storage specifically for chain + + rope_L = 0 + rope_vol = 0 + + for obj in self.objectList: + if isinstance(obj, Mooring): + for sec in obj.dd['subcomponents']: + if 'L' in sec.keys(): + if 'rope' in sec['type']['material'] or 'polyester' in sec['type']['material']: + rope_vol += sec['L'] * np.pi * (sec['type']['d_nom'] / 2) ** 2 # volume [m^3] + rope_L += sec['L'] # length [m] + + req['line_reel']['volume'] += rope_vol + req['deck_space']['area'] += np.ceil((0.0184*rope_L)/13.5)*13.5 # m^2 + + + elif reqname == 'storage': # Generic storage, such as for anchors + + for obj in self.objectList: + if isinstance(obj, Anchor): + + if 'suction' in obj.dd['type']: + # if the suction piles are to be laying down + A = (obj.dd['design']['L']+(10/3.28084)) * (obj.dd['design']['D']+(10/3.28084)) + # if the suction piles are to be standing up # <<<<<< how to implement this? Depends on the asset assignment + # A = (obj.dd['design']['D']+(10/3.28084))**2 + + req['deck_space']['area'] += A + + elif reqname == 'anchor_overboarding' or reqname == 'anchor_lowering': + for obj in self.objectList: + if isinstance(obj, Anchor): + + if obj.mass: + mass = obj.mass # [kg] + else: # rough estimate based on size + wall_thickness = (6.35 + obj.dd['design']['D']*20)/1e3 # Suction pile wall thickness (m), API RP2A-WSD. 
It changes for different anchor concepts + mass = (np.pi * ((obj.dd['design']['D']/2)**2 - (obj.dd['design']['D']/2 - wall_thickness)**2) * obj.dd['design']['L'] * 7850) # rough mass estimate [kg] + req['crane']['capacity'] = mass * 1.2 # <<< replace with proper estimate + req['crane']['hook_height'] = obj.dd['design']['L'] * 1.2 # <<< replace with proper estimate + if reqname == 'anchor_overboarding': + req['stern_roller']['width'] = obj.dd['design']['D'] * 1.2 # <<< replace with proper estimate + else: # anchor lowering + req['winch']['max_line_pull'] = mass * 1.2 # <<< replace with proper estimate + req['winch']['speed'] = 0.3 # [m/s] + + elif reqname == 'anchor_orienting': + for obj in self.objectList: + if isinstance(obj, Anchor): + + # req['winch']['max_line_pull_t'] = + req['rov']['depth_rating'] = abs(obj.r[-1]) * 1.2 # <<< replace with proper estimate + req['divers']['max_depth'] = abs(obj.r[-1]) * 1.2 # <<< replace with proper estimate / basically, if anchor is too deep, divers might not be an option + + elif reqname == 'anchor_embedding': + + for obj in self.objectList: + if isinstance(obj, Anchor): + + if obj.dd['type'] == 'DEA': + + req['bollard_pull']['max_force'] = 270*t2N # <<< replace with proper estimate + + elif obj.dd['type'] == 'suction': + + req['pump_subsea']['pressure'] = 1.2e5 # <<< replace with proper estimate + + else: + printNotSupported(f"Anchor type {obj.dd['type']}") + + elif reqname == 'line_handling': + req['winch']['max_line_pull'] = 1*t2N + req['crane']['capacity'] = 27*t2N # should set to mooring weight <<< + #req[''][''] + + elif reqname == 'subsea_connection': + + for obj in self.objectList: + if isinstance(obj, Mooring): + + depth = abs(obj.rA[2]) # depth assumed needed for the connect/disconnect work + req['rov']['depth_rating'] = depth + if depth < 200: # don't consider divers if deeper than this + req['divers']['max_depth'] = depth # + + else: + printNotSupported(f"Requirement {reqname}") + + # Make a copy of the 
requirements dict that only keeps entries > 0 + new_reqs = {} + + for reqname, req in self.requirements.items(): + for capname, cap in req['capabilities'].items(): + for key, val in cap.items(): + if val > 0: + if not reqname in new_reqs: + new_reqs[reqname] = {'base':req['base'], 'capabilities':{}, + 'direction':req['direction']} + if not capname in new_reqs[reqname]['capabilities']: + new_reqs[reqname]['capabilities'][capname] = {} + new_reqs[reqname]['capabilities'][capname][key] = val + + self.requirements = new_reqs + + + def addDependency(self, dep): + ''' + Registers other action as a dependency of this one. + + Inputs + ------ + `dep` : `Action` + The action to be added as a dependency. + + Returns + ------- + `None` + ''' + self.dependencies[dep.name] = dep + # could see if already a dependency and raise a warning if so... + + + def getMetrics(self, cap, met, obj): + ''' + Retrieves the minimum metric(s) for a given capability required to act on target object. + A metric is the number(s) associated with a capability. A capability is what an action + role requires and an asset has. + + These minimum metrics are assigned to capabilities in the action's role in `assignObjects`. + + Inputs + ------ + `cap` : `str` + The capability for which the metric is to be retrieved. + `met` : `dict` + The metrics dictionary containing any existing metrics for the capability. + `obj` : FAModel object + The target object on which the capability is to be acted upon. + + Returns + ------- + `metrics` : `dict` + The metrics and values for the specified capability and object. + + ''' + + metrics = met # metrics dict with following form: {metric_1 : required_value_1, ...}. met is assigned here in case values have already been assigned + objType = obj.__class__.__name__.lower() + + """ + Note to devs: + This function contains hard-coded evaluations of all the possible combinations of capabilities and objects. 
+ The intent is we generate the minimum required of a given to work with the object. An + example would be minimum bollard pull required to tow out a platform. The capabilities (and their metrics) + are from capabilities.yaml and the objects are from objects.yaml. There is a decent ammount of assumptions + made here so it is important to document sources where possible. + + Some good preliminary work on this is in https://github.com/FloatingArrayDesign/FAModel/blob/IOandM_development/famodel/installation/03_step1_materialItems.py + + ### Code Explanation ### + This function has the following structure + + ``` + if cap == : + # some comments + + if objType == 'mooring': + metric_value = calc based on obj + elif objType == 'platform': + metric_value = calc based on obj + elif objType == 'anchor': + metric_value = calc based on obj + elif objType == 'component': + metric_value = calc based on obj + elif objType == 'turbine': + metric_value = calc based on obj + elif objType == 'cable': + metric_value = calc based on obj + else: + metric_value = -1 + + # Assign the capabilties metrics (keep existing metrics already in dict if larger than calc'ed value) + metrics[] = metric_value if metric_value > metrics.get() else metrics.get() + ``` + + Some of the logic for checking object types can be omitted if it doesnt make sense. For example, the chain_locker capability + only needs to be checked against the Mooring object. The comment `# object logic checked` shows that the logic in that capability + has been thought through. + + A metric_value of -1 indicates the object is not compatible with the capability. This is indicated by a warning printed at the end. + + A completed example of what this can look like is the line_reel capability. + """ + + if cap == 'deck_space': + # logic for deck_space capability (platforms and sites not compatible) + # TODO: how do we account for an action like load_mooring (which has two roles, + # representing vessels to be loaded). 
The combined deck space of the carriers + # should be the required deck space for the action. Right now I believe it is + # set up that only one asset can fulfill the capability minimum. + + # object logic checked + if objType == 'mooring': + pass + elif objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'component': + pass + elif objType == 'turbine': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['area_m2'] = None if None > metrics.get('area_m2') else metrics.get('area_m2') + # metrics['max_load_t'] = None if None > metrics.get('max_load_t') else metrics.get('max_load_t') + + elif cap == 'chain_locker': + # logic for chain_locker capability (only mooring objects compatible) + # object logic checked + + if objType == 'mooring': + + # set baseline values for summation + vol = 0 + length = 0 + + for i, sec in enumerate(obj.dd['sections']): # add up the volume and length of all chain in the object + if sec['type']['chain']: + diam = sec['type']['d_nom'] # diameter [m] + vol += 0.0 # TODO: calculate chain_locker volume from sec['L'] and diam. Use Delmar data from Rudy. Can we make function of chain diam? 
+ length += sec['L'] # length [m] + + else: + vol = -1 + + # Assign the capabilties metrics + metrics['volume_m3'] = vol if vol > metrics.get('volume_m3') else metrics.get('volume_m3') + + elif cap == 'line_reel': + # logic for line_reel capability (only mooring objects compatible) + # object logic checked, complete + + if objType == 'mooring': + + # set baseline values for summation + vol = 0 + length = 0 + + for i, sec in enumerate(obj.dd['sections']): # add up the volume and length of all non_chain line in the object + if not sec['type']['chain']: # any line type thats not chain + vol += sec['L'] * np.pi * (sec['type']['d_nom'] / 2) ** 2 # volume [m^3] + length += sec['L'] # length [m] + + else: + vol = -1 + length = -1 + + # Assign the capabilties metrics + metrics['volume_m3'] = vol if vol > metrics.get('volume_m3') else metrics.get('volume_m3') + metrics['rope_capacity_m'] = length if length > metrics.get('rope_capacity_m') else metrics.get('rope_capacity_m') + + elif cap == 'cable_reel': + # logic for cable_reel capability (only cable objects compatible) + # object logic checked + vol = 0 + length = 0 + ''' + if objType == 'cable': + for cable in cables: # TODO: figure out this iteration + if cable is cable and not other thing in cables object: # TODO figure out how to only check cables, not j-tubes or any other parts + vol += cable['L'] * np.pi * (cable['type']['d_nom'] / 2) ** 2 + length += cable['L'] # length [m] + else: + vol = -1 + length = -1 + ''' + # Assign the capabilties metrics + metrics['volume_m3'] = vol if vol > metrics.get('volume_m3') else metrics.get('volume_m3') + metrics['cable_capacity_m'] = length if length > metrics.get('cable_capacity_m') else metrics.get('cable_capacity_m') + + elif cap == 'winch': + # logic for winch capability + if objType == 'mooring': + pass + elif objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'component': + pass + elif objType == 'turbine': + pass + elif objType == 'cable': + 
pass + else: + pass + + # # Assign the capabilties metrics + # metrics['max_line_pull_t'] = None if None > metrics.get('max_line_pull_t') else metrics.get('max_line_pull_t') + # metrics['brake_load_t'] = None if None > metrics.get('brake_load_t') else metrics.get('brake_load_t') + # metrics['speed_mpm'] = None if None > metrics.get('speed_mpm') else metrics.get('speed_mpm') + + elif cap == 'bollard_pull': + # per calwave install report (section 7.2): bollard pull can be described as function of vessel speed and load + + # logic for bollard_pull capability + if objType == 'mooring': + pass + elif objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'component': + pass + elif objType == 'turbine': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['max_force_t'] = None if None > metrics.get('max_force_t') else metrics.get('max_force_t') + + elif cap == 'crane': + # logic for deck_space capability (all compatible) + # object logic checked + if objType == 'mooring': + pass + elif objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'component': + pass + elif objType == 'turbine': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['capacity_t'] = None if None > metrics.get('capacity_t') else metrics.get('capacity_t') + # metrics['hook_height_m'] = None if None > metrics.get('hook_height_m') else metrics.get('hook_height_m') + + elif cap == 'station_keeping': + # logic for station_keeping capability + if objType == 'mooring': + pass + elif objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'component': + pass + elif objType == 'turbine': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['type'] = None if None > metrics.get('type') else metrics.get('type') + + elif cap == 'mooring_work': + # logic for mooring_work 
capability + if objType == 'mooring': + pass + elif objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'component': + pass + elif objType == 'turbine': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['line_types'] = None if None > metrics.get('line_types') else metrics.get('line_types') + # metrics['stern_roller'] = None if None > metrics.get('stern_roller') else metrics.get('stern_roller') + # metrics['shark_jaws'] = None if None > metrics.get('shark_jaws') else metrics.get('shark_jaws') + # metrics['towing_pin_rating_t'] = None if None > metrics.get('towing_pin_rating_t') else metrics.get('towing_pin_rating_t') + + elif cap == 'pump_surface': + # logic for pump_surface capability + if objType == 'mooring': + pass + elif objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'component': + pass + elif objType == 'turbine': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['power_kW'] = None if None > metrics.get('power_kW') else metrics.get('power_kW') + # metrics['pressure_bar'] = None if None > metrics.get('pressure_bar') else metrics.get('pressure_bar') + # metrics['weight_t'] = None if None > metrics.get('weight_t') else metrics.get('weight_t') + # metrics['dimensions_m'] = None if None > metrics.get('dimensions_m') else metrics.get('dimensions_m') + + elif cap == 'pump_subsea': + # logic for pump_subsea capability + if objType == 'mooring': + pass + elif objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'component': + pass + elif objType == 'turbine': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['power_kW'] = None if None > metrics.get('power_kW') else metrics.get('power_kW') + # metrics['pressure_bar'] = None if None > metrics.get('pressure_bar') else metrics.get('pressure_bar') + # 
metrics['weight_t'] = None if None > metrics.get('weight_t') else metrics.get('weight_t') + # metrics['dimensions_m'] = None if None > metrics.get('dimensions_m') else metrics.get('dimensions_m') + + elif cap == 'pump_grout': + # logic for pump_grout capability + if objType == 'mooring': + pass + elif objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'component': + pass + elif objType == 'turbine': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['power_kW'] = None if None > metrics.get('power_kW') else metrics.get('power_kW') + # metrics['flow_rate_m3hr'] = None if None > metrics.get('flow_rate_m3hr') else metrics.get('flow_rate_m3hr') + # metrics['pressure_bar'] = None if None > metrics.get('pressure_bar') else metrics.get('pressure_bar') + # metrics['weight_t'] = None if None > metrics.get('weight_t') else metrics.get('weight_t') + # metrics['dimensions_m'] = None if None > metrics.get('dimensions_m') else metrics.get('dimensions_m') + + elif cap == 'hydraulic_hammer': + # logic for hydraulic_hammer capability (only platform and anchor objects compatible) + # object logic checked + if objType == 'platform': + pass + elif objType == 'anchor': # for fixed bottom installations + pass + else: + pass + + # Assign the capabilties metrics + # metrics['power_kW'] = None if None > metrics.get('power_kW') else metrics.get('power_kW') + # metrics['energy_per_blow_kJ'] = None if None > metrics.get('energy_per_blow_kJ') else metrics.get('energy_per_blow_kJ') + # metrics['weight_t'] = None if None > metrics.get('weight_t') else metrics.get('weight_t') + # metrics['dimensions_m'] = None if None > metrics.get('dimensions_m') else metrics.get('dimensions_m') + + elif cap == 'vibro_hammer': + # logic for vibro_hammer capability (only platform and anchor objects compatible) + # object logic checked + if objType == 'platform': + pass + elif objType == 'anchor': # for fixed bottom installations 
+ pass + else: + pass + + # Assign the capabilties metrics + # metrics['power_kW'] = None if None > metrics.get('power_kW') else metrics.get('power_kW') + # metrics['centrifugal_force_kN'] = None if None > metrics.get('centrifugal_force_kN') else metrics.get('centrifugal_force_kN') + # metrics['weight_t'] = None if None > metrics.get('weight_t') else metrics.get('weight_t') + # metrics['dimensions_m'] = None if None > metrics.get('dimensions_m') else metrics.get('dimensions_m') + + elif cap == 'drilling_machine': + # logic for drilling_machine capability (only platform, anchor, and cable objects compatible) + # Considering drilling both for export cables, interarray, and anchor/fixed platform install + # object logic checked + if objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['power_kW'] = None if None > metrics.get('power_kW') else metrics.get('power_kW') + # metrics['weight_t'] = None if None > metrics.get('weight_t') else metrics.get('weight_t') + # metrics['dimensions_m'] = None if None > metrics.get('dimensions_m') else metrics.get('dimensions_m') + + elif cap == 'torque_machine': + # logic for torque_machine capability + if objType == 'mooring': + pass + elif objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'component': + pass + elif objType == 'turbine': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['power_kW'] = None if None > metrics.get('power_kW') else metrics.get('power_kW') + # metrics['torque_kNm'] = None if None > metrics.get('torque_kNm') else metrics.get('torque_kNm') + # metrics['weight_t'] = None if None > metrics.get('weight_t') else metrics.get('weight_t') + # metrics['dimensions_m'] = None if None > metrics.get('dimensions_m') else metrics.get('dimensions_m') + + elif cap == 'cable_plough': + # logic for cable_plough capability (only 
cable objects compatible) + # object logic checked + if objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['power_kW'] = None if None > metrics.get('power_kW') else metrics.get('power_kW') + # metrics['weight_t'] = None if None > metrics.get('weight_t') else metrics.get('weight_t') + # metrics['dimensions_m'] = None if None > metrics.get('dimensions_m') else metrics.get('dimensions_m') + + elif cap == 'rock_placement': + # logic for rock_placement capability (only platform, anchor, and cable objects compatible) + # object logic checked + if objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['placement_method'] = None if None > metrics.get('placement_method') else metrics.get('placement_method') + # metrics['max_depth_m'] = None if None > metrics.get('max_depth_m') else metrics.get('max_depth_m') + # metrics['accuracy_m'] = None if None > metrics.get('accuracy_m') else metrics.get('accuracy_m') + # metrics['rock_size_range_mm'] = None if None > metrics.get('rock_size_range_mm') else metrics.get('rock_size_range_mm') + + elif cap == 'container': + # logic for container capability (only platform, turbine, and cable objects compatible) + # object logic checked + if objType == 'wec': + pass + elif objType == 'turbine': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['weight_t'] = None if None > metrics.get('weight_t') else metrics.get('weight_t') + # metrics['dimensions_m'] = None if None > metrics.get('dimensions_m') else metrics.get('dimensions_m') + + elif cap == 'rov': + # logic for rov capability (all compatible) + # object logic checked + if objType == 'mooring': + pass + elif objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'component': + pass + elif objType == 'turbine': + pass + elif objType == 'cable': + pass + else: + 
pass + + # Assign the capabilties metrics + # metrics['class'] = None if None > metrics.get('class') else metrics.get('class') + # metrics['depth_rating_m'] = None if None > metrics.get('depth_rating_m') else metrics.get('depth_rating_m') + # metrics['weight_t'] = None if None > metrics.get('weight_t') else metrics.get('weight_t') + # metrics['dimensions_m'] = None if None > metrics.get('dimensions_m') else metrics.get('dimensions_m') + + elif cap == 'positioning_system': + # logic for positioning_system capability (only platform, anchor, and cable objects compatible) + # object logic checked + if objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['accuracy_m'] = None if None > metrics.get('accuracy_m') else metrics.get('accuracy_m') + # metrics['methods'] = None if None > metrics.get('methods') else metrics.get('methods') + + elif cap == 'monitoring_system': + # logic for monitoring_system capability + if objType == 'mooring': + pass + elif objType == 'platform': + pass + elif objType == 'anchor': + pass + elif objType == 'component': + pass + elif objType == 'turbine': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['metrics'] = None if None > metrics.get('metrics') else metrics.get('metrics') + # metrics['sampling_rate_hz'] = None if None > metrics.get('sampling_rate_hz') else metrics.get('sampling_rate_hz') + + elif cap == 'sonar_survey': + # logic for sonar_survey capability (only anchor and cable objects compatible) + # object logic checked + if objType == 'anchor': + pass + elif objType == 'cable': + pass + else: + pass + + # Assign the capabilties metrics + # metrics['types'] = None if None > metrics.get('types') else metrics.get('types') + # metrics['resolution_m'] = None if None > metrics.get('resolution_m') else metrics.get('resolution_m') + + else: + raise Exception(f"Unsupported 
capability '{cap}'.") + + for met in metrics.keys(): + if metrics[met] == -1: + print(f"WARNING: No metrics assigned for '{met}' metric in '{cap}' capability based on object type '{objType}'.") + + + return metrics # return the dict of metrics and required values for the capability + + + def assignObjects(self, objects): + ''' + Adds a list of objects to the actions objects list and + calculates the required capability metrics, checking objects + are valid for the actions supported objects. + + The minimum capability metrics are used by when checking for + compatibility and assinging assets to the action in `assignAsset`. + Thus this function should only be called in the intialization + process of an action. + + Inputs + ------ + `objects` : `list` + A list of FAModel objects to be added to the action. + + Returns + ------- + `None` + ''' + + for obj in objects: + + # Check compatibility, set capability metrics based on object, and assign object to action + + objType = obj.__class__.__name__.lower() # object class name + if objType not in self.supported_objects: + raise Exception(f"Object type '{objType}' is not in the action's supported list.") + else: + if obj in self.objectList: + print(f"Warning: Object '{obj}' is already in the action's object list. Capabilities will be overwritten.") + ''' + # Set capability requirements based on object + for role, caplist in self.requirements.items(): + for cap in caplist: + metrics = self.getMetrics(cap, caplist[cap], obj) # pass in the metrics dict for the cap and the obj + + self.requirements[role][cap] = metrics # assign metric of capability cap based on value required by obj + # MH: commenting our for now just so the code will run, but it may be better to make the above a separate step anyway + # RA: under progress, this is to be handled in updateRequirements now. + ''' + self.objectList.append(obj) + + def assignMaterials(self, materials): + ''' + Adds a list of materials to the actions materials list. 
+ + Inputs + ------ + `materials` : `list` + A list of material dicts to be added to the action. + + Returns + ------- + `None` + ''' + + for mat in materials: + if mat in self.materialList: + print(f"Warning: Material '{mat['name']}' is already in the action's material list.") + self.materialList.append(mat) + + + + def checkAssets(self, assets): + ''' + Checks if a specified set of assets has sufficient capabilities and + specs to fulfill all requirements in this action. + + Parameters + ---------- + asset : list of assets + ''' + + # Sum up the asset capabilities and their specs (not sure this is useful/valid) + + # Here's a list of specs we might want to take the max of instead of sum: Add more as needed + specs_to_max = ['hook_height', 'depth_rating', + 'max_depth', 'accuracy', + 'speed', 'capacity'] # capacity_t is here because it doesn't make sense to have two cranes to lift a single anchor. + asset_caps = {} + for asset in assets: + for cap, specs in asset['capabilities'].items(): + if not cap in asset_caps: # add the capability entry if absent + asset_caps[cap] = {} + for key, val in specs.items(): + if key in asset_caps[cap]: + if key in specs_to_max: + asset_caps[cap][key] = max(asset_caps[cap][key], val) + else: + asset_caps[cap][key] += val # add to the spec + else: + asset_caps[cap][key] = val # create the spec + + print('Combined asset specs are as follows:') + for cap, specs in asset_caps.items(): + print(f' Capability {cap}') + for key, val in specs.items(): + print(f' Total spec {key} = {val}') + + # <<< maybe instead of all this we should do an approach that looks by asset + # because that could then also be used to decide asset assignment + # to each requirement >>> + + requirements_met = {} + for req, vals in self.requirements.items(): # go through each requirement + + caps = vals['capabilities'] + dir = vals['direction'] + + # The following logic should mark a requirement as met if any one of + # the requirement's needed capabilities has all 
of its specs by the + # combined spec values of the assets + + requirements_met[req] = False # start assume it is not met + + for cap, specs in caps.items(): # go throuch capability of the requirement + if cap not in asset_caps: # assets don't have this capability, move on + continue + + # Let's check if this capability is sufficient + capable = True + for key, val in specs.items(): # go through each spec for this capability + + if val == 0: # if zero value, no spec required, move on + continue + if key not in asset_caps[cap]: # if the spec is missing, fail + capable = False + print(f"Warning: capability '{cap}' does not have metric '{key}'.") + break + if asset_caps[cap][key] < val: # if spec is too small, fail + # note: may need to add handling for lists/strings, or standardize specs more + capable = False + print(f"Warning: capability '{cap}' does not meet metric '{key}' requirement of {val:.2f} (has {asset_caps[cap][key]:.2f}).") + break + + if capable: + requirements_met[req] = True # one capability fully satisfies the requirement + break # no need to check other capabilities for this requirement + + if not requirements_met[req]: + print(f"Requirement '{req}' is not met by asset(s):") + # print(f"{assets}.") + + assignable = all(requirements_met.values()) + + # message: + if assignable: + message = "Asset meets all required capabilities." + else: + unmet = [req for req, met in requirements_met.items() if not met] + detailed = [] + for req in unmet: + expected = [cap for cap in self.requirements[req].keys()] + detailed.append(f"- {req}: {expected}.") + detailed_msg = "\n".join(detailed) + + message = "Asset does not meet the following required capabilities:\n" + detailed_msg + return assignable, message + + + def checkAsset(self, asset): + ''' + Checks if a specified asset has sufficient capabilities to fulfil + all requirements in this action. + + Inputs + ------ + `asset` : `dict` + The asset to check against the requirements. 
+ + Returns + ------- + `bool` + True if the asset meets the requirements, False otherwise. + `str` + A message providing additional information about the check. + ''' + + requirements_met = {} + for req, needed in self.requirements.items(): + if needed: + has_cap = any(cap in asset['capabilities'] for cap in self.allReq[req]['capabilities']) + requirements_met[req] = has_cap + else: + requirements_met[req] = True # requirement not needed, so considered met + + assignable = all(requirements_met.values()) + + # message: + if assignable: + message = "Asset meets all required capabilities." + else: + unmet = [req for req, met in requirements_met.items() if not met] + detailed = [] + for req in unmet: + expected = self.allReq[req]['capabilities'] + detailed.append(f"- {req}: requires one of {expected}.") + detailed_msg = "\n".join(detailed) + + detailed_msg += f"\nAsset has the following capabilities: {[cap for cap in asset['capabilities'].keys()]}" + message = "Asset does not meet the following required capabilities:\n" + detailed_msg + + + return assignable, message + + # Old method: + # # Make sure role_name is valid for this action + # if not role_name in self.assets.keys(): + # raise Exception(f"The specified role '{role_name}' is not named in this action.") + + # if self.assets[role_name] is not None: + # return False, f"Role '{role_name}' is already filled in action '{self.name}'." + + # for capability in self.requirements[role_name].keys(): + + # if capability in asset['capabilities'].keys(): # check capability is in asset + + # # TODO: does this work if there are no metrics in a capability? This should be possible, as not all capabilities will require a constraint. + # for metric in self.requirements[role_name][capability].keys(): # loop over the capacity requirements for the capability (if more than one) + + # if metric not in asset['capabilities'][capability].keys(): # value error because capabilities are defined in capabilities.yaml. 
This should only be triggered if something has gone wrong (i.e. overwriting values somewhere) + # raise ValueError(f"The '{capability}' capability does not have metric: '{metric}'.") + + # if self.requirements[role_name][capability][metric] > asset['capabilities'][capability][metric]: # check requirement is met + # return False, f"The asset does not have sufficient '{metric}' for '{capability}' capability in '{role_name}' role of '{self.name}' action." + + # return True, 'All capabilities in role met' + + # else: + # return False, f"The asset does not have the '{capability}' capability for '{role_name}' role of '{self.name}' action." # a capability is not met + + + def calcDurationAndCost(self): + ''' + Calculates duration and cost for the action, based on the time for + each requirement to be performed based on the selected capability + and the assigned asset(s) that meeting that capability. + The durations of each requirement are assumed to add (i.e. each is + done in series rather than parallel). <<< MH: is this okay? <<< + TODO: finish description + + Inputs + ------ + `None` + + Returns + ------- + `None` + ''' + + # Check that all roles in the action are filled + ''' + for req, met in self.requirements_met.items(): + if not met: + raise Exception(f"Requirement '{req}' is not met in action '{self.name}'. Cannot calculate duration and cost.") + ''' + + if len(self.assetList) == 0: + raise Exception(f"Cannot calculate action {self.name} because no assets have been succesfully assigned.") + + + # Initialize itimized cost and duration dictionaries + self.costs = {} # [$] + self.durations = {} # [h] + + """ + Note to devs: + The code here calculates the cost and duration of an action. Each action in the actions.yaml has a hardcoded 'model' + here that is used to evaluate the action based on the assets assigned to it. + + This is where a majority of assumptions about the action's behavior are made, so it is key to cite references behind + any abnormal approaches. 
+ + Some good preliminary work on this is in https://github.com/FloatingArrayDesign/FAModel/blob/IOandM_development/famodel/installation/ + and in assets.py + """ + + # --- Mobilization --- + if self.type == 'mobilize': + # Hard-coded example of mobilization times based on vessel type - from the calwave installation example. + durations = { + 'crane_barge': 3.0, + 'research_vessel': 1.0 + } + mob_times = [] # store time of each vessel (the next lines of code could maybe be simplified) + for asset in self.assetList: + asset_type = asset['type'].lower() + for key, duration in durations.items(): + if key in asset_type: + mob_times.append(duration) + + # vessels mobilize in parallel so store the max time + self.durations['mobilize'] = max(mob_times) + + + elif self.type == 'demobilize': + # Hard-coded example of demobilization times based on vessel type - from the calwave installation example. + durations = { + 'crane_barge': 3.0, + 'research_vessel': 1.0 + } + mob_times + for asset in self.assetList: + asset_type = asset['type'].lower() + for key, duration in durations.items(): + if key in asset_type: + mob_times.append(duration) + + # vessels demobilize in parallel so store the max time + self.durations['demobilize'] = max(mob_times) + + + elif self.type == 'load_cargo': + pass + + # --- Towing & Transport --- + elif self.type == 'tow': + + req = self.requirements['towing'] # look at bollard pull requirement + + distance = 2500 # <<< need to eventually compute distances based on positions + + speed = req['assigned_assets'][0]['capabilities']['bollard_pull']['site_speed'] + + self.durations['tow'] = distance / speed / 60 / 60 # duration [hr] + + elif self.type == 'transit_linehaul_self': + # TODO: RA: Needs to be updated based on new format (no roles)! 
- Note to dev: try to reduce (try/except) statements + # YAML override + try: + v = getFromDict(self.actionType, 'duration_h', dtype=float); self.duration += v + except ValueError: + try: + v = getFromDict(self.actionType, 'default_duration_h', dtype=float); self.duration += v + except ValueError: + vessel = self.assetList[0] # MH: using first asset for now <<< + if vessel is None: + raise ValueError('transit_linehaul_self: no vessel assigned.') + + tr = vessel['transport'] + + # distance + dist_m = float(tr['route_length_m']) + + # speed: linehaul uses transport.cruise_speed_mps + speed_mps = float(tr['cruise_speed_mps']) + + dur_h = dist_m/speed_mps/3600.0 + self.duration += dur_h + # cost + rate_per_hour = 0.0 + for _, asset in self.assetList: + rate_per_hour += float(asset['day_rate'])/24.0 + self.cost += self.duration*rate_per_hour + return self.duration, self.cost + + + elif self.type == 'transit_linehaul_tug': + # TODO: RA: Needs to be updated based on new format (no roles)! - Note to dev: try to reduce (try/except) statements + # YAML override + try: + v = getFromDict(self.actionType, 'duration_h', dtype=float); self.duration += v + except ValueError: + try: + v = getFromDict(self.actionType, 'default_duration_h', dtype=float); self.duration += v + except ValueError: + tug = self.assets.get('operator') or self.assets.get('vessel') + barge = self.assets.get('carrier') + if tug is None or barge is None: + raise ValueError('transit_linehaul_tug: need tug (operator) and barge (carrier).') + + tr_b = barge.get('transport', {}) + tr_t = tug.get('transport', {}) + + # distance: prefer barge’s transport + dist_m = float(tr_b.get('route_length_m', tr_t['route_length_m'])) + + # speed for convoy linehaul: barge (operator) cruise speed + operator = self.assets.get('operator') or self.assets.get('vessel') + if operator is None: + raise ValueError('transit_linehaul_tug: operator (barge) missing.') + + speed_mps = float(operator['transport']['cruise_speed_mps']) + + 
dur_h = dist_m/speed_mps/3600.0 + + + self.duration += dur_h + + # cost + rate_per_hour = 0.0 + for _, asset in self.assets.items(): + rate_per_hour += float(asset['day_rate'])/24.0 + self.cost += self.duration*rate_per_hour + return self.duration, self.cost + + elif self.type == 'transit_onsite_self': + # TODO: RA: Needs to be updated based on new format (no roles)! - Note to dev: try to reduce (try/except) statements + # YAML override + try: + v = getFromDict(self.actionType, 'duration_h', dtype=float); self.duration += v + except ValueError: + try: + v = getFromDict(self.actionType, 'default_duration_h', dtype=float); self.duration += v + except ValueError: + # vessel (Beyster) required + vessel = self.assets.get('vessel') or self.assets.get('operator') or self.assets.get('carrier') + if vessel is None: + raise ValueError('transit_onsite_self: no vessel assigned.') + + # NEW: quick vessel print + try: + print(f"[onsite_self] {self.name}: vessel={vessel.get('type')}") + except Exception: + pass + + # destination anchor from objects (required) + if not self.objectList: + raise ValueError('transit_onsite_self: destination anchor missing in objects.') + dest = self.objectList[0] + r_dest = getattr(dest, 'r', None) + + # NEW: print dest + try: + print(f"[onsite_self] {self.name}: r_dest={r_dest}") + except Exception: + pass + + # infer start from dependency chain (BFS up to depth 3) + r_start = None + from collections import deque + q, seen = deque(), set() + for dep in self.dependencies.values(): + q.append((dep, 0)); seen.add(id(dep)) + while q: + node, depth = q.popleft() + if node.objectList and hasattr(node.objectList[0], 'r'): + r_start = node.objectList[0].r + break + # if depth < 3: + # for nxt in node.dependencies.values(): + # if id(nxt) in seen: continue + # seen.add(id(nxt)); q.append((nxt, depth+1)) + + # NEW: print BFS result + try: + print(f"[onsite_self] {self.name}: r_start(BFS)={r_start}") + except Exception: + pass + + # CHANGED: fallback for first 
onsite leg → try centroid, else keep old zero-distance fallback + if r_start is None and r_dest is not None: + # NEW: centroid read (linehaul_to_site should set it on this action) + cent = (getattr(self, 'meta', {}) or {}).get('anchor_centroid') + if cent is None: + cent = (getattr(self, 'params', {}) or {}).get('anchor_centroid') + if cent is not None and len(cent) >= 2: + r_start = (float(cent[0]), float(cent[1])) + try: + print(f"[onsite_self] {self.name}: using centroid as r_start={r_start}") + except Exception: + pass + else: + # ORIGINAL behavior: assume zero in-field distance + r_start = r_dest + try: + print(f"[warn] {self.name}: could not infer start from deps; assuming zero in-field distance.") + except Exception: + pass + + # 2D distance [m] + from math import hypot + dx = float(r_dest[0]) - float(r_start[0]) + dy = float(r_dest[1]) - float(r_start[1]) + dist_m = hypot(dx, dy) + + # NEW: print distance + try: + print(f"[onsite_self] {self.name}: dist_m={dist_m:.1f} (start={r_start} → dest={r_dest})") + except Exception: + pass + + # onsite speed from capabilities.engine (SI) + cap_eng = vessel.get('capabilities', {}).get('engine', {}) + speed_mps = float(cap_eng['site_speed']) + + self.duration += dist_m/speed_mps/3600.0 + + # NEW: print duration increment + try: + print(f"[onsite_self] {self.name}: speed_mps={speed_mps:.3f}, dT_h={dist_m/speed_mps/3600.0:.3f}, total={self.duration:.3f}") + except Exception: + pass + + # cost + rate_per_hour = 0.0 + for _, asset in self.assets.items(): + rate_per_hour += float(asset['day_rate'])/24.0 + self.cost += self.duration*rate_per_hour + return self.duration, self.cost + + elif self.type == 'transit_onsite_tug': + # TODO: RA: Needs to be updated based on new format (no roles)! 
- Note to dev: try to reduce (try/except) statements + # YAML override + try: + v = getFromDict(self.actionType, 'duration_h', dtype=float); self.duration += v + except ValueError: + try: + v = getFromDict(self.actionType, 'default_duration_h', dtype=float); self.duration += v + except ValueError: + # assets required (operator = San_Diego tug; carrier = Jag barge) + operator = self.assets.get('operator') or self.assets.get('vessel') + carrier = self.assets.get('carrier') + if operator is None and carrier is None: + raise ValueError('transit_onsite_tug: no operator/carrier assigned.') + + # quick prints + try: + op_name = operator.get('type') if operator else None + ca_name = carrier.get('type') if carrier else None + print(f"[onsite_tug] {self.name}: operator={op_name} carrier={ca_name}") + except Exception: + pass + + # destination anchor from objects (required) + if not self.objectList: + raise ValueError('transit_onsite_tug: destination anchor missing in objects.') + dest = self.objectList[0] + r_dest = getattr(dest, 'r', None) + + try: + print(f"[onsite_tug] {self.name}: r_dest={r_dest}") + except Exception: + pass + + # infer start from dependency chain (BFS up to depth 3) + r_start = None + from collections import deque + q, seen = deque(), set() + for dep in self.dependencies.values(): + q.append((dep, 0)); seen.add(id(dep)) + while q: + node, depth = q.popleft() + if node.objectList and hasattr(node.objectList[0], 'r'): + r_start = node.objectList[0].r + break + # if depth < 3: + # for nxt in node.dependencies.values(): + # if id(nxt) in seen: continue + # seen.add(id(nxt)); q.append((nxt, depth+1)) + + try: + print(f"[onsite_tug] {self.name}: r_start(BFS)={r_start}") + except Exception: + pass + + # fallback for first onsite leg: use centroid if present, else zero-distance fallback + if r_start is None and r_dest is not None: + cent = (getattr(self, 'meta', {}) or {}).get('anchor_centroid') + if cent is None: + cent = (getattr(self, 'params', {}) or 
{}).get('anchor_centroid') + if cent is not None and len(cent) >= 2: + r_start = (float(cent[0]), float(cent[1])) + try: + print(f"[onsite_tug] {self.name}: using centroid as r_start={r_start}") + except Exception: + pass + else: + r_start = r_dest + try: + print(f"[warn] {self.name}: could not infer start from deps; assuming zero in-field distance.") + except Exception: + pass + + # 2D distance [m] + from math import hypot + dx = float(r_dest[0]) - float(r_start[0]) + dy = float(r_dest[1]) - float(r_start[1]) + dist_m = hypot(dx, dy) + + try: + print(f"[onsite_tug] {self.name}: dist_m={dist_m:.1f} (start={r_start} → dest={r_dest})") + except Exception: + pass + + # speed for convoy onsite: barge (operator) site speed + operator = self.assets.get('operator') or self.assets.get('vessel') + if operator is None: + raise ValueError('transit_onsite_tug: operator (barge) missing.') + + cap_eng = operator.get('capabilities', {}).get('bollard_pull', {}) + speed_mps = float(cap_eng['site_speed']) + + self.duration += dist_m/speed_mps/3600.0 + + try: + print(f"[onsite_tug] {self.name}: speed_mps={speed_mps:.3f}, dT_h={dist_m/speed_mps/3600.0:.3f}, total={self.duration:.3f}") + except Exception: + pass + + # cost (unchanged) + rate_per_hour = 0.0 + for _, asset in self.assets.items(): + rate_per_hour += float(asset['day_rate'])/24.0 + self.cost += self.duration*rate_per_hour + return self.duration, self.cost + + elif self.type == 'at_site_support': + pass + elif self.type == 'transport_components': + pass + + # --- Mooring & Anchors --- + + elif self.type == 'load_mooring': + + # total mooring length that needs to be loaded + L = 0 + m = 0 + + # list of just the mooring objects (should be all of them) + moorings = [obj for obj in self.objectList if isinstance(obj, Mooring)] + + # for obj in self.objectList: + # if isinstance(obj, Mooring): + # L += obj.props['length'] + + req = self.requirements['line_handling'] # look at line handling requirement + + if 'winch' in 
req['selected_capability']: # using a winch to load + + speed = req['assigned_assets'][0]['capabilities']['winch']['speed']*60 # [m/min] + + L = sum([mooring['length'] for mooring in moorings]) + + self.durations['load mooring by winch'] = L / speed / 60 # duration [hr] + + elif 'crane' in req['selected_capability']: # using a crane to load + + # temporarily estimate 1h per crane loading <<< + self.durations['load mooring by crane'] = 1.0 * len(moorings) + + + elif self.type == 'install_anchor': + # YAML override (no model if present) + default_duration = None + try: + default_duration = getFromDict(self.actionType, 'duration_h', dtype=float) + except ValueError: + default_duration = None + + if default_duration is not None: + computed_duration_h = default_duration + + else: + # Expect an anchor object in self.objectList + if not self.objectList: + raise ValueError("install_anchor: no anchor object provided in 'objects'.") + + # 1) Relevant metrics for cost and duration + anchor = self.objectList[0] + L = anchor.dd['design']['L'] + depth_m = abs(float(anchor.r[2])) + + # 2) Winch vertical speed [mps] + # TODO: RA: Also, what if the anchor is using 'barge' for 'storage' (anchor is in the barge) but another asset has the winch? This is not a problem if the other asset uses the crane to install the anchor. 
+ req = self.requirements['anchor_lowering'] # calculate the time for anchor lowering + + v_mpm = None + if 'winch' in req['selected_capability']: # using a winch to lower + v_mpm = req['assigned_assets'][0]['capabilities']['winch']['speed']*60 # [m/min] + elif 'crane' in req['selected_capability']: # using a crane to lower + v_mpm = req['assigned_assets'][0]['capabilities']['crane']['speed']*60 # [m/min] + + if v_mpm: # there is only a lowering time if a winch or crane is involved + self.durations['anchor lowering'] = depth_m/v_mpm /60 # [h] + + # 3) Penetration time ~ proportional to L + if 'anchor_embedding' in self.requirements: + req = self.requirements['anchor_embedding'] + if 'pump_subsea' in req['selected_capability']: # using a winch to lower + specs = req['assigned_assets'][0]['capabilities']['pump_subsea'] # pump specs + embed_speed = 1E-4*specs['power']/(np.pi/4*anchor.dd['design']['D']**2) # <<< example of more specific calculation + else: + embed_speed = 0.07 # embedment rate [m/min] + self.durations['anchor embedding'] = L*embed_speed / 60 + + # 4) Connection / release (fixed) + self.durations['anchor release'] = 15/60 + + elif self.type == 'retrieve_anchor': + pass + elif self.type == 'lay_mooring': #'install_mooring': + + mooring = self.objectList[0] # assume there's one mooring for now + + # find installation depth of end A (assuming that's the end to be hooked up now) + depth = abs(mooring.rA[2]) + # Note: Eventually could have logic in here to figure out if the mooring was + # already lowered/attached with the anchor in a previous step (based on the + # previous action, which assets/objects were involved, attachments, etc.). 
+ + if 'line_handling' in self.requirements: + req = self.requirements['line_handling'] # calculate the time for paying out line + + # note: some of this code is repeated and could be put in a function + v_mpm = None + if 'winch' in req['selected_capability']: # using a winch to lower + v_mpm = req['assigned_assets'][0]['capabilities']['winch']['speed']*60 # [m/min] + elif 'crane' in req['selected_capability']: # using a crane to lower + v_mpm = req['assigned_assets'][0]['capabilities']['crane']['speed']*60 # [m/min] + + if v_mpm: # there is only a lowering time if a winch or crane is involved + self.durations['mooring line lowering'] = depth/v_mpm /60 # [h] + + if 'subsea_connection' in self.requirements: + req = self.requirements['subsea_connection'] + if 'rov' in req['selected_capability']: + time = 1 + depth/500 + elif 'divers' in req['selected_capability']: + time = 1 + depth/100 + + self.durations['mooring-anchor connection'] = time + + + + elif self.type == 'mooring_hookup': + + mooring = self.objectList[0] # assume there's one mooring for now + + # find resting depth of end A (assuming that's the end to be hooked up now) + depth = abs(mooring.rA[2]) + + if 'line_handling' in self.requirements: + req = self.requirements['line_handling'] # calculate the time for paying out line + + # note: some of this code is repeated and could be put in a function + v_mpm = None + if 'winch' in req['selected_capability']: # using a winch to lower + v_mpm = req['assigned_assets'][0]['capabilities']['winch']['speed']*60 # [m/min] + elif 'crane' in req['selected_capability']: # using a crane to lower + v_mpm = req['assigned_assets'][0]['capabilities']['crane']['speed']*60 # [m/min] + + if v_mpm: # there is only a lowering time if a winch or crane is involved + self.durations['mooring line retrieval'] = depth/v_mpm /60 # [h] + + # >>> tensioning could be added <<< + self.durations['generic hookup and tensioning time'] = 1 + + + # --- Heavy Lift & Installation --- + elif 
self.type == 'install_wec': + pass + elif self.type == 'install_semisub': + pass + elif self.type == 'install_spar': + pass + elif self.type == 'install_tlp': + pass + elif self.type == 'install_turbine': + pass + + # --- Cable Operations --- + elif self.type == 'lay_cable': + pass + elif self.type == 'cable_hookup': + pass + elif self.type == 'retrieve_cable': + pass + elif self.type == 'lay_and_bury_cable': + pass + elif self.type == 'backfill_rockdump': + pass + + # --- Survey & Monitoring --- + elif self.type == 'site_survey': + pass + + elif self.type == 'monitor_installation': + # TODO: RA: Needs to be updated based on new format (no roles)! - Note to dev: try to reduce (try/except) statements + # 1) YAML override first + try: + v = getFromDict(self.actionType, 'duration_h', dtype=float); self.duration += v + except ValueError: + try: + v = getFromDict(self.actionType, 'default_duration_h', dtype=float); self.duration += v + except ValueError: + # --- find the paired install --- + ref_install = getattr(self, 'paired_install', None) + + # fallback: BFS through deps to find an install on the same anchor + if ref_install is None: + anchor_obj = self.objectList[0] if self.objectList else None + from collections import deque + q, seen = deque(), set() + for dep in self.dependencies.values(): + q.append((dep, 0)); seen.add(id(dep)) + while q: + node, depth = q.popleft() + if getattr(node, 'type', None) == 'install_anchor': + if anchor_obj and node.objectList and node.objectList[0] is anchor_obj: + ref_install = node + break + if ref_install is None: + ref_install = node + if depth < 3: + for nxt in node.dependencies.values(): + if id(nxt) in seen: continue + seen.add(id(nxt)); q.append((nxt, depth+1)) + + # --- get install duration, compute-on-demand if needed (no side effects) --- + inst_dur = 0.0 + if ref_install is not None: + inst_dur = float(getattr(ref_install, 'duration', 0.0) or 0.0) + + # if not computed yet, safely compute and restore + if inst_dur <= 0.0 
and not getattr(ref_install, '_in_monitor_pull', False): + try: + ref_install._in_monitor_pull = True # guard re-entrancy + prev_cost = ref_install.cost + prev_dur = ref_install.duration + d, _ = ref_install.calcDurationAndCost() + inst_dur = float(d) if d is not None else 0.0 + # restore to avoid double counting later + ref_install.cost = prev_cost + ref_install.duration = prev_dur + finally: + ref_install._in_monitor_pull = False + + self.duration += inst_dur + + # cost (same pattern you use elsewhere) + rate_per_hour = 0.0 + for _, asset in self.assetList: + rate_per_hour += float(asset['day_rate'])/24.0 + self.cost += self.duration * rate_per_hour + + else: + raise ValueError(f"Action type '{self.type}' not recognized.") + + + # Sum up duration + self.duration = sum(self.durations.values()) + + # Add cost of all assets involved for the duration of the action [$] + for asset in self.assetList: + self.costs[f"{asset['name']} day rate"] = self.duration * asset['day_rate'] + + # Sum up cost + #self.cost += self.duration * sum([asset['day_rate'] for asset in self.assetList]) + self.cost = sum(self.costs.values()) + + return self.duration, self.cost + + + def setStartTime(self, start_time): + '''Update the start time of the action [in h]. + ''' + + # Update task start and finish times + self.ti = start_time + self.tf = start_time + self.duration + + + def evaluateAssets(self, assets): + ''' + Checks assets for all the roles in the action. This calls `checkAsset()` + for each role/asset pair and then calculates the duration and + cost for the action as if the assets were assigned. Does not assign + the asset(s) to the action. WARNING: this function will clear the values + (but not keys) in `self.assets`. + + Inputs + ------ + `assets` : `dict` + Dictionary of {role_name: asset} pairs for assignment of the + assets to the roles in the action. + + Returns + ------- + `cost` : `float` + Estimated cost of using the asset. 
def evaluateAssets(self, assets):
    '''Estimate duration and cost as if the given assets were assigned.

    Checks the assets against the action requirements, temporarily
    assigns them to compute duration and cost, then clears them again.

    Parameters
    ----------
    assets : list or dict
        Asset(s) to evaluate against the action's requirements.

    Returns
    -------
    duration : float
        Estimated duration of the action [h], or -1 if incompatible.
    cost : float
        Estimated cost of the action [$], or -1 if incompatible.
    '''
    if not isinstance(assets, list):
        assets = [assets]

    assignable, message = self.checkAssets(assets)
    if not assignable:
        # Negative values signal incompatibility; the caller's loop stops
        # because the assets are not compatible with the requirements.
        print('INFO: ' + message + ' Action cannot be completed by provided asset list.')
        return -1, -1

    # Temporary assignment is required by calcDurationAndCost(); it is
    # cleared again below so this method has no lasting side effects.
    self.assetList.extend(assets)
    self.requirements_met = {req: True for req in self.requirements_met.keys()}

    duration, cost = self.calcDurationAndCost()

    # Clear assets assigned for evaluation
    self.clearAssets()

    # Values are returned rather than stored because this is only an
    # evaluation, not an assignment.
    return duration, cost


def assignAsset(self, asset):
    '''Validate an asset and assign it to the action if it is useful.

    Raises
    ------
    Exception
        If the asset fails its capability check, if all requirements
        are already met, or if the asset provides none of the
        still-needed capabilities.

    Parameters
    ----------
    asset : dict
        The asset to be assigned.
    '''
    # First make sure the asset passes the basic compatibility check
    ok, msg = self.checkAsset(asset)
    if not ok:
        # BUGFIX (consistency): use asset['name'] as in the other messages
        # (the original used asset['type'] here only).
        raise Exception(f"Asset '{asset['name']}' cannot be assigned to action '{self.name}': {msg}")

    # Requirements that are currently unmet
    unmet = [req for req, met in self.requirements_met.items() if not met]

    # Adding an asset is pointless if nothing remains unmet
    if not unmet:
        raise Exception(f"All requirements for action '{self.name}' are already met. Asset '{asset['name']}' cannot be assigned.")

    # Capabilities this asset offers vs. capabilities still needed
    assetCaps = set(asset['capabilities'].keys())
    neededCaps = set()
    for req in unmet:
        neededCaps.update(self.allReq[req]['capabilities'])

    # The asset must provide at least one needed capability
    if assetCaps.isdisjoint(neededCaps):
        raise Exception(
            f"Asset '{asset['name']}' does not provide any needed capabilities.\n"
            f"Unmet requirements: {unmet}\n"
            f"Asset capabilities: {assetCaps}\n"
            f"Needed capabilities: {neededCaps}"
        )

    # If we reach here, the asset is useful: record it and mark the
    # requirements it satisfies.
    self.assetList.append(asset)
    for req in unmet:
        if any(cap in assetCaps for cap in self.allReq[req]['capabilities']):
            self.requirements_met[req] = True


def assignAssets(self, assets):
    '''Assign a list of assets to the action without per-asset checks.

    NOTE: unlike assignAsset(), this replaces the current asset list
    directly. Future work could figure out which asset(s) meet which
    requirements and store that mapping.

    Parameters
    ----------
    assets : list
        Assets to assign to the action's requirements.
    '''
    self.assetList = assets


def clearAssets(self):
    '''Remove all assigned assets and mark every requirement unmet.'''
    self.assetList = []
    self.requirements_met = {req: False for req in self.requirements_met.keys()}
def timestep(self):
    '''Advance the simulation of this action forward one step in time.

    (This just documents an idea for possible future implementation:
    perform the hourly action of the task, switching modes as needed.)
    '''
    if self.type == 'tow':
        # Note: the mode checks are deliberately sequential (not elif)
        # so a vessel that is already stopped cascades through modes
        # within a single timestep.

        # Mode 0: gather vessels at the object's start location
        if self.mode == 0:
            ves = self.assets['vessel']
            ves.setCourse(self.r_start - ves.r)  # sets vessel velocity
            if np.linalg.norm(ves.v) == 0:  # stopped at the object
                self.mode = 1

        # Mode 1: tow toward the finish location
        if self.mode == 1:
            ves = self.assets['vessel']
            ves.setCourse(self.r_finish - ves.r)  # sets vessel velocity
            if np.linalg.norm(ves.v) == 0:  # stopped at final location
                self.mode = 2

        # Mode 2: finished
        if self.mode == 2:
            self.end()
station_keeping is not listed under capabilities in vessels.yaml for some reason! investigate. + assignable_AHTS, message_AHTS = act.checkAsset(asset1) + assignable_BRGE, message_BRGE = act.checkAsset(asset2) + + print(message_AHTS) + print(message_BRGE) + + assert assignable_AHTS==True, "Asset AHTS_alpha should be assignable to install_anchor action." + assert assignable_BRGE==False, "Asset Barge_squid should NOT be assignable to install_anchor action." + + # Evaluate asset + duration, cost = act.evaluateAssets([asset1]) + print(f"Case1: Evaluated duration: {duration} h, cost: ${cost}") + duration, cost = act.evaluateAssets([asset2]) + print(f"Case2: Evaluated duration: {duration} h, cost: ${cost}") + duration, cost = act.evaluateAssets([asset1, asset2]) + print(f"Case3: Evaluated duration: {duration} h, cost: ${cost}") + + # Assign asset + act.assignAsset(asset1) + assert abs(act.duration - 4.5216) < 0.01, "Assigned duration does not match expected value." + assert abs(act.cost - 20194.7886) < 0.01, "Assigned cost does not match expected value." + ''' \ No newline at end of file diff --git a/famodel/irma/actions.yaml b/famodel/irma/actions.yaml new file mode 100644 index 00000000..4ad8f3df --- /dev/null +++ b/famodel/irma/actions.yaml @@ -0,0 +1,338 @@ +# ====================================================================== +# actions.yaml +# ---------------------------------------------------------------------- +# This file defines standardized marine operations actions. +# Each entry needs numeric values per specific asset in vessels.yaml. +# Action requirements will be checked against vessel capabilities for evaluation and assignment. +# +# Old format: roles, which lists asset roles, each with associated required capabilities +# New format: list of requirements, with optional -in/-out suffix + +# The code that models and checks these actions is action.calcDurationAndCost(). 
Structural changes here will not be reflected in the code unless changes are made there as well + +### Example action ### + +# example_action: +# objects: [] "The FAModel object types that are expected for this action" +# requirements: [] "List of capability requirements that assets must meet to perform this action" +# duration_h: 0.0 "Duration in hours" +# Hs_m: 0.0 "Wave height constraints in meters" +# description: "A description" + +# --- Mobilization --- + +mobilize: + objects: [] + requirements: + - storage + duration_h: + Hs_m: + description: "Mobilization of vessel in homeport" + +demobilize: + objects: [] + requirements: + - storage + capabilities: [] + duration_h: 1.0 + description: "Demobilization of vessel in homeport" + +load_cargo: + objects: [anchor, mooring, cable, platform, component] + # still have to figure out the aspect of loading from a port + requirements: + - chain_storage-in + - rope_storage-in + - line_handling + - lifting + - storage-in + - station_keeping + - cable_storage-in + - cable_handling + duration_h: + Hs_m: + description: "Load-out of generic components from port or vessel onto vessel." 
+ + +# --- Towing & Transport --- + +transit_linehaul_self: + objects: [anchor] + requirements: + - propulsion + duration_h: + description: "Self-propelled line-haul between port and site" + +transit_linehaul_tug: + objects: [anchor] + requirements: + - propulsion + duration_h: + description: "Tugged line-haul convoy (tug + barge) between port and site" + +transit_onsite_self: + objects: [anchor] + requirements: + - propulsion + duration_h: + description: "Self-propelled in-field move between site locations" + +transit_onsite_tug: + objects: [anchor] + requirements: + - propulsion + - towing + duration_h: + description: "Tug + barge in-field move between site locations" + + +tow: + objects: [platform] + requirements: + - propulsion + - towing + - station_keeping + duration_h: + Hs_m: + description: "Towing floating structures (e.g., floaters, barges) to site; includes station-keeping." + +transport_components: + objects: [component] + requirements: + - propulsion + - storage + - lifting + - station_keeping + duration_h: + Hs_m: + description: "Transport of large components such as towers, nacelles, blades, or jackets." + +at_site_support: + objects: [] + requirements: + - propulsion + duration_h: + Hs_m: + description: "Transport of vessel around the site to provide support." + +# --- Mooring & Anchors --- + +install_anchor: + objects: [anchor, component] + requirements: + - storage-out + - anchor_overboarding + - anchor_lowering + - anchor_orienting + - anchor_embedding + - station_keeping + - monitoring_system + # - survey <-- typically done before installation? + duration_h: + Hs_m: + description: "Anchor installation (suction, driven, helical, DEA, SEPLA) with tensioning and verification." + +retrieve_anchor: + objects: [anchor, component] + requirements: + - storage-in + - anchor_removal + - station_keeping + duration_h: + Hs_m: + description: "Anchor retrieval, including break-out and recovery to deck." 
+ + +load_mooring: + objects: [mooring, component] + requirements: + - chain_storage-in + - rope_storage-in + - line_handling + - station_keeping + duration_h: + Hs_m: + description: "Load-out of mooring lines and components from port or vessel onto vessel." + + +lay_mooring: + objects: [mooring, component] + requirements: + - propulsion + - chain_storage-out + - rope_storage-out + - line_handling + - station_keeping + - subsea_connection + duration_h: + Hs_m: + description: "Laying mooring lines and connection to anchors." + + +mooring_hookup: + objects: [mooring, component, platform] + requirements: + - chain_storage # <-- what is this for? + - rope_storage # <-- what is this for? + - line_handling + - towing # <-- may want to tweak this to be a more generic term that includes bollard pull + - mooring_work + - station_keeping + - monitoring_system + duration_h: + Hs_m: + description: "Hook-up of mooring lines to floating platforms, including pretensioning." + +# --- Heavy Lift & Installation --- + +install_wec: + objects: [platform] + requirements: + - storage + - platform_handling + - station_keeping + - monitoring_system + duration_h: + Hs_m: + description: "Lifting, placement and securement of wave energy converters (WECs) onto moorings, including alignment, connection of power/data umbilicals and verification via ROV." + +install_semisub: + objects: [platform] + requirements: + - storage + - lifting + #- pumping + - towing + - station_keeping + - monitoring_system + duration_h: + Hs_m: + description: "Wet tow arrival, station-keeping, ballasting/trim, mooring hookup and pretensioning, ROV verification and umbilical connections as needed." 
+ +install_spar: + objects: [platform] + requirements: + - storage + - lifting + #- pumping + - towing + - station_keeping + - monitoring_system + duration_h: + Hs_m: + description: "Arrival and upending via controlled ballasting, station-keeping, fairlead/messenger handling, mooring hookup and pretensioning with ROV confirmation. Heavy-lift support may be used during port integration." + +install_tlp: + objects: [platform] + requirements: + - storage + - lifting + #- pumping + - towing + - station_keeping + - monitoring_system + duration_h: + Hs_m: + description: "Tendon porch alignment, tendon hookup, sequential tensioning to target pretension, verification of offsets/RAOs and ROV checks." + +install_wtg: + objects: [turbine] + requirements: + - storage + - lifting + - station_keeping + - monitoring_system + duration_h: + Hs_m: + description: "Installation of wind turbine generator including tower, nacelle and blades." +# --- Cable Operations --- + +lay_cable: + objects: [cable] + requirements: + - cable_storage-out + - cable_laying + - station_keeping + - monitoring_system + duration_h: + Hs_m: + description: "Laying static/dynamic power cables, including burial where required." + +cable_hookup: + objects: [cable, component, platform] + requirements: + - cable_storage + - cable_handling + - towing + - station_keeping + - monitoring_system + duration_h: + Hs_m: + description: "Hook-up of cable to floating platforms, including pretensioning." + +retrieve_cable: + objects: [cable] + requirements: + - cable_storage-in + - cable_handling + - station_keeping + - monitoring_system + duration_h: + Hs_m: + description: "Cable recovery operations, including cutting, grappling and retrieval." 
+ + # Lay and bury in a single pass using a plough +lay_and_bury_cable: + objects: [cable] + requirements: + - propulsion + - cable_storage + - station_keeping + - monitoring_system + duration_h: + Hs_m: + description: "Simultaneous lay and plough burial; continuous QA via positioning + MBES/SSS, with post-pass verification." + +# Backfill trench or stabilize cable route using rock placement +backfill_rockdump: + objects: [cable] + requirements: + - storage + - propulsion + - station_keeping + - monitoring_system + - rock_placement + duration_h: + Hs_m: + description: "Localized rock placement to stabilize exposed cables, infill trenches or provide scour protection. Includes real-time positioning and sonar verification of rock placement." + +# --- Survey & Monitoring --- + +site_survey: + objects: [] + requirements: + - survey + - station_keeping + - monitoring_system + duration_h: + Hs_m: + description: "Pre-installation site survey including bathymetry, sub-bottom profiling and positioning." + +monitor_installation: + objects: [anchor, mooring, component, platform, cable] + requirements: + - station_keeping + - monitoring_system + duration_h: + Hs_m: + description: "Real-time monitoring of installation operations using ROV and sensor packages." + +diver_support: + objects: [] + requirements: + - survey + - station_keeping + - monitoring_system + duration_h: + Hs_m: + description: "Divers site survey including monitoring and positioning." \ No newline at end of file diff --git a/famodel/irma/assets.py b/famodel/irma/assets.py new file mode 100644 index 00000000..d9bedf50 --- /dev/null +++ b/famodel/irma/assets.py @@ -0,0 +1,505 @@ +"Classes for vessels and port" +# Adapted from Rudy's original work and Ryan's updates + +import yaml +from copy import deepcopy + +class Asset: + ''' + Base class for vessel or port used in installation or maintenance. + + Attributes + ---------- + name : str + Name of the vessel. + specs : dict + Specifications of the vessel. 
def __init__(self, file=None, vesselDisc=None):
    """Initialize an Asset from a YAML file path or a pre-loaded dict.

    Parameters
    ----------
    file : str, optional
        Path to the asset configuration YAML file.
    vesselDisc : dict, optional
        Already-loaded configuration dictionary (alternative to file).
    """
    # Exactly one of the two inputs must be supplied.
    if (vesselDisc is None) == (file is None):
        raise ValueError("Either vesselDisc or file must be provided.")
    if vesselDisc is None:
        with open(file) as f:
            vesselDisc = yaml.load(f, Loader=yaml.FullLoader)

    # General attributes from the configuration
    self.name = vesselDisc.get('name', "Unnamed Vessel")
    self.type = vesselDisc.get('type', "Untyped Vessel")
    self.specs = vesselDisc['specs']
    storage = self.specs['storage_specs']
    self.state = {
        "spool_storage": storage['max_spool_capacity'],
        "deck_storage": storage['max_deck_space'],
        "cargo_mass": storage['max_cargo'],
        "assigned_materials": [],
        "log": [],
    }
    # additional initialization should be done in the vessel or port
    # subclass __init__


def logState(self, time, new_state):
    """Merge new_state into the current state and append it to the log.

    Parameters
    ----------
    time : float
        Current simulation time.
    new_state : dict
        New state information to update and log.
    """
    self.state.update(new_state)
    self.state["log"].append({"time": time, "state": new_state})


def getState(self, t):
    """Return the most recent logged state at or before time t.

    Parameters
    ----------
    t : float
        Time at which to retrieve the asset state.

    Returns
    -------
    dict or None
        The latest log entry with entry["time"] <= t, or None if no
        state exists at or before t.
    """
    for entry in reversed(self.state["log"]):
        if entry["time"] <= t:
            return entry
    return None
def __init__(self, info):
    '''Initialize a Vessel used in the installation process.

    Parameters
    ----------
    info : str or dict
        Vessel configuration, passed straight through to
        Asset.__init__ (a file path, per that signature's first
        positional argument).
    '''
    # Initialize the base class
    Asset.__init__(self, info)

    # Pre-create the action structures this vessel can perform
    for attr, act_name in (("transit_to", "transit_to"),
                           ("transit_from", "transit_from"),
                           ("mobilize_material", "mobilize"),
                           ("install", "install")):
        setattr(self, attr, Action(act_name))
+ """ + + # Old vessel mobilize action + # mobilize_material.addItem("load spooling", duration=1) + # mobilize_material.addItem("load line", duration=2, dependencies=[("self", "load spooling")]) + # mobilize_material.addItem("load anchor", duration=1) + # mobilize_material.addItem("load gear", duration=2) + # mobilize_material.addItem("seafasten", duration=3, dependencies=[ + # ("self", "load spooling"), ("self", "load line"), + # ("self", "load anchor"), ("self", "load gear") + # ]) + + # Old Mobilize function + # mobilize_material.addItem("mobilize_vessel", duration=self.specs['vessel_specs']['mobilization_time'], dependencies=[]) + + # Mobilize action from install_helpers + winch_speed = self.specs['storage_specs']['winch_speed']*60 # m/hr + anchor_loading_speed = self.specs['storage_specs']['anchor_loading_speed'] + + self.mobilize_material.addItem("load spooling", duration=1, dependencies=[]) + self.mobilize_material.addItem("load line", duration=0, dependencies=[self.mobilize_material.items["load spooling"]]) # these need to be ActionItems in an Action object + self.mobilize_material.addItem("load anchor", duration=0, dependencies=[]) + self.mobilize_material.addItem("load gear", duration=2, dependencies=[]) + self.mobilize_material.addItem("seafasten", duration=3, dependencies=[ # these need to be ActionItems in an Action object + self.mobilize_material.items["load spooling"], self.mobilize_material.items["load line"], + self.mobilize_material.items["load anchor"], self.mobilize_material.items["load gear"] + ]) + + for key, item in pkg.items(): + item['obj'].inst['mobilized'] = True + if key.startswith("sec"): # agnostic to line type + self.mobilize_material.items["load line"].duration += item['length'] / winch_speed + self.state['spool_storage'] -= item['length'] + + elif key.startswith("anchor"): # anchor + if item['load'] > self.specs['storage_specs']['max_deck_load']: + raise ValueError(f"item {key} has a load higher than what the vessel can withstand.") 
+ + self.mobilize_material.items["load anchor"].duration += anchor_loading_speed # Assuming 1 anchor load = 1 * speed + self.state['deck_storage'] -= item['space'] + + self.state['cargo_mass'] -= item['mass'] # remaining capacity + self.state['assigned_materials'].append(item['obj']) + + return self.mobilize_material + + def mob(self, time, **kwargs): + """ + + This function is not used yet. Example of what considering port location could look like. + + Initialize the vessel and mobilize to port + + Parameters + ---------- + time : float + The current simulation time. + location : str + The target location for mobilization. + + Returns + ------- + None + """ + # Duration of the activity + duration = self.specs['vessel_specs']['mobilization_time'] + + # Vessel location at port + portLocation = kwargs.get('port_r', [0, 0]) + self.r = portLocation + self.state["location"] = self.r + self.state["preceeds"] = "material_mob" + + # Get vessel latest activity + log = self.getState(time) + if not log: + time=duration + else: + time = log["time"] + duration + + self.logState(time=time, new_state=self.state) + + def get_transit_to_action(self, distance2port): + """ + Transit actions for the vessel to a destination from port. + + Parameters + ---------- + distance2port : float + The distance to the site from port. + + Returns + ------- + transit_to : Action + Action for transiting to the site from port. + """ + + self.transit_to.addItem("transit_to_site", duration=distance2port/self.specs['transport_specs']['transit_speed'], dependencies=[self.mobilize_material.items["seafasten"]]) # these need to be ActionItems in an Action object + + return self.transit_to + + + def get_install_action(self, pkg): + """ + Creates an action item for installing a materials package from a vessel. + + Parameters + ---------- + pkg : dict + The package of materials to be installed. + + Returns + ------- + action : dict + The action item for installing the materials. 
+ """ + + # set up structure for filling in based on pkg + self.install.addItem("position onsite", duration=0, dependencies=[]) + self.install.addItem("site survey", duration=0, dependencies=[self.install.items["position onsite"]]) + self.install.addItem("install anchor", duration=0, dependencies=[self.install.items["position onsite"], self.install.items["site survey"]]) + self.install.addItem("rerig deck", duration=0, dependencies=[self.install.items["position onsite"], self.install.items["install anchor"]]) + self.install.addItem("install line", duration=0, dependencies=[self.install.items["install anchor"], self.install.items["rerig deck"]]) + + + def installItem(key): + ''' + NOT A PUBLIC FUNCTION + This function installs an item and its dependencies. + It checks if the item is already installed and if not, it installs its dependencies first. + Then, it installs the item itself and updates the vessel state. + + Parameters + ---------- + key : str + The key of the item to be installed. 
+ + Returns + ------- + None + ''' + item = pkg.get(key) + for dep in item['dependencies']: + if not pkg[dep]['obj'].inst['installed']: + installItem(dep) + + if key.startswith("anchor"): + self.install.items["position onsite"].duration = 2 # from PPI (only once per anchor) + self.install.items["site survey"].duration = 2 # from PPI + if item['obj'].dd['design']['type']=='suction': + pile_fixed = self.specs["vessel_specs"]["pile_fixed_install_time"] + pile_depth = 0.005 * abs(item['obj'].r[-1]) + + self.install.items["install anchor"].duration = pile_fixed + pile_depth + else: + # support for other anchor types + pass + + self.state['deck_storage'] += item.get('space', 0) + + elif key.startswith("sec"): + if self.install.items["install line"].duration ==0: + # first line to install + self.install.items["rerig deck"].duration = self.specs['storage_specs'].get('rerig_deck', 0) + winch_speed = self.specs['storage_specs']['winch_speed']*60 # m/hr + line_fixed = self.specs["vessel_specs"]["line_fixed_install_time"] + line_winch = item['length']/winch_speed + self.install.items["install line"].duration += line_fixed + line_winch + self.install.items["install line"].dependencies = [self.install.items["install anchor"], self.install.items["rerig deck"]] + + self.state['spool_storage'] += item.get('length', 0) + + item['obj'].inst['installed'] = True + self.state['cargo_mass'] += item['mass'] + self.state['assigned_materials'].remove(item['obj']) + + for key in pkg.keys(): + installItem(key) + + return self.install + + def get_transit_from_action(self, distance2port, empty_factor=1.0): + """ + Transit actions for the vessel from a destination to port. + + Parameters + ---------- + distance2port : float + The distance to the site from port. + empty_factor : float, optional + The factor to account for empty return trip. + + Returns + ------- + transit_from : Action + Action for transiting from the site to port. 
+ """ + + self.transit_from.addItem("transit_from_site", duration= empty_factor * distance2port/self.specs['transport_specs']['transit_speed'], dependencies=[self.transit_to.items["transit_to_site"], self.install.items["install anchor"], self.install.items["install line"]]) + + return self.transit_from + + + def logState(self, time, new_state): + """ + Log and update the vessel state. + + Parameters + ---------- + time : float + Current simulation time. + new_state : dict + New state information to update and log. + + Returns + ------- + None + """ + self.state.update(new_state) + self.state["log"].append({"time": time, "state": new_state}) + + + def getState(self, t): + """ + Retrieve vessel state at a specific time. + + Parameters + ---------- + t : float + Time at which to retrieve the vessel state. + + Returns + ------- + state : dict + The vessel state at time t, or None if no state exists before time t. + """ + return next((log for log in reversed(self.state["log"]) if log["time"] <= t), None) + + +"Port base class" + +__author__ = "Rudy Alkarem" + +import yaml +from copy import deepcopy +import numpy as np + +class Port: + ''' + Represents a port for staging and logistics operations. + + Attributes + ---------- + name : str + Name of the port. + capacity : dict + Dictionary containing capacity parameters of the port. + storage : dict + Current storage state of the port. + ''' + + def __init__(self, file): + ''' + Initialize a Port object from a configuration file. + + Parameters + ---------- + config_file : str + Path to the port configuration file. + ''' + + # Initialize the base class + Asset.__init__(self, info) + + + + self.r = [portDisc['location']['lattitude'], portDisc['location']['longitude']] + self.pkgs = {} + + # misc + self.reel_refArea = 13.5 # m^2 + self.reel_refCap = 735 # m + self.chain_refArea = 20.5 # m^2 + self.chain_refLngth = 100 # m + + + def staging(self, pkgs): + """ + Perform staging and update port storage states. 
def staging(self, pkgs):
    """Stage packages at the port and update yard storage.

    Polyester lines are stored on reels, chain in piles; anchors take
    their own yard space. Packages that do not fit are returned.

    Parameters
    ----------
    pkgs : dict
        {name: package} items to stage. Line packages have names
        starting with 'sec'; anchors with 'anchor'; connectors 'conn'.

    Returns
    -------
    dict or None
        Packages that couldn't be staged due to capacity constraints,
        or None if all packages were staged successfully.
    """
    remainingPkgs = deepcopy(pkgs)

    # Tally polyester and chain line lengths
    polyLineLength = 0
    chainLineLength = 0
    polyPkgs = {}
    chainPkgs = {}
    for pkgName, pkg in pkgs.items():
        if pkgName.startswith("sec"):
            material = pkg["obj"]["type"]["material"]
            if material == "polyester":
                polyLineLength += pkg["length"]
                polyPkgs[pkgName] = pkg   # BUGFIX: was list-style .append() on a dict
            if material == "chain":
                chainLineLength += pkg["length"]
                chainPkgs[pkgName] = pkg  # BUGFIX: was list-style .append() on a dict

    # TODO: can we generalize this beyond polyester and chain? Any
    # number of lines and line types?

    # Store polyester lines: number of reels needed to spool them all
    reelCount = np.ceil(polyLineLength / self.reel_refCap)
    reelAreaTot = reelCount * self.reel_refArea
    if self.state["yard_storage"] > reelAreaTot:
        self.state["yard_storage"] -= reelAreaTot
        for key in polyPkgs:
            remainingPkgs.pop(key, None)
        self.pkgs.update(polyPkgs)

    # Store chains: area per metre for a 1.5 m tall pile
    # [135 mm chain nominal diameter]
    area_per_unit_meter = self.chain_refArea / self.chain_refLngth
    chainAreaTot = area_per_unit_meter * chainLineLength
    if self.state["yard_storage"] > chainAreaTot:
        self.state["yard_storage"] -= chainAreaTot
        for key in chainPkgs:
            remainingPkgs.pop(key, None)
        self.pkgs.update(chainPkgs)

    # Remaining packages: anchors and connectors
    for pkgName, pkg in pkgs.items():
        if pkgName.startswith("anchor"):
            if self.state["yard_storage"] > pkg["space"]:
                self.state["yard_storage"] -= pkg["space"]
                remainingPkgs.pop(pkgName, None)
                self.pkgs[pkgName] = pkg  # BUGFIX: was list-style .append() on a dict
        if pkgName.startswith("conn"):
            # TODO: add logic to stage clump weights and buoys
            pass

    if remainingPkgs == {}:
        remainingPkgs = None
    return remainingPkgs
def incrementer(text):
    '''Increment the last integer token found in a string.

    Scans whitespace-separated tokens from the end and adds 1 to the
    first one that parses as an integer; all other tokens are left
    unchanged. Note that runs of whitespace collapse to single spaces.

    Parameters
    ----------
    text : str
        The input string to increment.

    Returns
    -------
    str
        The string with its last integer token incremented (tokens
        unchanged if none parses as an integer).
    '''
    tokens = text.split()
    # Walk backwards looking for the last token that is an integer
    for i in range(len(tokens) - 1, -1, -1):
        try:
            tokens[i] = str(int(tokens[i]) + 1)
        except ValueError:
            continue
        break
    return " ".join(tokens)
def increment_name(name):
    '''Add or increment a numeric "-N" suffix on a name.

    Parameters
    ----------
    name : str
        The input name string.

    Returns
    -------
    str
        "name-0" if there was no numeric suffix, otherwise the same
        name with its trailing "-N" suffix incremented by one.
    '''
    parts = name.split(sep='-')
    if len(parts) > 1 and parts[-1].isdigit():
        # Existing numeric suffix: bump it and reassemble
        parts[-1] = str(int(parts[-1]) + 1)
        return '-'.join(parts)
    # No numeric suffix yet: start counting at zero
    return name + '-0'
def __init__(self, actionType, name, **kwargs):
    '''Create an action object.

    It must be given a name. The remaining parameters should correspond
    to items in the actionType dict.

    Inputs
    ----------
    `actionType` : `dict`
        Dictionary defining the action type (typically taken from a yaml).
    `name` : `string`
        A name for the action. It may be appended with numbers if there
        are duplicate names.
    `kwargs`
        Additional arguments may depend on the action type and typically
        include a list of FAModel objects that are acted upon ('objects'),
        or a list of dependencies ('dependencies', other action objects).

    Returns
    -------
    `None`
    '''

    # Containers for the things controlled/used during this action
    self.assets = {}          # named role -> assigned asset (vessel/port), None until filled
    self.requirements = {}    # per-role required capabilities (same keys as self.assets)
    self.requirements2 = {}   # all capabilities required by the action as a whole
    self.objectList = []      # all objects that could be acted on
    self.dependencies = {}    # other actions this one depends on, keyed by action name

    self.actionType = actionType  # keep the defining YAML dict on the instance

    self.type = getFromDict(actionType, 'type', dtype=str)
    self.name = name
    self.status = 0  # 0 = waiting; 1 = running; 2 = finished

    # Timing/cost bookkeeping (normally overwritten by calcDurationAndCost;
    # TODO: or should a yaml-supplied duration override that calculation?)
    self.duration = getFromDict(actionType, 'duration', default=0)
    self.cost = 0   # this will be overwritten by calcDurationAndCost
    self.ti = 0     # action start time [h?]
    self.tf = 0     # action end time [h?]

    self.supported_objects = []  # FAModel object type names supported by the action

    # Create placeholders for asset roles based on the "requirements".
    # Each role requirement holds a dict of capabilities, each capability
    # mapping to a dict of metrics/values (left empty for now).
    if 'roles' in actionType:
        for role_name, cap_names in actionType['roles'].items():
            self.requirements[role_name] = {cap: {} for cap in cap_names}
            for cap in cap_names:
                self.requirements2[cap] = {}  # action-wide requirements dict
            self.assets[role_name] = None  # no asset assigned to this role yet

    # Record which object types the action supports.
    # NOTE: must occur before any objects are assigned below.
    if 'objects' in actionType:
        supported = actionType['objects']
        if isinstance(supported, list):
            self.supported_objects = supported
        elif isinstance(supported, dict):
            self.supported_objects = list(supported.keys())

    # Process objects to be acted upon (validated against supported_objects)
    if 'objects' in kwargs:
        self.assignObjects(kwargs['objects'])

    # Process dependencies
    if 'dependencies' in kwargs:
        for dep_action in kwargs['dependencies']:
            self.dependencies[dep_action.name] = dep_action

    # Process some optional kwargs depending on the action type


def addDependency(self, dep):
    '''
    Registers other action as a dependency of this one.

    Inputs
    ------
    `dep` : `Action`
        The action to be added as a dependency.

    Returns
    -------
    `None`
    '''
    self.dependencies[dep.name] = dep
    # could see if already a dependency and raise a warning if so...
def getMetrics(self, cap, met, obj):
    '''
    Retrieves the minimum metric(s) for a given capability required to act on
    a target object. A metric is the number(s) associated with a capability;
    a capability is what an action role requires and an asset has.

    These minimum metrics are assigned to capabilities in the action's role
    in `assignObjects`.

    Note to devs: this function hard-codes the evaluation of each
    capability/object combination, producing the minimum requirement for the
    capability to work with the object (e.g. minimum bollard pull to tow out
    a platform). Capabilities (and their metrics) come from capabilities.yaml
    and objects from objects.yaml; document sources for any assumptions.
    Preliminary work:
    https://github.com/FloatingArrayDesign/FAModel/blob/IOandM_development/famodel/installation/03_step1_materialItems.py

    A recorded metric value of -1 indicates the object is not compatible with
    the capability; a warning is printed for such entries before returning.

    Inputs
    ------
    `cap` : `str`
        The capability for which the metric is to be retrieved.
    `met` : `dict`
        The metrics dictionary containing any existing metrics for the capability.
    `obj` : FAModel object
        The target object on which the capability is to be acted upon.

    Returns
    -------
    `metrics` : `dict`
        The metrics and values for the specified capability and object.
    '''

    metrics = met  # may already hold previously computed requirement values
    objType = obj.__class__.__name__.lower()

    def _keep_max(key, value):
        # Record `value` for `key`, keeping any existing larger requirement.
        # A missing entry means "no requirement yet", so `value` is taken
        # directly (comparing against the None returned by dict.get for a
        # missing key would raise a TypeError).
        prev = metrics.get(key)
        metrics[key] = value if prev is None or value > prev else prev

    # Capabilities with no requirement model implemented yet: compatible
    # objects currently place no numeric demands, so nothing is recorded.
    # TODO: add models (deck_space area/load, winch line pull/speed, bollard
    # pull force, crane capacity/hook height, pump power/pressure, hammer
    # energy, ROV class/depth rating, survey resolution, etc.)
    unmodeled_caps = (
        'deck_space', 'winch', 'bollard_pull', 'crane', 'station_keeping',
        'mooring_work', 'pump_surface', 'pump_subsea', 'pump_grout',
        'hydraulic_hammer', 'vibro_hammer', 'drilling_machine',
        'torque_machine', 'cable_plough', 'rock_placement', 'container',
        'rov', 'positioning_system', 'monitoring_system', 'sonar_survey',
    )

    if cap == 'chain_locker':
        # logic for chain_locker capability (only mooring objects compatible)
        if objType == 'mooring':
            # set baseline values for summation
            vol = 0
            length = 0
            for sec in obj.dd['sections']:  # add up volume/length of all chain in the object
                if sec['type']['chain']:
                    diam = sec['type']['d_nom']  # diameter [m]
                    # TODO: calculate chain_locker volume from sec['L'] and
                    # diam. Use Delmar data from Rudy. Function of chain diam?
                    vol += 0.0
                    length += sec['L']  # length [m]
        else:
            vol = -1  # incompatible object

        # Assign the capability's metrics (keep existing value if larger)
        _keep_max('volume_m3', vol)

    elif cap == 'line_reel':
        # logic for line_reel capability (only mooring objects compatible)
        if objType == 'mooring':
            # set baseline values for summation
            vol = 0
            length = 0
            for sec in obj.dd['sections']:  # add up volume/length of all non-chain line
                if not sec['type']['chain']:  # any line type that's not chain
                    vol += sec['L']*np.pi*(sec['type']['d_nom']/2)**2  # volume [m^3]
                    length += sec['L']  # length [m]
        else:
            vol = -1  # incompatible object
            length = -1

        # Assign the capability's metrics (keep existing values if larger)
        _keep_max('volume_m3', vol)
        _keep_max('rope_capacity_m', length)

    elif cap == 'cable_reel':
        # logic for cable_reel capability (only cable objects compatible)
        vol = 0
        length = 0
        # TODO: iterate over the cable sections only (not j-tubes or other
        # parts) and accumulate vol += L*pi*(d_nom/2)**2 and length += L
        _keep_max('volume_m3', vol)
        _keep_max('cable_capacity_m', length)

    elif cap in unmodeled_caps:
        pass  # known capability, but no requirement model implemented yet

    else:
        raise Exception(f"Unsupported capability '{cap}'.")

    # Flag any incompatible combinations that were recorded as -1
    # (loop variable renamed from `met` to avoid shadowing the parameter)
    for key in metrics.keys():
        if metrics[key] == -1:
            print(f"WARNING: No metrics assigned for '{key}' metric in '{cap}' capability based on object type '{objType}'.")

    return metrics  # return the dict of metrics and required values for the capability


def assignObjects(self, objects):
    '''
    Adds a list of objects to the action's objects list, checking that each
    object is valid for the action's supported object types.

    The minimum capability metrics (computed via `getMetrics`) are used when
    checking compatibility and assigning assets in `assignAsset`, so this
    function should only be called during an action's initialization.

    Inputs
    ------
    `objects` : `list`
        A list of FAModel objects to be added to the action.

    Returns
    -------
    `None`
    '''

    for obj in objects:
        # Check compatibility, then assign object to action
        objType = obj.__class__.__name__.lower()  # object class name
        if objType not in self.supported_objects:
            raise Exception(f"Object type '{objType}' is not in the action's supported list.")

        if obj in self.objectList:
            print(f"Warning: Object '{obj}' is already in the action's object list. Capabilities will be overwritten.")

        # NOTE: per-object capability-requirement computation via getMetrics
        # is deferred for now (see commented prototype in repo history); it
        # may be better as a separate step anyway.
        self.objectList.append(obj)
def checkAsset(self, role_name, asset):
    '''
    Checks if a specified asset has sufficient capabilities to fulfil
    a specified role in this action.

    The asset must provide EVERY capability required by the role, and each
    capability must meet every required metric value. (Previously this
    returned success after checking only the first required capability;
    the success return now comes after all capabilities are checked.)

    Inputs
    ------
    `role_name` : `string`
        The name of the role to check.
    `asset` : `dict`
        The asset to check against the role's requirements.

    Returns
    -------
    `bool`
        True if the asset meets the role's requirements, False otherwise.
    `str`
        A message providing additional information about the check.
    '''

    # Make sure role_name is valid for this action
    if not role_name in self.assets.keys():
        raise Exception(f"The specified role '{role_name}' is not a named in this action.")

    # An already-filled role cannot accept another asset
    if self.assets[role_name] is not None:
        return False, f"Role '{role_name}' is already filled in action '{self.name}'."

    for capability in self.requirements[role_name].keys():

        if capability not in asset['capabilities'].keys():  # a capability is not met
            return False, f"The asset does not have the '{capability}' capability for '{role_name}' role of '{self.name}' action."

        # TODO: does this work if there are no metrics in a capability? This should
        # be possible, as not all capabilities will require a constraint.
        for metric in self.requirements[role_name][capability].keys():  # loop over the capacity requirements for the capability (if more than one)

            # value error because capabilities are defined in capabilities.yaml; this
            # should only trigger if something has gone wrong (e.g. overwritten values)
            if metric not in asset['capabilities'][capability].keys():
                raise ValueError(f"The '{capability}' capability does not have metric: '{metric}'.")

            if self.requirements[role_name][capability][metric] > asset['capabilities'][capability][metric]:  # check requirement is met
                return False, f"The asset does not have sufficient '{metric}' for '{capability}' capability in '{role_name}' role of '{self.name}' action."

    # Only reached once every required capability (and metric) has passed
    return True, 'All capabilities in role met'
+ TODO: finish description + + Inputs + ------ + `None` + + Returns + ------- + `None` + ''' + + # Check that all roles in the action are filled + for role_name in self.requirements.keys(): + if self.assets[role_name] is None: + raise Exception(f"Role '{role_name}' is not filled in action '{self.name}'. Cannot calculate duration and cost.") + + # Initialize cost and duration + self.cost = 0.0 # [$] + self.duration = 0.0 # [h] + + """ + Note to devs: + The code here calculates the cost and duration of an action. Each action in the actions.yaml has a hardcoded 'model' + here that is used to evaluate the action based on the assets assigned to it. + + This is where a majority of assumptions about the action's behavior are made, so it is key to cite references behind + any abnormal approaches. + + Some good preliminary work on this is in https://github.com/FloatingArrayDesign/FAModel/blob/IOandM_development/famodel/installation/ + and in assets.py + """ + + # --- Mobilization --- + if self.type == 'mobilize': + # Hard-coded example of mobilization times based on vessel type + durations = { + 'crane_barge': 3.0, + 'research_vessel': 1.0 + } + for role_name, vessel in self.assets.items(): + vessel_type = vessel['type'].lower() + for key, duration in durations.items(): + if key in vessel_type: + self.duration += duration + break + + elif self.type == 'demobilize': + # Hard-coded example of demobilization times based on vessel type + durations = { + 'crane_barge': 3.0, + 'research_vessel': 1.0 + } + for role_name, vessel in self.assets.items(): + vessel_type = vessel['type'].lower() + for key, duration in durations.items(): + if key in vessel_type: + self.duration += duration + elif self.type == 'load_cargo': + pass + + # --- Towing & Transport --- + elif self.type == 'tow': + pass + + elif self.type == 'transit_linehaul_self': + # YAML override + try: + v = getFromDict(self.actionType, 'duration_h', dtype=float); self.duration += v + except ValueError: + try: + v = 
getFromDict(self.actionType, 'default_duration_h', dtype=float); self.duration += v + except ValueError: + vessel = self.assets.get('vessel') or self.assets.get('operator') or self.assets.get('carrier') + if vessel is None: + raise ValueError('transit_linehaul_self: no vessel assigned.') + + tr = vessel['transport'] + + # distance + dist_m = float(tr['route_length_m']) + + # speed: linehaul uses transport.cruise_speed_mps + speed_mps = float(tr['cruise_speed_mps']) + + dur_h = dist_m/speed_mps/3600.0 + self.duration += dur_h + # cost + rate_per_hour = 0.0 + for _, asset in self.assets.items(): + rate_per_hour += float(asset['day_rate'])/24.0 + self.cost += self.duration*rate_per_hour + return self.duration, self.cost + + + elif self.type == 'transit_linehaul_tug': + # YAML override + try: + v = getFromDict(self.actionType, 'duration_h', dtype=float); self.duration += v + except ValueError: + try: + v = getFromDict(self.actionType, 'default_duration_h', dtype=float); self.duration += v + except ValueError: + tug = self.assets.get('operator') or self.assets.get('vessel') + barge = self.assets.get('carrier') + if tug is None or barge is None: + raise ValueError('transit_linehaul_tug: need tug (operator) and barge (carrier).') + + tr_b = barge.get('transport', {}) + tr_t = tug.get('transport', {}) + + # distance: prefer barge’s transport + dist_m = float(tr_b.get('route_length_m', tr_t['route_length_m'])) + + # speed for convoy linehaul: barge (operator) cruise speed + operator = self.assets.get('operator') or self.assets.get('vessel') + if operator is None: + raise ValueError('transit_linehaul_tug: operator (barge) missing.') + + speed_mps = float(operator['transport']['cruise_speed_mps']) + + dur_h = dist_m/speed_mps/3600.0 + self.duration += dur_h + + # cost + rate_per_hour = 0.0 + for _, asset in self.assets.items(): + rate_per_hour += float(asset['day_rate'])/24.0 + self.cost += self.duration*rate_per_hour + return self.duration, self.cost + + elif self.type == 
'transit_onsite_self': + # YAML override + try: + v = getFromDict(self.actionType, 'duration_h', dtype=float); self.duration += v + except ValueError: + try: + v = getFromDict(self.actionType, 'default_duration_h', dtype=float); self.duration += v + except ValueError: + # vessel (Beyster) required + vessel = self.assets.get('vessel') or self.assets.get('operator') or self.assets.get('carrier') + if vessel is None: + raise ValueError('transit_onsite_self: no vessel assigned.') + + # NEW: quick vessel print + try: + print(f"[onsite_self] {self.name}: vessel={vessel.get('type')}") + except Exception: + pass + + # destination anchor from objects (required) + if not self.objectList: + raise ValueError('transit_onsite_self: destination anchor missing in objects.') + dest = self.objectList[0] + r_dest = getattr(dest, 'r', None) + + # NEW: print dest + try: + print(f"[onsite_self] {self.name}: r_dest={r_dest}") + except Exception: + pass + + # infer start from dependency chain (BFS up to depth 3) + r_start = None + from collections import deque + q, seen = deque(), set() + for dep in self.dependencies.values(): + q.append((dep, 0)); seen.add(id(dep)) + while q: + node, depth = q.popleft() + if node.objectList and hasattr(node.objectList[0], 'r'): + r_start = node.objectList[0].r + break + # if depth < 3: + # for nxt in node.dependencies.values(): + # if id(nxt) in seen: continue + # seen.add(id(nxt)); q.append((nxt, depth+1)) + + # NEW: print BFS result + try: + print(f"[onsite_self] {self.name}: r_start(BFS)={r_start}") + except Exception: + pass + + # CHANGED: fallback for first onsite leg → try centroid, else keep old zero-distance fallback + if r_start is None and r_dest is not None: + # NEW: centroid read (linehaul_to_site should set it on this action) + cent = (getattr(self, 'meta', {}) or {}).get('anchor_centroid') + if cent is None: + cent = (getattr(self, 'params', {}) or {}).get('anchor_centroid') + if cent is not None and len(cent) >= 2: + r_start = 
(float(cent[0]), float(cent[1])) + try: + print(f"[onsite_self] {self.name}: using centroid as r_start={r_start}") + except Exception: + pass + else: + # ORIGINAL behavior: assume zero in-field distance + r_start = r_dest + try: + print(f"[warn] {self.name}: could not infer start from deps; assuming zero in-field distance.") + except Exception: + pass + + # 2D distance [m] + from math import hypot + dx = float(r_dest[0]) - float(r_start[0]) + dy = float(r_dest[1]) - float(r_start[1]) + dist_m = hypot(dx, dy) + + # NEW: print distance + try: + print(f"[onsite_self] {self.name}: dist_m={dist_m:.1f} (start={r_start} → dest={r_dest})") + except Exception: + pass + + # onsite speed from capabilities.engine (SI) + cap_eng = vessel.get('capabilities', {}).get('engine', {}) + speed_mps = float(cap_eng['site_speed_mps']) + + self.duration += dist_m/speed_mps/3600.0 + + # NEW: print duration increment + try: + print(f"[onsite_self] {self.name}: speed_mps={speed_mps:.3f}, dT_h={dist_m/speed_mps/3600.0:.3f}, total={self.duration:.3f}") + except Exception: + pass + + # cost + rate_per_hour = 0.0 + for _, asset in self.assets.items(): + rate_per_hour += float(asset['day_rate'])/24.0 + self.cost += self.duration*rate_per_hour + return self.duration, self.cost + + elif self.type == 'transit_onsite_tug': + # YAML override + try: + v = getFromDict(self.actionType, 'duration_h', dtype=float); self.duration += v + except ValueError: + try: + v = getFromDict(self.actionType, 'default_duration_h', dtype=float); self.duration += v + except ValueError: + # assets required (operator = San_Diego tug; carrier = Jag barge) + operator = self.assets.get('operator') or self.assets.get('vessel') + carrier = self.assets.get('carrier') + if operator is None and carrier is None: + raise ValueError('transit_onsite_tug: no operator/carrier assigned.') + + # quick prints + try: + op_name = operator.get('type') if operator else None + ca_name = carrier.get('type') if carrier else None + 
print(f"[onsite_tug] {self.name}: operator={op_name} carrier={ca_name}") + except Exception: + pass + + # destination anchor from objects (required) + if not self.objectList: + raise ValueError('transit_onsite_tug: destination anchor missing in objects.') + dest = self.objectList[0] + r_dest = getattr(dest, 'r', None) + + try: + print(f"[onsite_tug] {self.name}: r_dest={r_dest}") + except Exception: + pass + + # infer start from dependency chain (BFS up to depth 3) + r_start = None + from collections import deque + q, seen = deque(), set() + for dep in self.dependencies.values(): + q.append((dep, 0)); seen.add(id(dep)) + while q: + node, depth = q.popleft() + if node.objectList and hasattr(node.objectList[0], 'r'): + r_start = node.objectList[0].r + break + # if depth < 3: + # for nxt in node.dependencies.values(): + # if id(nxt) in seen: continue + # seen.add(id(nxt)); q.append((nxt, depth+1)) + + try: + print(f"[onsite_tug] {self.name}: r_start(BFS)={r_start}") + except Exception: + pass + + # fallback for first onsite leg: use centroid if present, else zero-distance fallback + if r_start is None and r_dest is not None: + cent = (getattr(self, 'meta', {}) or {}).get('anchor_centroid') + if cent is None: + cent = (getattr(self, 'params', {}) or {}).get('anchor_centroid') + if cent is not None and len(cent) >= 2: + r_start = (float(cent[0]), float(cent[1])) + try: + print(f"[onsite_tug] {self.name}: using centroid as r_start={r_start}") + except Exception: + pass + else: + r_start = r_dest + try: + print(f"[warn] {self.name}: could not infer start from deps; assuming zero in-field distance.") + except Exception: + pass + + # 2D distance [m] + from math import hypot + dx = float(r_dest[0]) - float(r_start[0]) + dy = float(r_dest[1]) - float(r_start[1]) + dist_m = hypot(dx, dy) + + try: + print(f"[onsite_tug] {self.name}: dist_m={dist_m:.1f} (start={r_start} → dest={r_dest})") + except Exception: + pass + + # speed for convoy onsite: barge (operator) site speed + 
operator = self.assets.get('operator') or self.assets.get('vessel') + if operator is None: + raise ValueError('transit_onsite_tug: operator (barge) missing.') + + cap_eng = operator.get('capabilities', {}).get('bollard_pull', {}) + speed_mps = float(cap_eng['site_speed_mps']) + + self.duration += dist_m/speed_mps/3600.0 + + try: + print(f"[onsite_tug] {self.name}: speed_mps={speed_mps:.3f}, dT_h={dist_m/speed_mps/3600.0:.3f}, total={self.duration:.3f}") + except Exception: + pass + + # cost (unchanged) + rate_per_hour = 0.0 + for _, asset in self.assets.items(): + rate_per_hour += float(asset['day_rate'])/24.0 + self.cost += self.duration*rate_per_hour + return self.duration, self.cost + + elif self.type == 'at_site_support': + pass + elif self.type == 'transport_components': + pass + + # --- Mooring & Anchors --- + elif self.type == 'install_anchor': + # YAML override (no model if present) + default_duration = None + try: + default_duration = getFromDict(self.actionType, 'duration_h', dtype=float) + except ValueError: + default_duration = None + + if default_duration is not None: + computed_duration_h = default_duration + + else: + # Expect an anchor object in self.objectList + if not self.objectList: + raise ValueError("install_anchor: no anchor object provided in 'objects'.") + + # 1) Relevant metrics for cost and duration + anchor = self.objectList[0] + L = anchor.dd['design']['L'] + depth_m = abs(float(anchor.r[2])) + + # 2) Winch vertical speed [mps] + v_mpm = float(self.assets['carrier']['capabilities']['winch']['speed_mpm']) + t_lower_min = depth_m/v_mpm + + # 3) Penetration time ~ proportional to L + rate_pen = 15. 
# [min] per [m] + t_pen_min = L*rate_pen + + # 4) Connection / release (fixed) + t_ops_min = 15 + + duration_min = t_lower_min + t_pen_min + t_ops_min + computed_duration_h = duration_min/60.0 # [h] + + # print(f'[install_anchor] yaml_duration={yaml_duration} -> used={computed_duration_h} h') + + # Duration addition + self.duration += computed_duration_h + + # Cost assessment + rate_per_hour = 0.0 + for _, asset in self.assets.items(): + rate_per_hour += float(asset['day_rate'])/24.0 + + self.cost += self.duration*rate_per_hour + + elif self.type == 'retrieve_anchor': + pass + elif self.type == 'install_mooring': + pass + elif self.type == 'mooring_hookup': + pass + + # --- Heavy Lift & Installation --- + elif self.type == 'install_wec': + pass + elif self.type == 'install_semisub': + pass + elif self.type == 'install_spar': + pass + elif self.type == 'install_tlp': + pass + elif self.type == 'install_turbine': + pass + + # --- Cable Operations --- + elif self.type == 'lay_cable': + pass + elif self.type == 'cable_hookup': + pass + elif self.type == 'retrieve_cable': + pass + elif self.type == 'lay_and_bury_cable': + pass + elif self.type == 'backfill_rockdump': + pass + + # --- Survey & Monitoring --- + elif self.type == 'site_survey': + pass + + elif self.type == 'monitor_installation': + # 1) YAML override first + try: + v = getFromDict(self.actionType, 'duration_h', dtype=float); self.duration += v + except ValueError: + try: + v = getFromDict(self.actionType, 'default_duration_h', dtype=float); self.duration += v + except ValueError: + # --- find the paired install --- + ref_install = getattr(self, 'paired_install', None) + + # fallback: BFS through deps to find an install on the same anchor + if ref_install is None: + anchor_obj = self.objectList[0] if self.objectList else None + from collections import deque + q, seen = deque(), set() + for dep in self.dependencies.values(): + q.append((dep, 0)); seen.add(id(dep)) + while q: + node, depth = q.popleft() + if 
getattr(node, 'type', None) == 'install_anchor': + if anchor_obj and node.objectList and node.objectList[0] is anchor_obj: + ref_install = node + break + if ref_install is None: + ref_install = node + if depth < 3: + for nxt in node.dependencies.values(): + if id(nxt) in seen: continue + seen.add(id(nxt)); q.append((nxt, depth+1)) + + # --- get install duration, compute-on-demand if needed (no side effects) --- + inst_dur = 0.0 + if ref_install is not None: + inst_dur = float(getattr(ref_install, 'duration', 0.0) or 0.0) + + # if not computed yet, safely compute and restore + if inst_dur <= 0.0 and not getattr(ref_install, '_in_monitor_pull', False): + try: + ref_install._in_monitor_pull = True # guard re-entrancy + prev_cost = ref_install.cost + prev_dur = ref_install.duration + d, _ = ref_install.calcDurationAndCost() + inst_dur = float(d) if d is not None else 0.0 + # restore to avoid double counting later + ref_install.cost = prev_cost + ref_install.duration = prev_dur + finally: + ref_install._in_monitor_pull = False + + self.duration += inst_dur + + # cost (same pattern you use elsewhere) + rate_per_hour = 0.0 + for _, asset in self.assets.items(): + rate_per_hour += float(asset['day_rate'])/24.0 + self.cost += self.duration * rate_per_hour + return self.duration, self.cost + + else: + raise ValueError(f"Action type '{self.type}' not recognized.") + + return self.duration, self.cost + + + def evaluateAssets(self, assets): + ''' + Checks assets for all the roles in the action. This calls `checkAsset()` + for each role/asset pair and then calculates the duration and + cost for the action as if the assets were assigned. Does not assign + the asset(s) to the action. WARNING: this function will clear the values + (but not keys) in `self.assets`. + + Inputs + ------ + `assets` : `dict` + Dictionary of {role_name: asset} pairs for assignment of the + assets to the roles in the action. + + Returns + ------- + `cost` : `float` + Estimated cost of using the asset. 
        `duration` : `float`
            Estimated duration of the action when performed by asset.
        '''

        # Check each specified asset for its respective role
        for role_name, asset in assets.items():
            assignable, message = self.checkAsset(role_name, asset)
            if assignable:
                self.assets[role_name] = asset # Assignment required for calcDurationAndCost(), will be cleared later
            else:
                print('INFO: '+message+' Action cannot be completed by provided asset list.')
                return -1, -1 # return negative values to indicate incompatibility. Loop is terminated because assets are not compatible for roles.

        # Check that all roles in the action are filled
        for role_name in self.requirements.keys():
            if self.assets[role_name] is None:

                raise Exception(f"Role '{role_name}' is not filled in action '{self.name}'. Cannot calculate duration and cost.") # possibly just a warning and not an exception?

        # Cost/duration computed as if the assets were assigned
        duration, cost = self.calcDurationAndCost()

        for role_name in assets.keys(): # Clear the assets dictionary
            assets[role_name] = None

        return duration, cost # values returned here rather than set because will be used to check compatibility and not set properties of action


    def assignAsset(self, role_name, asset):
        '''
        Checks if asset can be assigned to an action.
        If yes, assigns asset to role in the action.

        Inputs
        ------
        `role_name` : `str`
            The name of the role to which the asset will be assigned.
        `asset` : `dict`
            The asset to be assigned to the role.

        Returns
        -------
        `None`
        '''
        # Make sure role_name is valid for this action
        if not role_name in self.assets.keys():
            raise Exception(f"The specified role name '{role_name}' is not in this action.")

        # Roles are never silently overwritten; clear first via clearAssets()
        if self.assets[role_name] is not None:
            raise Exception(f"Role '{role_name}' is already filled in action '{self.name}'.")

        assignable, message = self.checkAsset(role_name, asset)
        if assignable:
            self.assets[role_name] = asset
        else:
            raise Exception(message) # throw error message

    def assignAssets(self, assets):
        '''
        Assigns assets to all the roles in the action. This calls
        `assignAsset()` for each role/asset pair and then calculates the
        duration and cost for the action. Similar to `evaluateAssets()`
        however here assets are assigned and duration and cost are
        set after evaluation.

        Inputs
        ------
        `assets` : `dict`
            Dictionary of {role_name: asset} pairs for assignment of the
            assets to the roles in the action.

        Returns
        -------
        `None`
        '''

        # Assign each specified asset to its respective role
        for role_name, asset in assets.items():
            self.assignAsset(role_name, asset)

        # Check that all roles in the action are filled
        for role_name in self.requirements.keys():
            if self.assets[role_name] is None:
                raise Exception(f"Role '{role_name}' is not filled in action '{self.name}'. Cannot calculate duration and cost.") # possibly just a warning and not an exception?

        # Unlike evaluateAssets(), the computed duration/cost are kept on the action
        self.calcDurationAndCost()

    def clearAssets(self):
        '''
        Clears all assigned assets from the action.

        Inputs
        ------
        `None`

        Returns
        -------
        `None`
        '''
        # Keys (role names) are preserved; only the values are reset
        for role_name in self.assets.keys():
            self.assets[role_name] = None

    # ----- Below are drafts of methods for use by the engine -----
    """
    def begin(self):
        '''
        Take control of all objects.
+ + Inputs + ------ + `None` + + Returns + ------- + `None` + ''' + for vessel in self.vesselList: + vessel._attach_to(self) + for object in self.objectList: + object._attach_to(self) + + + def end(self): + ''' + Release all objects. + + Inputs + ------ + `None` + + Returns + ------- + `None` + ''' + for vessel in self.vesselList: + vessel._detach_from() + for object in self.objectList: + object._detach_from() + """ + + def timestep(self): + ''' + Advance the simulation of this action forward one step in time. + + Inputs + ------ + `None` + + Returns + ------- + `None` + ''' + + # (this is just documenting an idea for possible future implementation) + # Perform the hourly action of the task + + if self.type == 'tow': + # controller - make sure things are going in right direction... + # (switch mode if need be) + if self.mode == 0 : # gathering vessels + ves = self.assets['vessel'] + dr = self.r_start - ves.r + ves.setCourse(dr) # sets vessel velocity + + # if vessel is stopped (at the object), time to move + if np.linalg.norm(ves.v) == 0: + self.mode = 1 + + if self.mode == 1: # towing + ves = self.assets['vessel'] + dr = self.r_finish - ves.r + ves.setCourse(dr) # sets vessel velocity + + # if all vessels are stopped (at the final location), time to end + if np.linalg.norm(ves.v) == 0: + self.mode = 2 + + if self.mode == 2: # finished + self.end() + + diff --git a/famodel/irma/calwave_actions.yaml b/famodel/irma/calwave_actions.yaml new file mode 100644 index 00000000..83ce4433 --- /dev/null +++ b/famodel/irma/calwave_actions.yaml @@ -0,0 +1,381 @@ +# This file defines standardized marine operations actions. +# Each entry needs numeric values per specific asset in vessels.yaml. +# Vessel actions will be checked against capabilities/actions for validation. 
#
# Old format: requirements and capabilities
# New format: roles, which lists asset roles, each with associated required capabilities

# The code that models and checks these actions is action.calcDurationAndCost(). Structural changes here will not be reflected in the code unless changes are made there as well

### Example action ###

# example_action:
#   objects: [] or {}  "The FAModel object types that are supported in this action"
#   requirements: []   "Asset types" **Unused**
#   roles:   "the roles that assets need to fill. A way a grouping capabilities so multiple assets can be assigned to an action"
#     role1:
#     - capability 1
#     - capability 2
#     role2:
#     - capability 3
#   duration_h: 0.0    "Duration in hours"
#   Hs_m: 0.0          "Wave height constraints in meters"
#   description:       "A description"


# --- Mobilization ---

mobilize:
  objects: []
  roles:
    operator:
    - deck_space
  duration_h:
  Hs_m:
  description: "Mobilization of vessel in homeport"

demobilize:
  objects: []
  roles:
    operator:
    - deck_space
  # NOTE(review): 'capabilities: []' below looks like a leftover from the old
  # format described in the header — confirm whether it can be removed.
  capabilities: []
  duration_h: 1.0
  description: "Demobilization of vessel in homeport"

load_cargo:
  objects: [anchor, mooring, cable, platform, component]
  roles:    # The asset roles involved and the capabilities required of each role
    #carrier1: []   # the port or vessel where the moorings begin
    # (no requirements)
    #carrier2:      # the vessel things will be loaded onto
    #- deck_space
    #- winch
    #- positioning_system
    operator:       # the entity with the crane (like the port or the new vessel)
    - crane
    - deck_space
  duration_h:
  Hs_m:
  description: "Load-out of mooring systems and components from port or vessel onto vessel."

# --- Towing & Transport ---

transit_linehaul_self:
  objects: [anchor]
  roles:
    vessel:
    - engine
  duration_h:
  description: "Self-propelled line-haul between port and site"

transit_linehaul_tug:
  objects: [anchor]
  roles:
    carrier:
    - engine
    operator:
    - bollard_pull
  duration_h:
  description: "Tugged line-haul convoy (tug + barge) between port and site"

transit_onsite_self:
  objects: [anchor]
  roles:
    vessel:
    - engine
  duration_h:
  description: "Self-propelled in-field move between site locations"

transit_onsite_tug:
  objects: [anchor]
  roles:
    carrier:
    - engine
    operator:
    - bollard_pull
  duration_h:
  description: "Tug + barge in-field move between site locations"

tow:
  objects: [platform]
  roles:    # The asset roles involved and the capabilities required of each role
    carrier:
    - engine
    operator:
    - bollard_pull
    - winch
    - positioning_system
  duration_h:
  Hs_m:
  description: "Towing floating structures (e.g., floaters, barges) to site; includes station-keeping."

transport_components:
  objects: [component]
  roles:    # The asset roles involved and the capabilities required of each role
    carrier:        # vessel carrying things
    - engine
    - bollard_pull
    - deck_space
    - crane
    - positioning_system
  duration_h:
  Hs_m:
  description: "Transport of large components such as towers, nacelles, blades, or jackets."

at_site_support:
  objects: []
  roles:    # The asset roles involved and the capabilities required of each role
    # tug:          # vessel carrying things
    # - bollard_pull
    # - deck_space
    # - winch
    # - positioning_system
    # - monitoring_system
    operator:
    - engine
  duration_h:
  Hs_m:
  description: "Transport of vessel around the site to provide support."
+ +# --- Mooring & Anchors --- + +install_anchor: + objects: [anchor, component] + roles: # The asset roles involved and the capabilities required of each role + carrier: # vessel that provides propulsion + - engine + operator: # vessel that carries, lowers and installs the anchor + - bollard_pull + - deck_space + - winch + - crane + - pump_subsea # pump_surface, drilling_machine, torque_machine + - positioning_system + - monitoring_system + duration_h: + Hs_m: + description: "Anchor installation (suction, driven, helical, DEA, SEPLA) with tensioning and verification." + +retrieve_anchor: + objects: [anchor, component] + roles: # The asset roles involved and the capabilities required of each role + carrier: + - engine + operator: + - bollard_pull + - deck_space + - winch + - bollard_pull + - crane + - pump_subsea + - positioning_system + duration_h: + Hs_m: + description: "Anchor retrieval, including break-out and recovery to deck." + +install_mooring: + objects: [mooring, component] + roles: # The asset roles involved and the capabilities required of each role + carrier: # vessel carrying the mooring + - engine + operator: # vessel laying the mooring + - bollard_pull + - deck_space + - winch + - bollard_pull + - mooring_work + - positioning_system + duration_h: + Hs_m: + description: "Laying mooring lines, tensioning and connection to anchors and floaters." + + +mooring_hookup: + objects: [mooring, component, platform] + roles: # The asset roles involved and the capabilities required of each role + carrier: + - deck_space + operator: + - winch + - bollard_pull + - mooring_work + - positioning_system + - monitoring_system + duration_h: + Hs_m: + description: "Hook-up of mooring lines to floating platforms, including pretensioning." 
+ +# --- Heavy Lift & Installation --- + +install_wec: + objects: [platform] + capabilities: + - deck_space + - crane + - positioning_system + - monitoring_system + - rov + duration_h: + Hs_m: + description: "Lifting, placement and securement of wave energy converters (WECs) onto moorings, including alignment, connection of power/data umbilicals and verification via ROV." + +install_semisub: + objects: [platform] + capabilities: + - deck_space + - bollard_pull + - winch + - crane + - positioning_system + - monitoring_system + - rov + - sonar_survey + - pump_surface + - mooring_work + duration_h: + Hs_m: + description: "Wet tow arrival, station-keeping, ballasting/trim, mooring hookup and pretensioning, ROV verification and umbilical connections as needed." + +install_spar: + objects: [platform] + capabilities: + - deck_space + - bollard_pull + - winch + - positioning_system + - monitoring_system + - rov + - sonar_survey + - pump_surface + - mooring_work + duration_h: + Hs_m: + description: "Arrival and upending via controlled ballasting, station-keeping, fairlead/messenger handling, mooring hookup and pretensioning with ROV confirmation. Heavy-lift support may be used during port integration." + +install_tlp: + objects: [platform] + capabilities: + - deck_space + - bollard_pull + - winch + - crane + - positioning_system + - monitoring_system + - rov + - sonar_survey + - mooring_work + duration_h: + Hs_m: + description: "Tendon porch alignment, tendon hookup, sequential tensioning to target pretension, verification of offsets/RAOs and ROV checks." + +install_wtg: + objects: [turbine] + capabilities: + - deck_space + - crane + - positioning_system + - monitoring_system + duration_h: + Hs_m: + description: "Installation of wind turbine generator including tower, nacelle and blades." 
+ +# --- Cable Operations --- + +lay_cable: + objects: [cable] + capabilities: + - deck_space + - positioning_system + - monitoring_system + - cable_reel + - sonar_survey + duration_h: + Hs_m: + description: "Laying static/dynamic power cables, including burial where required." + +cable_hookup: + objects: [cable, component, platform] + roles: # The asset roles involved and the capabilities required of each role + carrier: + - deck_space + operator: + - winch + - bollard_pull + - mooring_work + - positioning_system + - monitoring_system + duration_h: + Hs_m: + description: "Hook-up of cable to floating platforms, including pretensioning." + +retrieve_cable: + objects: [cable] + capabilities: + - deck_space + - positioning_system + - monitoring_system + - cable_reel + duration_h: + Hs_m: + description: "Cable recovery operations, including cutting, grappling and retrieval." + + # Lay and bury in a single pass using a plough +lay_and_bury_cable: + objects: [cable] + capabilities: + - deck_space + - positioning_system + - monitoring_system + - cable_reel + - cable_plough + - sonar_survey + duration_h: + Hs_m: + description: "Simultaneous lay and plough burial; continuous QA via positioning + MBES/SSS, with post-pass verification." + +# Backfill trench or stabilize cable route using rock placement +backfill_rockdump: + objects: [cable] + capabilities: + - deck_space + - positioning_system + - monitoring_system + - sonar_survey + - rock_placement + duration_h: + Hs_m: + description: "Localized rock placement to stabilize exposed cables, infill trenches or provide scour protection. Includes real-time positioning and sonar verification of rock placement." + +# --- Survey & Monitoring --- + +site_survey: + objects: [] + capabilities: + - positioning_system + - sonar_survey + - monitoring_system + duration_h: + Hs_m: + description: "Pre-installation site survey including bathymetry, sub-bottom profiling and positioning." 
+ +monitor_installation: + objects: [anchor, mooring, component, platform, cable] + roles: + support: + - positioning_system + - monitoring_system + - rov + duration_h: 4.5 + Hs_m: + description: "Real-time monitoring of installation operations using ROV and sensor packages." + +diver_support: + objects: [] + capabilities: + - positioning_system + - sonar_survey + - monitoring_system + duration_h: + Hs_m: + description: "Divers site survey including monitoring and positioning." \ No newline at end of file diff --git a/famodel/irma/calwave_bathymetry.txt b/famodel/irma/calwave_bathymetry.txt new file mode 100644 index 00000000..b186375f --- /dev/null +++ b/famodel/irma/calwave_bathymetry.txt @@ -0,0 +1,14 @@ +--- MoorPy Bathymetry Input File --- +nGridX 8 +nGridY 5 + -2500 -2000 -1500 -1000 -500 0 1000 2500 +-2500 200.1 199.7 240 219 202 198 204 210 +-2000 207 205 210 207 205 201 211 220 +-1500 203 198 207 199 195 204 207 214 +-1000 200.4 207 201 190 199 201 203 205 + -800 210.7 198.9 185 188 193 189 177 194 + + + + + diff --git a/famodel/irma/calwave_capabilities.yaml b/famodel/irma/calwave_capabilities.yaml new file mode 100644 index 00000000..a92a6d44 --- /dev/null +++ b/famodel/irma/calwave_capabilities.yaml @@ -0,0 +1,176 @@ +# This file defines standardized capabilities for vessels and equipment. +# Each entry needs numeric values per specific asset in vessels.yaml. +# Vessel actions will be checked against capabilities/actions for validation. + +# The code that calculates the values for these capabilities is action.getMetrics(). +# Changes here won't be reflected in Irma unless the action.getMetrics() code is also updated. + +# >>> Units to be converted to standard values, with optional converter script +# for allowing conventional unit inputs. 
<<< + +# --- Vessel (on-board) --- + - name: engine + # description: Engine on-board of the vessel + # fields: + power_hp: # power [horsepower] + site_speed_mps: # speed [m/s] + + - name: bollard_pull + # description: Towing/holding force capability + # fields: + max_force_t: # bollard pull [t] + site_speed_mps: # speed [m/s] + + - name: deck_space + # description: Clear usable deck area and allowable load + # fields: + area_m2: # usable area [m2] + max_load_t: # allowable deck load [t] + + - name: chain_locker + # description: Chain storage capacity + # fields: + volume_m3: # storage volume [m3] + + - name: line_reel + # description: Chain/rope storage on drum or carousel + # fields: + volume_m3: # storage volume [m3] + rope_capacity_m: # total rope length storage [m] + + - name: cable_reel + # description: Cable storage on drum or carousel + # fields: + volume_m3: # storage volume [m3] + cable_capacity_m: # total cable length stowable [m] + + - name: winch + # description: Deck winch pulling capability + # fields: + max_line_pull_t: # continuous line pull [t] + brake_load_t: # static brake holding load [t] + speed_mpm: # payout/haul speed [m/min] + + - name: crane + # description: Main crane lifting capability + # fields: + capacity_t: # SWL at specified radius [t] + hook_height_m: # max hook height [m] + + - name: station_keeping + # description: Vessel station keeping capability (dynamic positioning or anchor-based) + # fields: + type: # e.g., DP0, DP1, DP2, DP3, anchor_based + + - name: mooring_work + # description: Suitability for anchor/mooring operations + # fields: + line_types: # e.g., [chain, ropes...] 
+ stern_roller: # presence of stern roller (optional) + shark_jaws: # presence of chain stoppers/jaws (optional) + towing_pin_rating_t: # rating of towing pins [t] (optional) + +# --- Equipment (portable) --- + + - name: pump_surface + # description: Surface-connected suction pump + # fields: + power_kW: + pressure_bar: + weight_t: + dimensions_m: # LxWxH + + - name: pump_subsea + # description: Subsea suction pump (electric/hydraulic) + # fields: + power_kW: + pressure_bar: + weight_t: + dimensions_m: # LxWxH + + - name: pump_grout + # description: Grout mixing and pumping unit + # fields: + power_kW: + flow_rate_m3hr: + pressure_bar: + weight_t: + dimensions_m: # LxWxH + + - name: hydraulic_hammer + # description: Impact hammer for pile driving + # fields: + power_kW: + energy_per_blow_kJ: + weight_t: + dimensions_m: # LxWxH + + - name: vibro_hammer + # description: Vibratory hammer + # fields: + power_kW: + centrifugal_force_kN: + weight_t: + dimensions_m: # LxWxH + + - name: drilling_machine + # description: Drilling/rotary socket machine + # fields: + power_kW: + weight_t: + dimensions_m: # LxWxH + + - name: torque_machine + # description: High-torque rotation unit + # fields: + power_kW: + torque_kNm: + weight_t: + dimensions_m: # LxWxH + + - name: cable_plough + # description: + # fields: + power_kW: + weight_t: + dimensions_m: # LxWxH + + - name: rock_placement + # description: System for controlled placement of rock for trench backfill, scour protection, and seabed stabilization. 
+ # fields: + placement_method: # e.g., fall_pipe, side_dump, grab + max_depth_m: # maximum operational water depth + accuracy_m: # placement accuracy on seabed + rock_size_range_mm: # min and max rock/gravel size + + - name: container + # description: Control/sensors container for power pack and monitoring + # fields: + weight_t: + dimensions_m: # LxWxH + + - name: rov + # description: Remotely Operated Vehicle + # fields: + class: # e.g., OBSERVATION, LIGHT, WORK-CLASS + depth_rating_m: + weight_t: + dimensions_m: # LxWxH + + - name: positioning_system + # description: Seabed placement/positioning aids + # fields: + accuracy_m: + methods: # e.g., [USBL, LBL, DVL, INS] + + - name: monitoring_system + # description: Installation performance monitoring + # fields: + metrics: # e.g., [pressure, flow, tilt, torque, bathymetry, berm_shape...] + sampling_rate_hz: + + - name: sonar_survey + # description: Sonar systems for survey and verification + # fields: + types: # e.g., [MBES, SSS, SBP] + resolution_m: diff --git a/famodel/irma/calwave_chart.py b/famodel/irma/calwave_chart.py new file mode 100644 index 00000000..69a556db --- /dev/null +++ b/famodel/irma/calwave_chart.py @@ -0,0 +1,298 @@ + +from dataclasses import dataclass +from typing import List, Optional, Dict, Tuple +import matplotlib.pyplot as plt + +# =============================== +# Data structures +# =============================== + +@dataclass +class Bubble: + action: str + duration_hr: float + label_time: str + period: Optional[Tuple[float, float]] = None + category: Optional[str] = None # new: action category for coloring + +@dataclass +class VesselTimeline: + vessel: str + bubbles: List[Bubble] + +@dataclass +class Task: + name: str + vessels: List[VesselTimeline] + +# =============================== +# Color palette + categorization +# =============================== + +# User-requested color scheme +ACTION_TYPE_COLORS: Dict[str, str] = { + 'Mobilization': '#d62728', # red + 'Towing & Transport': 
'#2ca02c', # green + 'Mooring & Anchors': '#0056d6', # blue + 'Heavy Lift & Installation': '#ffdd00', # yellow + 'Cable Operations': '#9467bd', # purple + 'Survey & Monitoring': '#ff7f0e', # orange + 'Other': '#1f77b4'} # fallback color (matplotlib default) + + +# Keyword buckets → chart categories +CAT_KEYS = [ + ('Mobilization', ('mobilize', 'demobilize')), + ('Towing & Transport', ('transit', 'towing', 'tow', 'convoy', 'linehaul')), + ('Mooring & Anchors', ('anchor', 'mooring', 'pretension', 'pre-tension')), + ('Survey & Monitoring', ('monitor', 'survey', 'inspection', 'rov', 'divers')), + ('Heavy Lift & Installation', ('install_wec', 'install device', 'install', 'heavy-lift', 'lift', 'lower', 'recover_wec', 'recover device')), + ('Cable Operations', ('cable', 'umbilical', 'splice', 'connect', 'wet-mate', 'dry-mate'))] + + +def view_from_task(sched_task, sc, title: str | None = None): + """ + Minimal map: scheduler Task -> chart view Task + Show an action on multiple lanes if it uses multiple assets. + + Rules per role value: + • If str and in sc.vessels → use as key. + • Else if object → resolve by identity to sc.vessels. + • Else if dict → try ['name'] as key; else if ['type'] is unique in sc.vessels, use that key. + • Add the bubble to every resolved lane (deduped). + • Skip actions with dur<=0 or with no resolvable lanes. 
+ """ + # reverse lookup for identity → key + id2key = {id(obj): key for key, obj in sc.vessels.items()} + + # unique type → key (used only if type is unique in catalog) + type_counts = {} + for k, obj in sc.vessels.items(): + t = obj.get('type') if isinstance(obj, dict) else getattr(obj, 'type', None) + if t: + type_counts[t] = type_counts.get(t, 0) + 1 + unique_type2key = {} + for k, obj in sc.vessels.items(): + t = obj.get('type') if isinstance(obj, dict) else getattr(obj, 'type', None) + if t and type_counts.get(t) == 1: + unique_type2key[t] = k + + buckets = {} + + for a in sched_task.actions.values(): + dur = float(getattr(a, 'duration', 0.0) or 0.0) + if dur <= 0.0: + continue + + aa = getattr(a, 'assets', {}) or {} + + # collect ALL candidate roles → multiple lanes allowed + lane_keys = set() + for role in ('vessel', 'carrier', 'operator', 'support'): + if role not in aa: + continue + v = aa[role] + + # resolve lane key + lane = None + if isinstance(v, str): + lane = v if v in sc.vessels else None + elif v is not None: + lane = id2key.get(id(v)) + if lane is None and isinstance(v, dict): + nm = v.get('name') + if isinstance(nm, str) and nm in sc.vessels: + lane = nm + else: + t = v.get('type') + if t in unique_type2key: + lane = unique_type2key[t] + if lane: + lane_keys.add(lane) + + if not lane_keys: + continue + + t0 = float(getattr(a, 'start_hr', 0.0) or 0.0) + t1 = float(getattr(a, 'end_hr', t0) or 0.0) + + # Color code for action categories based on CAT_KEYS + def cat_for(act): + s = f"{getattr(act, 'type', '')} {getattr(act, 'name', '')}".lower().replace('_', ' ') + for cat, keys in CAT_KEYS: + if any(k in s for k in keys): + return cat + return 'Other' + + # one bubble per lane (same fields) + for lane in lane_keys: + b = Bubble( + action=a.name, + duration_hr=dur, + label_time=getattr(a, 'label_time', f'{dur:.1f}'), + period=(t0, t1), + category=cat_for(a)) + + buckets.setdefault(lane, []).append(b) + + # preserve sc.vessels order; only include 
def plot_task(task: Task, outpath: Optional[str] = None, dpi: int = 200,
              show_title: bool = True) -> None:
    """
    Render a Gantt-like chart for a single Task with one axes and one horizontal lane per vessel.
    • Vessel names as y-tick labels
    • Baseline arrows, light span bars, circle bubbles with time inside, title above,
      and consistent font sizes.
    • Horizontal placement uses Bubble.period when available; otherwise cumulative within vessel.
    • Bubbles are colored by Bubble.category (legend added).

    Parameters
    ----------
    task : Task
        The task whose vessel timelines (lanes of Bubbles) are drawn.
    outpath : str, optional
        If given, the figure is saved to this path instead of shown interactively.
    dpi : int
        Figure resolution for both rendering and saving.
    show_title : bool
        Whether to draw the task name as a left-aligned title.
    """
    from matplotlib.lines import Line2D
    from matplotlib.patches import Circle  # NOTE(review): Circle appears unused below

    # --- figure geometry ---
    # Height scales with the number of lanes so bubbles don't overlap vertically.
    nrows = max(1, len(task.vessels))
    fig_h = max(3.0, 1.2 + 1.6*nrows)
    fig_w = 16.0

    plt.rcdefaults()
    plt.close('all')
    fig, ax = plt.subplots(figsize=(fig_w, fig_h), dpi=dpi)

    # --- y lanes (top -> bottom keeps given order) ---
    vessels_top_to_bottom = task.vessels
    nrows = max(1, len(task.vessels))
    # Reversed ranges so the first vessel in the list gets the topmost lane.
    y_positions = list(range(nrows))[::-1]
    name_to_y = {vt.vessel: y_positions[i] for i, vt in enumerate(vessels_top_to_bottom[::-1])}

    ax.set_yticks(y_positions)
    # Tick labels stay empty: vessel names are drawn later as circled text.
    ax.set_yticklabels([])
    ax.tick_params(axis='y', labelrotation=0)

    if show_title:
        ax.set_title(task.name, loc='left', fontsize=16, pad=12)

    # --- gather periods, compute x-range ---
    x_min, x_max = 0.0, 0.0
    per_row: Dict[str, List[Tuple[float, float, Bubble]]] = {vt.vessel: [] for vt in task.vessels}

    for vt in task.vessels:
        t_cursor = 0.0  # cumulative fallback position when a bubble has no explicit period
        for b in vt.bubbles:
            if b.period:
                s, e = float(b.period[0]), float(b.period[1])
            else:
                s = t_cursor
                e = s + float(b.duration_hr or 0.0)
            per_row[vt.vessel].append((s, e, b))
            x_min = min(x_min, s)
            x_max = max(x_max, e)
            t_cursor = e  # next unplaced bubble starts where this one ended

    # --- drawing helpers ---
    def _draw_lane_baseline(y_val: float):
        # Horizontal arrow spanning the full time range of the chart.
        ax.annotate('', xy=(x_max, y_val), xytext=(x_min, y_val),
                    arrowprops=dict(arrowstyle='-|>', lw=2))

    def _draw_span_hint(s: float, e: float, y_val: float):
        # Faint thick line marking the actual start-to-end span of an action.
        ax.plot([s, e], [y_val, y_val], lw=6, alpha=0.15, color='k')

    def _bubble_face_color(b: Bubble) -> str:
        cat = b.category or 'Other'
        return ACTION_TYPE_COLORS.get(cat, ACTION_TYPE_COLORS['Other'])

    def _text_color_for_face(face: str) -> str:
        # Dark text on light (yellow) bubbles, white text otherwise.
        return 'black' if face.lower() in ('#ffdd00', 'yellow') else 'white'

    def _draw_bubble(s: float, e: float, y_val: float, b: Bubble, i_in_row: int):
        xc = 0.5*(s + e)  # bubble centered on the action's time span
        face = _bubble_face_color(b)
        txtc = _text_color_for_face(face)
        ax.plot(xc, y_val, 'o', ms=45, color=face, zorder=3)
        ax.text(xc, y_val, f'{b.label_time}', ha='center', va='center', fontsize=20,
                color=txtc, weight='bold')
        # Alternate label heights so adjacent action names don't collide.
        title_offset = 0.30 if (i_in_row % 2) else 0.20
        ax.text(xc, y_val + title_offset, b.action, ha='center', va='bottom', fontsize=10)
        # caps_txt = _capabilities_to_text(b.capabilities)
        # if caps_txt:
        #     ax.text(xc, y_val - 0.26, caps_txt, ha='center', va='top', fontsize=8, wrap=True)

    # --- draw per lane ---
    seen_cats: set[str] = set()
    for vt in task.vessels:
        y = name_to_y[vt.vessel]
        items = sorted(per_row[vt.vessel], key=lambda t: t[0])  # chronological within the lane
        _draw_lane_baseline(y)
        for j, (s, e, b) in enumerate(items):
            _draw_span_hint(s, e, y)
            _draw_bubble(s, e, y, b, j)
            seen_cats.add(b.category or 'Other')

    # --- legend ---
    handles = []
    # Only categories actually present in the chart appear in the legend.
    legend_cats = [c for c in ACTION_TYPE_COLORS.keys() if c in seen_cats]
    # if you prefer to always show all categories, replace the line above with: legend_cats = list(ACTION_TYPE_COLORS.keys())
    for cat in legend_cats:
        handles.append(Line2D([0], [0], marker='o', linestyle='none', markersize=12,
                              markerfacecolor=ACTION_TYPE_COLORS[cat], markeredgecolor='none', label=cat))
    if handles:
        # Place the legend below the x-axis label (bottom center)
        fig_ = ax.figure
        fig_.legend(handles=handles,
                    loc='lower center',
                    bbox_to_anchor=(0.5, -0.12),  # move below the axis label
                    ncol=3,
                    title='Action Types',
                    frameon=False)

    # --- axes cosmetics & limits ---
    if x_max <= x_min:
        x_max = x_min + 1.0  # guarantee a non-degenerate x-range for empty tasks
    pad = 0.02*(x_max - x_min) if (x_max - x_min) > 0 else 0.5
    ax.set_xlim(x_min - pad, x_max + pad)

    # Draw circled vessel names at the same y positions
    x_name = x_min - 3*pad  # small left offset inside the axes

    # Pad every name to the longest label so all circles come out the same size.
    max_len = max(len(vt.vessel) for vt in vessels_top_to_bottom)  # longest label

    # make the circle tighter/looser:
    circle_pad = 0.18

    for vt in vessels_top_to_bottom[::-1]:
        y = name_to_y[vt.vessel]
        fixed_text = vt.vessel.center(max_len)  # pad with spaces to max length
        ax.text(
            x_name, y, fixed_text,
            ha='center', va='center', zorder=6, clip_on=False,
            fontsize=12, color='black', fontfamily='monospace',  # <- key: monospace
            bbox=dict(boxstyle='circle,pad={:.2f}'.format(circle_pad),
                      facecolor='lightgrey', edgecolor='tomato', linewidth=6))

    ax.set_xlabel('Timeline (h)')
    ax.grid(False)
    for spine in ['top', 'right', 'left']:
        ax.spines[spine].set_visible(False)

    ax.set_ylim(min(y_positions) - 0.5, max(y_positions) + 0.5)

    fig = ax.figure
    # Add extra bottom margin to make space for the legend below the x-axis label
    fig.subplots_adjust(left=0.10, right=0.98, top=0.90, bottom=0.15)

    if outpath:
        fig.savefig(outpath, dpi=dpi, bbox_inches='tight')
    else:
        plt.show()
def loadYAMLtoDict(info, already_dict=False):
    '''Reads a list or YAML file and prepares a dictionary.

    Parameters
    ----------
    info : str or list
        Filename of a YAML file to load, or an already-loaded list of entry
        dicts. Unless already_dict is True, each entry must have a 'name'
        field, which becomes its key in the returned dictionary.
    already_dict : bool
        If True, the loaded data is assumed to already be a mapping and is
        used as-is instead of being re-keyed by each entry's 'name'.

    Returns
    -------
    dict
        Dictionary of entries keyed by name (or the original mapping).

    Raises
    ------
    Exception
        If the file is empty/unparseable, an entry lacks a 'name', or two
        entries share the same name.
    '''

    if isinstance(info, str):
        with open(info) as file:
            data = yaml.load(file, Loader=yaml.FullLoader)
        if not data:
            # A missing file already raises FileNotFoundError from open(), so
            # reaching here means the file parsed to nothing (fixed message:
            # it previously claimed the file "does not exist").
            raise Exception(f'File {info} is empty or could not be parsed as YAML. Please check its contents.')
    elif isinstance(info, list):
        data = info
    else:
        raise Exception('loadYAMLtoDict must be passed a filename or list')

    # Go through contents and produce the dictionary
    info_dict = {}

    if already_dict:
        # assuming it's already a dict
        info_dict.update(data)

    else:  # a list of dicts with name parameters
        # Convert into a dict keyed by those names
        for entry in data:
            if 'name' not in entry:
                print(entry)
                raise Exception('This entry does not have a required name field.')

            if entry['name'] in info_dict:
                print(entry)
                raise Exception('This entry has the same name as an existing entry.')

            info_dict[entry['name']] = entry  # could make this a copy operation if worried

    return info_dict


#def storeState(project,...):


#def applyState():


def unifyUnits(d):
    '''Converts any capability specification/metric in supported non-SI units
    to be in SI units. Converts the key names as well.

    The conversion table is read from 'spec_conversions.yaml', where each row
    is [original_key, factor, SI_key]. Converted capability dicts are written
    back onto each asset (previously the result was built and then discarded,
    making the function a no-op).

    Parameters
    ----------
    d : dict
        Dictionary of assets, each with a 'capabilities' sub-dictionary.

    Returns
    -------
    dict
        The same dictionary d, with capabilities converted in place.
    '''

    # load conversion data from YAML (eventually may want to store this in a class)
    with open('spec_conversions.yaml') as file:
        data = yaml.load(file, Loader=yaml.FullLoader)

    keys1 = [line[0] for line in data]  # original (non-SI) key names
    facts = [line[1] for line in data]  # conversion factors
    keys2 = [line[2] for line in data]  # corresponding SI key names

    for asset in d.values():  # loop through each asset's dict

        capabilities = {}  # new dict of capabilities to build up

        for cap_key, cap_val in asset['capabilities'].items():

            # make the capability type sub-dictionary
            capabilities[cap_key] = {}

            for key, val in cap_val.items():  # look at each capability metric
                try:
                    i = keys1.index(key)  # find if key is on the list to convert
                except ValueError:
                    # Not on the conversion list: copy over in original form.
                    # (The previous bare `except:` also swallowed the duplicate-
                    # specification Exception below, silently dropping it.)
                    capabilities[cap_key][key] = val
                    continue

                if keys2[i] in cap_val:
                    raise Exception(f"Specification '{keys2[i]}' already exists")

                capabilities[cap_key][keys2[i]] = val * facts[i]  # make converted entry

        asset['capabilities'] = capabilities  # write back (this was missing before)

    return d
class Scenario():
    '''Holds the catalogs and registries needed for IO&M modeling of an
    offshore energy system: action types, capabilities, vessels, and object
    types loaded from YAML, plus registries of Action and Task instances.
    '''

    def __init__(self):
        '''Initialize a scenario object that can be used for IO&M modeling of
        an offshore energy system. Eventually it will accept user-specified
        settings files.

        Raises
        ------
        Exception
            If any catalog entry cross-references a capability, action, or
            object that is not present in the corresponding global catalog.
        '''

        # ----- Load database of supported things -----

        actionTypes = loadYAMLtoDict('calwave_actions.yaml', already_dict=True)  # descriptions of actions that can be done
        capabilities = loadYAMLtoDict('calwave_capabilities.yaml')
        vessels = loadYAMLtoDict('calwave_vessels.yaml', already_dict=True)
        objects = loadYAMLtoDict('calwave_objects.yaml', already_dict=True)

        unifyUnits(vessels)  # convert any non-SI capability specs to SI

        # ----- Validate internal cross references -----

        # Make sure vessels don't use nonexistent capabilities or actions
        for key, ves in vessels.items():

            # Check capabilities
            if 'capabilities' not in ves:
                raise Exception(f"Vessel '{key}' is missing a capabilities list.")

            for capname, cap in ves['capabilities'].items():
                if capname not in capabilities:
                    raise Exception(f"Vessel '{key}' capability '{capname}' is not in the global capability list.")

                # Also check the sub-parameters of the capability
                for cap_param in cap:
                    if cap_param not in capabilities[capname]:
                        raise Exception(f"Vessel '{key}' capability '{capname}' parameter '{cap_param}' is not in the global capability's parameter list.")

            # Check actions
            if 'actions' not in ves:
                raise Exception(f"Vessel '{key}' is missing an actions list.")

            for act in ves['actions']:
                if act not in actionTypes:
                    raise Exception(f"Vessel '{key}' action '{act}' is not in the global action list.")

        # Make sure actions refer to supported object types/properties and capabilities
        for key, act in actionTypes.items():

            act['type'] = key  # record the catalog key on the entry itself

            # Check capabilities listed directly on the action
            if 'capabilities' in act:
                for cap in act['capabilities']:
                    if cap not in capabilities:
                        raise Exception(f"Action '{key}' capability '{cap}' is not in the global capability list.")

            if 'roles' in act:  # look through capabilities listed under each role
                for caps in act['roles'].values():
                    for cap in caps:
                        if cap not in capabilities:
                            raise Exception(f"Action '{key}' capability '{cap}' is not in the global capability list.")

            # Check objects
            if 'objects' not in act:
                raise Exception(f"Action '{key}' is missing an objects list.")

            for obj in act['objects']:
                if obj not in objects:
                    raise Exception(f"Action '{key}' object '{obj}' is not in the global objects list.")

                # Also check the sub-parameters of the object, if given as a dict
                if isinstance(act['objects'], dict):
                    for obj_param in act['objects'][obj]:
                        if obj_param not in objects[obj]:
                            raise Exception(f"Action '{key}' object '{obj}' parameter '{obj_param}' is not in the global object's parameter list.")

        # Store the catalogs
        self.actionTypes = actionTypes
        self.capabilities = capabilities
        self.vessels = vessels
        self.objects = objects

        # Initialize the registries
        self.actions = {}
        self.tasks = {}

    def registerAction(self, action):
        '''Registers an already created action, renaming it if necessary so the
        registry key is unique, and checking its dependencies are registered.
        '''
        if action.name in self.actions:  # check if there is already a key with the same name
            # BUG FIX: this previously did `raise Warning(...)` first, which
            # aborted the method and made the renaming below unreachable.
            print(f"Action name '{action.name}' is in the actions list so incrementing it...")
            action.name = increment_name(action.name)

        # Dependencies are managed on the action object, but check that each
        # one is already in the registry.
        for dep in action.dependencies.values():
            if dep not in self.actions.values():
                raise Exception(f"New action '{action.name}' has a dependency '{dep.name}' this is not in the action list.")

        # Add it to the actions dictionary
        self.actions[action.name] = action

    def addAction(self, action_type_name, action_name, **kwargs):
        '''Creates an action of the given catalog type and registers it.

        Returns
        -------
        Action
            The newly created action object.
        '''
        if action_type_name not in self.actionTypes:
            raise Exception(f"Specified action type name {action_type_name} is not in the list of loaded action types.")

        # Get dictionary of action type information
        action_type = self.actionTypes[action_type_name]

        # Create and register the action
        act = Action(action_type, action_name, **kwargs)
        self.registerAction(act)

        return act

    def addActionDependencies(self, action, dependencies):
        '''Adds dependencies to an action, provided those dependencies have
        already been registered in the action list.
        '''
        if not isinstance(dependencies, list):
            dependencies = [dependencies]  # get into list form if singular

        for dep in dependencies:
            # Make sure the dependency is already registered
            if dep in self.actions.values():
                action.addDependency(dep)
            else:
                raise Exception(f"New action '{action.name}' has a dependency '{dep.name}' this is not in the action list.")

    def visualizeActions(self):
        '''Generate and draw a graph of the action dependencies, highlighting
        the longest (critical) path in red and starter actions in green.

        Returns
        -------
        networkx.DiGraph
            The dependency graph (edges carry a 'duration' attribute).
        '''
        # Create the graph (note: actions with no dependencies in either
        # direction do not appear, since only edges are added)
        G = nx.DiGraph()
        for item, data in self.actions.items():
            for dep in data.dependencies:
                G.add_edge(dep, item, duration=data.duration)  # store duration as edge attribute

        # Compute longest path & total duration
        longest_path = nx.dag_longest_path(G, weight='duration')
        longest_path_edges = list(zip(longest_path, longest_path[1:]))  # convert path into edge pairs
        total_duration = sum(self.actions[node].duration for node in longest_path)

        # BUG FIX: layout is computed unconditionally so `pos` is always
        # defined for the starter-node coloring loop below (it was previously
        # only assigned when the longest path was non-empty).
        pos = nx.shell_layout(G)

        if len(longest_path) >= 1:
            last_node = longest_path[-1]  # identify last node of the longest path
            # Draw all nodes and edges (default gray)
            nx.draw(G, pos, with_labels=True, node_size=500,
                    node_color='skyblue', font_size=10, font_weight='bold',
                    font_color='black', edge_color='gray')

            # Highlight longest path in red
            nx.draw_networkx_edges(G, pos, edgelist=longest_path_edges, edge_color='red', width=2)

            # Annotate last node with total duration in red
            plt.text(pos[last_node][0], pos[last_node][1] - 0.1, f"{total_duration:.2f} hr",
                     fontsize=12, color='red', fontweight='bold', ha='center')

        plt.axis('equal')

        # Color starter nodes (without dependencies) green
        i = 0
        for node in G.nodes():
            if G.in_degree(node) == 0:  # check if the node has no incoming edges
                nx.draw_networkx_nodes(G, pos, nodelist=[node], node_color='green',
                                       node_size=500, label='Action starters' if i == 0 else None)
                i += 1
        plt.legend()
        return G

    def registerTask(self, task):
        '''Registers an already created task, renaming it if necessary so the
        registry key is unique.
        '''
        if task.name in self.tasks:  # check if there is already a key with the same name
            # BUG FIX: previously `raise Warning(f"Action ...")` — wrong noun
            # for a Task, and the raise made the renaming below unreachable.
            print(f"Task name '{task.name}' is in the tasks list so incrementing it...")
            task.name = increment_name(task.name)

        # Add it to the tasks dictionary
        self.tasks[task.name] = task

    def addTask(self, actions, action_sequence, task_name, **kwargs):
        '''Creates a task from the given actions and sequence, registers it,
        and returns the new Task object.
        '''
        task = Task(actions, action_sequence, task_name, **kwargs)
        self.registerTask(task)
        return task

    def findCompatibleVessels(self):
        '''Go through actions and identify which vessels have the required
        capabilities (could be based on capability presence, or quantitative).
        '''
        pass

    def figureOutTaskRelationships(self):
        '''Calculate timing within tasks and then figure out constraints
        between tasks.

        Returns
        -------
        numpy.ndarray
            (n x n) matrix of minimum required start-time offsets between tasks.
        '''
        # Figure out task durations (for a given set of asset assignments?)
        for task in self.tasks.values():
            task.calcTiming()

        # Figure out timing constraints between tasks based on action dependencies
        n = len(self.tasks)
        dt_min = np.zeros((n, n))  # matrix of required time offsets between tasks

        for i1, task1 in enumerate(self.tasks.values()):
            for i2, task2 in enumerate(self.tasks.values()):
                # look at all action dependencies between tasks 1 and 2 and
                # identify the limiting case (the largest time offset)
                dt_min_1_2, dt_min_2_1 = findTaskDependencies(task1, task2)

                # for now, just look in one direction
                dt_min[i1, i2] = dt_min_1_2

        return dt_min
I + ''' + + time_1_to_2 = [] + time_2_to_1 = [] + + # Look for any dependencies where act2 depends on act1: + #for i1, act1 in enumerate(task1.actions.values()): + # for i2, act2 in enumerate(task2.actions.values()): + for a1, act1 in task1.actions.items(): + for a2, act2 in task2.actions.items(): + + if a1 in act2.dependencies: # if act2 depends on act1 + time_1_to_2.append(task1.actions_ti[a1] + act1.duration + - task2.actions_ti[a2]) + + if a2 in act1.dependencies: # if act2 depends on act1 + time_2_to_1.append(task2.actions_ti[a2] + act2.duration + - task1.actions_ti[a1]) + + print(time_1_to_2) + print(time_2_to_1) + + dt_min_1_2 = min(time_1_to_2) # minimum time required from t1 start to t2 start + dt_min_2_1 = min(time_2_to_1) # minimum time required from t2 start to t1 start + + if dt_min_1_2 + dt_min_2_1 > 0: + print(f"The timing between these two tasks seems to be impossible...") + + breakpoint() + return dt_min_1_2, dt_min_2_1 + + +def implementStrategy_staged(sc): + '''This sets up Tasks in a way that implements a staged installation + strategy where all of one thing is done before all of a next thing. 
+ ''' + + # ----- Create a Task for all the anchor installs ----- + + # gather the relevant actions + acts = [] + for action in sc.actions.values(): + if action.type == 'install_anchor': + acts.append(action) + + # create a dictionary of dependencies indicating that these actions are all in series + act_sequence = {} # key is action name, value is a list of what action names are to be completed before it + for i in range(len(acts)): + if i==0: # first action has no dependencies + act_sequence[acts[i].name] = [] + else: # remaining actions are just a linear sequence + act_sequence[acts[i].name] = [ acts[i-1].name ] # (previous action must be done first) + + # create the task, passing in the sequence of actions + sc.addTask(acts, act_sequence, 'install_all_anchors') + + # ----- Create a Task for all the mooring installs ----- + + # gather the relevant actions + acts = [] + # first load each mooring + for action in sc.actions.values(): + if action.type == 'load_mooring': + acts.append(action) + # next lay each mooring (eventually route logic could be added) + for action in sc.actions.values(): + if action.type == 'lay_mooring': + acts.append(action) + + # create a dictionary of dependencies indicating that these actions are all in series + act_sequence = {} # key is action name, value is a list of what action names are to be completed before it + for i in range(len(acts)): + if i==0: # first action has no dependencies + act_sequence[acts[i].name] = [] + else: # remaining actions are just a linear sequence + act_sequence[acts[i].name] = [ acts[i-1].name ] # (previous action must be done first) + + # create the task, passing in the sequence of actions + sc.addTask(acts, act_sequence, 'install_all_moorings') + + + # ----- Create a Task for the platform tow-out and hookup ----- + + # gather the relevant actions + acts = [] + # first tow out the platform + acts.append(sc.actions['tow']) + # next hook up each mooring + for action in sc.actions.values(): + if action.type == 
'mooring_hookup': + acts.append(action) + + # create a dictionary of dependencies indicating that these actions are all in series + act_sequence = {} # key is action name, value is a list of what action names are to be completed before it + for i in range(len(acts)): + if i==0: # first action has no dependencies + act_sequence[acts[i].name] = [] + else: # remaining actions are just a linear sequence + act_sequence[acts[i].name] = [ acts[i-1].name ] # (previous action must be done first) + + # create the task, passing in the sequence of actions + sc.addTask(acts, act_sequence, 'tow_and_hookup') + + + +if __name__ == '__main__': + '''This is currently a script to explore how some of the workflow could + work. Can move things into functions/methods as they solidify. + ''' + + # ----- Load up a Project ----- + + from famodel.project import Project + + + #%% Section 1: Project without RAFT + print('Creating project without RAFT\n') + print(os.getcwd()) + # create project object + # project = Project(file='C:/Code/FAModel/examples/OntologySample200m_1turb.yaml', raft=False) # for Windows + project = Project(file='calwave_ontology.yaml', raft=False) # for Mac + # create moorpy system of the array, include cables in the system + project.getMoorPyArray(cables=1) + # plot in 3d, using moorpy system for the mooring and cable plots + # project.plot2d() + # project.plot3d() + + ''' + # project.arrayWatchCircle(ang_spacing=20) + # save envelopes from watch circle information for each mooring line + for moor in project.mooringList.values(): + moor.getEnvelope() + + # plot motion envelopes with 2d plot + project.plot2d(save=True, plot_bathymetry=False) + ''' + + + # ----- Initialize the action stuff ----- + + sc = Scenario() # class instance holding most of the info + + + # Parse out the install steps required + + for akey, anchor in project.anchorList.items(): + + ## Test action.py for anchor install + + # add and register anchor install action(s) + a1 = 
if __name__ == '__main__':
    '''This is currently a script to explore how some of the workflow could
    work. Can move things into functions/methods as they solidify.
    '''

    # ----- Load up a Project -----

    from famodel.project import Project


    #%% Section 1: Project without RAFT
    print('Creating project without RAFT\n')
    print(os.getcwd())
    # create project object
    # project = Project(file='C:/Code/FAModel/examples/OntologySample200m_1turb.yaml', raft=False) # for Windows
    project = Project(file='calwave_ontology.yaml', raft=False) # for Mac
    # create moorpy system of the array, include cables in the system
    project.getMoorPyArray(cables=1)
    # plot in 3d, using moorpy system for the mooring and cable plots
    # project.plot2d()
    # project.plot3d()

    '''
    # project.arrayWatchCircle(ang_spacing=20)
    # save envelopes from watch circle information for each mooring line
    for moor in project.mooringList.values():
        moor.getEnvelope()

    # plot motion envelopes with 2d plot
    project.plot2d(save=True, plot_bathymetry=False)
    '''


    # ----- Initialize the action stuff -----

    sc = Scenario() # class instance holding most of the info


    # Parse out the install steps required

    for akey, anchor in project.anchorList.items():

        ## Test action.py for anchor install

        # add and register anchor install action(s)
        a1 = sc.addAction('install_anchor', f'install_anchor-{akey}', objects=[anchor])
        duration, cost = a1.evaluateAssets({
            'carrier' : sc.vessels["Jag"],
            'operator':sc.vessels["San_Diego"]
            })
        print(f'Anchor install action {a1.name} duration: {duration:.2f} days, cost: ${cost:,.0f}')

        # register the actions as necessary for the anchor <<< do this for all objects??
        anchor.install_dependencies = [a1]


    hookups = [] # list of hookup actions

    for mkey, mooring in project.mooringList.items():

        # note origin and destination

        # --- lay out all the mooring's actions (and their links)

        ## Test action.py for mooring load

        # create load vessel action
        a2 = sc.addAction('load_mooring', f'load_mooring-{mkey}', objects=[mooring])
        #duration, cost = a2.evaluateAssets({'carrier2' : sc.vessels["HL_Giant"], 'carrier1' : sc.vessels["Barge_squid"], 'operator' : sc.vessels["HL_Giant"]})
        # NOTE(review): the evaluateAssets call above is commented out, so the
        # duration/cost printed below are stale values left over from the
        # anchor loop — confirm before trusting these printouts.
        print(f'Mooring load action {a2.name} duration: {duration:.2f} days, cost: ${cost:,.0f}')

        # create ship out mooring action

        # create lay mooring action
        a3 = sc.addAction('lay_mooring', f'lay_mooring-{mkey}', objects=[mooring], dependencies=[a2])
        sc.addActionDependencies(a3, mooring.attached_to[0].install_dependencies) # in case of shared anchor
        # NOTE(review): same stale duration/cost issue as the print above.
        print(f'Lay mooring action {a3.name} duration: {duration:.2f} days, cost: ${cost:,.0f}')

        # mooring could be attached to anchor here - or could be lowered with anchor!!
        #(r=r_anch, mooring=mooring, anchor=mooring.anchor...)
        # the action creator can record any dependencies related to actions of the anchor

        # create hookup action
        a4 = sc.addAction('mooring_hookup', f'mooring_hookup-{mkey}',
                          objects=[mooring, mooring.attached_to[1]], dependencies=[a2, a3])
        #(r=r, mooring=mooring, platform=platform, depends_on=[a4])
        # the action creator can record any dependencies related to actions of the platform

        hookups.append(a4)


    # add the FOWT install action (only the first platform in the project is used here)
    a5 = sc.addAction('tow', 'tow', objects=[list(project.platformList.values())[0]])
    for a in hookups:
        sc.addActionDependencies(a, [a5]) # make each hookup action dependent on the FOWT being towed out



    # ----- Generate tasks (groups of Actions according to specific strategies) -----

    #t1 = Task(sc.actions, 'install_mooring_system')

    # ----- Do some graph analysis -----

    G = sc.visualizeActions()

    # make some tasks with one strategy...
    implementStrategy_staged(sc)


    # dt_min = sc.figureOutTaskRelationships()



    # ----- Check tasks for suitable vessels and the associated costs/times -----

    # preliminary/temporary test of anchor install asset suitability
    for akey, anchor in project.anchorList.items():
        for a in anchor.install_dependencies: # go through required actions (should just be the anchor install)
            a.evaluateAssets({'carrier' : sc.vessels["San_Diego"]}) # see if this example vessel can do it


    # ----- Generate the task_asset_matrix for scheduler -----
    # UNUSED FOR NOW
    task_asset_matrix = np.zeros((len(sc.tasks), len(sc.vessels), 2))
    for i, task in enumerate(sc.tasks.values()):
        row = task.get_row(sc.vessels)
        if row.shape != (len(sc.vessels), 2):
            # NOTE(review): the check expects (len(sc.vessels), 2) but the
            # message interpolates (2, len(sc.vessels)) — the tuple order in
            # the message looks transposed; confirm the intended shape.
            raise Exception(f"Task '{task.name}' get_row output has wrong shape {row.shape}, should be {(2, len(sc.vessels))}")
        task_asset_matrix[i, :] = row

    # ----- Call the scheduler -----
    # for timing with weather windows and vessel assignments

    records = []
    for task in sc.tasks.values():
        print('XXXXXXX')
        print(task.name)
        for act in task.actions.values():
            print(f"  {act.name}: duration: {act.duration} start time: {task.actions_ti[act.name]}")
        # start = float(task.actions_ti[name])  # start time [hr]
        # dur   = float(act.duration)           # duration [hr]
        # end   = start + dur

        # records.append({
        #     'task'       : task.name,
        #     'action'     : name,
        #     'duration_hr': dur,
        #     'time_label' : f'{start:.1f}–{end:.1f} hr',
        #     'periods'    : [(start, end)],   # ready for future split periods
        #     'start_hr'   : start,            # optional but handy
        #     'end_hr'     : end
        # })

    # Example:
    # for r in records:
    #     print(f"{r['task']} :: {r['action']} duration_hr={r['duration_hr']:.1f} "
    #           f"start={r['start_hr']:.1f} label='{r['time_label']}' periods={r['periods']}")


    # ----- Run the simulation -----
    '''
    for t in np.arange(8760):

        # run the actions - these will set the modes and velocities of things...
        for a in actionList:
            if a.status == 0:
                pass
                #check if the event should be initiated
            elif a.status == 1:
                a.timestep()  # advance the action
            # if status == 2: finished, then nothing to do

        # run the time integrator to update the states of things...
        for v in self.vesselList:
            v.timestep()



        # log the state of everything...
    '''




    plt.show()
+ - pretension + - weight + +platform: # can be wec + - mass + - draft + - wec + +anchor: + - mass + - length + +component: + - mass + - length + +turbine: + +cable: + +vessel: + +#mooring: +# install sequence: +# ship out mooring +# lay mooring +# attach mooring-anchor (ROV) +# hookup mooring-platform \ No newline at end of file diff --git a/famodel/irma/calwave_ontology.yaml b/famodel/irma/calwave_ontology.yaml new file mode 100644 index 00000000..7056b323 --- /dev/null +++ b/famodel/irma/calwave_ontology.yaml @@ -0,0 +1,257 @@ +type: draft/example of floating array ontology under construction +name: +comments: +# Site condition information +site: + general: + water_depth : 200 # [m] uniform water depth + rho_water : 1025.0 # [kg/m^3] water density + rho_air : 1.225 # [kg/m^3] air density + mu_air : 1.81e-05 # air dynamic viscosity + #... + + boundaries: # project or lease area boundary, via file or vertex list + file: # filename of x-y vertex coordinates [m] + x_y: # list of polygon vertices in order [m] + - [-3000, -3000] + - [-3000, 3000] + - [3000, 3000] + - [3000, -3000] + + bathymetry: + file: './calwave_bathymetry.txt' + + seabed: + x : [-10901, 0, 10000] + y : [-10900, 0, 10000 ] + + type_array: + - [mud_soft , mud_firm , mud_soft] + - [mud_soft , mud_firm , mud_soft] + - [mud_soft , mud_firm , mud_soft] + + soil_types: + mud_soft: + Su0 : [2.39] # [kPa] + k : [1.41] # [kPa/m] + depth: [0] # [m] + mud_firm: + Su0 : [23.94] # [kPa] + k : [2.67] # [kPa/m] + depth: [0] # [m] + + metocean: + extremes: # extreme values for specified return periods (in years) + keys : [ Hs , Tp , WindSpeed, TI, Shear, Gamma, CurrentSpeed ] + data : + 1: [ 1 ,2 ,3 ] + 10: [ 1 , 2 , 3 ] + 50: [ 1 , 2 , 3 ] + 500: [ 1 , 2 , 3 ] + + probabalistic_bins: + keys : [ prob , Hs , Tp, WindSpeed, TI, Shear, Gamma, CurrentSpeed, WindDir, WaveDir, CurrentDir ] + data : + - [ 0.010 , 1 , 1 ] + - [ 0.006 , 1 , 1 ] + - [ 0.005 , 1 , 1 ] + + time_series : + file: 'metocean_timeseries.csv' + + 
resource : + file: 'windresource' + + RAFT_cases: + keys : [wind_speed, wind_heading, turbulence, turbine_status, yaw_misalign, wave_spectrum, wave_period, wave_height, wave_heading ] + data : # m/s deg % or e.g. IIB_NTM string deg string (s) (m) (deg) + - [ 0, 0, 0, operating, 0, JONSWAP, 12, 6, 0 ] + # - [ 16, 0, IIB_NTM, operating, 0, JONSWAP, 12, 6, 30 ] + # - [ 10.59, 0, 0.05, operating, 0, JONSWAP, 15.75, 11.86, 0 ] + + RAFT_settings: + min_freq : 0.001 # [Hz] lowest frequency to consider, also the frequency bin width + max_freq : 0.20 # [Hz] highest frequency to consider + XiStart : 0 # sets initial amplitude of each DOF for all frequencies + nIter : 4 # sets how many iterations to perform in Model.solveDynamics() + +# ----- Array-level inputs ----- + +# Wind turbine array layout +array: + keys : [ID, topsideID, platformID, mooringID, x_location, y_location, heading_adjust] + data : # ID# ID# ID# [m] [m] [deg] + - [wec, 1, 1, ms1, -1600, -1600, 180 ] + +# ----- turbines and platforms ----- + +topsides: + + - type : turbine + mRNA : 991000 # [kg] RNA mass + IxRNA : 0 # [kg-m2] RNA moment of inertia about local x axis (assumed to be identical to rotor axis for now, as approx) [kg-m^2] + IrRNA : 0 # [kg-m2] RNA moment of inertia about local y or z axes [kg-m^2] + xCG_RNA : 0 # [m] x location of RNA center of mass [m] (Actual is ~= -0.27 m) + hHub : 150.0 # [m] hub height above water line [m] + Fthrust : 1500.0E3 # [N] temporary thrust force to use + + I_drivetrain: 318628138.0 # full rotor + drivetrain inertia as felt on the high-speed shaft + + nBlades : 3 # number of blades + Zhub : 150.0 # hub height [m] + Rhub : 3.97 # hub radius [m] + precone : 4.0 # [deg] + shaft_tilt : 6.0 # [deg] + overhang : -12.0313 # [m] + aeroMod : 1 # 0 aerodynamics off; 1 aerodynamics on + +platform: + + type : WEC + potModMaster : 1 # [int] master switch for potMod variables; 0=keeps all member potMod vars the same, 1=turns all potMod vars to False (no HAMS), 2=turns all potMod 
vars to True (no strip) + dlsMax : 5.0 # maximum node splitting section amount for platform members; can't be 0 + qtfPath : 'IEA-15-240-RWT-UMaineSemi.12d' # path to the qtf file for the platform + rFair : 58 + zFair : -14 + + members: # list all members here + + - name : center_column # [-] an identifier (no longer has to be number) + type : 2 # [-] + rA : [ 0, 0, -20] # [m] end A coordinates + rB : [ 0, 0, 15] # [m] and B coordinates + shape : circ # [-] circular or rectangular + gamma : 0.0 # [deg] twist angle about the member's z-axis + potMod : True # [bool] Whether to model the member with potential flow (BEM model) plus viscous drag or purely strip theory + # --- outer shell including hydro--- + stations : [0, 1] # [-] location of stations along axis. Will be normalized such that start value maps to rA and end value to rB + d : 10.0 # [m] diameters if circular or side lengths if rectangular (can be pairs) + t : 0.05 # [m] wall thicknesses (scalar or list of same length as stations) + Cd : 0.6 # [-] transverse drag coefficient (optional, scalar or list of same length as stations) + Ca : 0.93 # [-] transverse added mass coefficient (optional, scalar or list of same length as stations) + CdEnd : 0.6 # [-] end axial drag coefficient (optional, scalar or list of same length as stations) + CaEnd : 1.0 # [-] end axial added mass coefficient (optional, scalar or list of same length as stations) + rho_shell : 7850 # [kg/m3] + # --- handling of end caps or any internal structures if we need them --- + cap_stations : [ 0 ] # [m] location along member of any inner structures (in same scaling as set by 'stations') + cap_t : [ 0.001 ] # [m] thickness of any internal structures + cap_d_in : [ 0 ] # [m] inner diameter of internal structures (0 for full cap/bulkhead, >0 for a ring shape) + + + - name : outer_column # [-] an identifier (no longer has to be number) + type : 2 # [-] + rA : [51.75, 0, -20] # [m] end A coordinates + rB : [51.75, 0, 15] # [m] and B coordinates + 
heading : [ 60, 180, 300] # [deg] heading rotation of column about z axis (for repeated members) + shape : circ # [-] circular or rectangular + gamma : 0.0 # [deg] twist angle about the member's z-axis + potMod : True # [bool] Whether to model the member with potential flow (BEM model) plus viscous drag or purely strip theory + # --- outer shell including hydro--- + stations : [0, 35] # [-] location of stations along axis. Will be normalized such that start value maps to rA and end value to rB + d : 12.5 # [m] diameters if circular or side lengths if rectangular (can be pairs) + t : 0.05 # [m] wall thicknesses (scalar or list of same length as stations) + Cd : 0.6 # [-] transverse drag coefficient (optional, scalar or list of same length as stations) + Ca : 0.93 # [-] transverse added mass coefficient (optional, scalar or list of same length as stations) + CdEnd : 1.0 # [-] end axial drag coefficient (optional, scalar or list of same length as stations) + CaEnd : 0.7 # value of 3.0 gives more heave response # [-] end axial added mass coefficient (optional, scalar or list of same length as stations) + rho_shell : 7850 # [kg/m3] + # --- ballast --- + l_fill : 1.4 # [m] + rho_fill : 5000 # [kg/m3] + # --- handling of end caps or any internal structures if we need them --- + cap_stations : [ 0 ] # [m] location along member of any inner structures (in same scaling as set by 'stations') + cap_t : [ 0.001 ] # [m] thickness of any internal structures + cap_d_in : [ 0 ] # [m] inner diameter of internal structures (0 for full cap/bulkhead, >0 for a ring shape) + + + - name : pontoon # [-] an identifier (no longer has to be number) + type : 2 # [-] + rA : [ 5 , 0, -16.5] # [m] end A coordinates + rB : [ 45.5, 0, -16.5] # [m] and B coordinates + heading : [ 60, 180, 300] # [deg] heading rotation of column about z axis (for repeated members) + shape : rect # [-] circular or rectangular + gamma : 0.0 # [deg] twist angle about the member's z-axis + potMod : False # [bool] Whether 
to model the member with potential flow (BEM model) plus viscous drag or purely strip theory + # --- outer shell including hydro--- + stations : [0, 40.5] # [-] location of stations along axis. Will be normalized such that start value maps to rA and end value to rB + d : [12.4, 7.0] # [m] diameters if circular or side lengths if rectangular (can be pairs) + t : 0.05 # [m] wall thicknesses (scalar or list of same length as stations) + Cd : [1.5, 2.2 ] # [-] transverse drag coefficient (optional, scalar or list of same length as stations) + Ca : [2.2, 0.2 ] # [-] transverse added mass coefficient (optional, scalar or list of same length as stations) + CdEnd : 0.0 # [-] end axial drag coefficient (optional, scalar or list of same length as stations) + CaEnd : 0.0 # [-] end axial added mass coefficient (optional, scalar or list of same length as stations) + rho_shell : 7850 # [kg/m3] + l_fill : 40.5 # [m] + rho_fill : 1025.0 # [kg/m3] + + + - name : upper_support # [-] an identifier (no longer has to be number) + type : 2 # [-] + rA : [ 5 , 0, 14.545] # [m] end A coordinates + rB : [ 45.5, 0, 14.545] # [m] and B coordinates + heading : [ 60, 180, 300] # [deg] heading rotation of column about z axis (for repeated members) + shape : circ # [-] circular or rectangular + gamma : 0.0 # [deg] twist angle about the member's z-axis + potMod : False # [bool] Whether to model the member with potential flow (BEM model) plus viscous drag or purely strip theory + # --- outer shell including hydro--- + stations : [0, 1] # [-] location of stations along axis. 
Will be normalized such that start value maps to rA and end value to rB + d : 0.91 # [m] diameters if circular or side lengths if rectangular (can be pairs) + t : 0.01 # [m] wall thicknesses (scalar or list of same length as stations) + Cd : 0.0 # [-] transverse drag coefficient (optional, scalar or list of same length as stations) + Ca : 0.0 # [-] transverse added mass coefficient (optional, scalar or list of same length as stations) + CdEnd : 0.0 # [-] end axial drag coefficient (optional, scalar or list of same length as stations) + CaEnd : 0.0 # [-] end axial added mass coefficient (optional, scalar or list of same length as stations) + rho_shell : 7850 # [kg/m3] + + +# ----- Mooring system ----- + +# Mooring system descriptions (each for an individual FOWT with no sharing) +mooring_systems: + + ms1: + name: 2-line semi-taut polyester mooring system with a third line shared + + keys: [MooringConfigID, heading, anchorType, lengthAdjust] + data: + - [ semitaut-poly_1, 0 , suction1, 0 ] + - [ semitaut-poly_1, 90 , suction1, 0 ] + - [ semitaut-poly_1, 180 , suction1, 0 ] + - [ semitaut-poly_1, 270 , suction1, 0 ] + + +# Mooring line configurations +mooring_line_configs: + + semitaut-poly_1: # mooring line configuration identifier + + name: Semitaut polyester configuration 1 # descriptive name + + span: 200 + + sections: #in order from anchor to fairlead + - mooringFamily: chain # ID of a mooring line section type + d_nom: .1549 + length: 10.7 # [m] usntretched length of line section + adjustable: True # flags that this section could be adjusted to accommodate different spacings... 
+ - connectorType: h_link + - mooringFamily: polyester # ID of a mooring line section type + d_nom: .182 + length: 199.8 # [m] length (unstretched) + +# Mooring connector properties +mooring_connector_types: + + h_link: + m : 140 # [kg] component mass + v : 0.13 # [m^3] component volumetric displacement + +# Anchor type properties +anchor_types: + + suction1: + type : suction_pile + L : 16.4 # length of pile [m] + D : 5.45 # diameter of pile [m] + zlug : 9.32 # embedded depth of padeye [m] + diff --git a/famodel/irma/calwave_task.py b/famodel/irma/calwave_task.py new file mode 100644 index 00000000..dcc47428 --- /dev/null +++ b/famodel/irma/calwave_task.py @@ -0,0 +1,323 @@ +"""Enhanced Task class for CalWave scheduling + +- Adds earliest-start (critical-path) scheduler with single-resource constraints +- Keeps legacy level-based checkpoint scheduler (via getSequenceGraph) + +Style: single quotes, spaces around + and -, no spaces around * or / +""" + +from collections import defaultdict +import yaml + +class Task: + def __init__(self, name, actions, action_sequence, **kwargs): + ''' + Create a Task from a list of actions and a dependency map. + + Parameters + ---------- + name : str + Name of the task. + actions : list + All Action objects that are part of this task. + action_sequence : dict or None + {action_name: [predecessor_name, ...]}. + If None, dependencies are inferred from Action.dependencies. 
        kwargs :
            Optional tuning:
              • strategy='earliest' | 'levels'
              • enforce_resources=True | False
              • resource_roles=('vessel','carrier','operator')
        '''
        # ---- options with sensible defaults (all via kwargs) ----
        strategy = kwargs.get('strategy', 'earliest')
        enforce_resources = kwargs.get('enforce_resources', True)
        resource_roles = kwargs.get('resource_roles', ('vessel', 'carrier', 'operator'))

        # ---- core storage ----
        self.name = name
        self.actions = {a.name: a for a in actions}  # actions keyed by name for O(1) lookup
        # allow None → infer solely from Action.dependencies
        self.action_sequence = {k: list(v) for k, v in (action_sequence or {}).items()}
        self.actions_ti = {}     # action name -> scheduled start time, filled by the scheduler
        self.duration = 0.0      # total task duration, set by the scheduler
        self.cost = 0.0          # summed action costs, set below
        self.ti = 0.0            # task start time (relative origin)
        self.tf = 0.0            # task finish time = ti + duration
        self.resource_roles = tuple(resource_roles)
        self.enforce_resources = enforce_resources
        self.strategy = strategy
        # ---- scheduling ----
        # Scheduling runs immediately at construction; re-run by calling either
        # _schedule_by_levels() or _schedule_by_earliest() after edits.
        if self.strategy == 'levels':
            self._schedule_by_levels()
        else:
            self._schedule_by_earliest(enforce_resources=self.enforce_resources)

        # ---- roll-ups ----
        # 'or 0.0' also maps a cost of None to zero before summation
        self.cost = sum(float(getattr(a, 'cost', 0.0) or 0.0) for a in self.actions.values())

    # -------- Convenience constructors / helpers (build deps inside the class) --------

    @staticmethod
    def _names_from_dependencies(a):
        '''Return the dependency names of action `a` as a de-duplicated list
        of strings, preserving first-seen order and dropping self-references.
        Dependencies may be stored as strings or as objects with a .name.'''
        deps = []
        for d in list(getattr(a, 'dependencies', []) or []):
            deps.append(d if isinstance(d, str) else getattr(d, 'name', str(d)))
        seen = set()
        clean = []
        for dn in deps:
            if dn != a.name and dn not in seen:
                clean.append(dn); seen.add(dn)
        return clean

    @classmethod
    def from_scenario(cls, sc, name, **kwargs):
        '''Build a Task from a Scenario `sc`, inferring the dependency map
        from each action's own .dependencies attribute. An optional
        `extra_dependencies` kwarg ({action_name: [dep_name, ...]}) is merged
        on top; remaining kwargs are forwarded to the Task constructor.'''
        actions = list(sc.actions.values())
        base = {a.name: cls._names_from_dependencies(a) for a in actions}
        extra = kwargs.pop('extra_dependencies', None) or {}
        for k, v in extra.items():
            base.setdefault(k, [])
            for d in v:
                if d != k and d not in base[k]:  # skip self-deps and duplicates
                    base[k].append(d)
        return cls(name=name, actions=actions, action_sequence=base, **kwargs)

    # ---------------------------
Resource & Scheduling ---------------------------

    def _action_resources(self, a):
        '''Return set of resource keys (e.g., vessel names) this action occupies.
        Looks into a.assigned_assets for roles in self.resource_roles.
        If none assigned, returns {'unknown'} to avoid blocking anything real.
        '''
        aa = getattr(a, 'assigned_assets', {}) or {}
        keys = []
        for role in self.resource_roles:
            v = aa.get(role)
            if v is not None:
                keys.append(getattr(v, 'name', str(v)))
        return set(keys) if keys else {'unknown'}

    def _schedule_by_earliest(self, enforce_resources=True):
        '''Earliest-start (critical-path) schedule with optional single-resource blocking.

        Topologically processes the dependency graph (Kahn's algorithm with an
        alphabetically sorted ready queue for determinism). Each action starts
        at the max of (a) its latest predecessor finish and (b), if
        enforce_resources, the time each of its resources frees up. Stamps
        start_hr/end_hr/period/label_time onto every action and sets
        self.actions_ti, self.duration and self.tf. Raises RuntimeError on a
        dependency cycle or a missing predecessor.
        '''
        # Merge dependencies from action attributes and explicit action_sequence
        deps = {}
        for name, a in self.actions.items():
            dlist = []
            # from Action.dependencies (may be objects or names)
            for d in list(getattr(a, 'dependencies', []) or []):
                dlist.append(d if isinstance(d, str) else getattr(d, 'name', str(d)))
            # from explicit dict
            dlist.extend(self.action_sequence.get(name, []))
            # hygiene: drop self-references and duplicates, keep first-seen order
            seen = set()
            clean = []
            for d in dlist:
                if d != name and d not in seen:
                    clean.append(d)
                    seen.add(d)
            deps[name] = clean

        # ensure all nodes present
        for name in self.actions.keys():
            deps.setdefault(name, [])

        # Build children and indegrees
        children = {n: [] for n in self.actions}
        indeg = {n: len(dl) for n, dl in deps.items()}
        for child, dlist in deps.items():
            for parent in dlist:
                # setdefault tolerates a parent named in deps but absent from actions
                children.setdefault(parent, []).append(child)

        # Ready queue (roots): actions with no unresolved predecessors
        ready = sorted([n for n, k in indeg.items() if k == 0])

        start, finish = {}, {}
        avail = {}  # resource -> available time
        scheduled = []

        while ready:
            name = ready.pop(0)  # alphabetical pop keeps the schedule deterministic
            a = self.actions[name]
            scheduled.append(name)

            # earliest time all predecessors are done
            dep_ready = 0.0
            for d in deps[name]:
                if d not in finish:
                    raise RuntimeError(f"Dependency '{d}' of '{name}' missing finish time.")
                dep_ready = max(dep_ready, finish[d])

            # earliest time all required resources are free
            if enforce_resources:
                res_keys = self._action_resources(a)
                res_ready = max(avail.get(r, 0.0) for r in res_keys) if res_keys else 0.0
            else:
                res_ready = 0.0

            s = max(dep_ready, res_ready)
            dur = float(getattr(a, 'duration', 0.0) or 0.0)
            f = s + dur

            start[name] = s
            finish[name] = f

            # block each occupied resource until this action finishes
            if enforce_resources:
                for r in self._action_resources(a):
                    avail[r] = f

            # release children
            for c in children.get(name, []):
                indeg[c] -= 1
                if indeg[c] == 0:
                    ready.append(c)
            ready.sort()

        if len(scheduled) != len(self.actions):
            missing = [n for n in self.actions if n not in scheduled]
            raise RuntimeError(f'Cycle or missing predecessors; unscheduled: {missing}')

        # Stamp fields on actions and compute task duration
        self.actions_ti = start
        for a in self.actions.values():
            a.start_hr = start[a.name]
            dur = float(getattr(a, 'duration', 0.0) or 0.0)
            a.end_hr = a.start_hr + dur
            a.period = (a.start_hr, a.end_hr)
            a.label_time = f'{dur:.1f}'
        self.duration = max((finish[n] for n in finish), default=0.0)
        self.tf = self.ti + self.duration

    def _schedule_by_levels(self):
        '''Wrapper that reuses the legacy level-based sequence graph to set starts.

        Assigns each action in self.action_sequence a level (1 + max level of
        its predecessors); all actions at a level start together and the level
        lasts as long as its longest action. Raises ValueError on a cycle.
        NOTE(review): only names present in action_sequence get levels; any
        other action silently starts at 0.0 via starts.get() below. Also
        assumes every name in action_sequence exists in self.actions (the
        level_dur comprehension would raise KeyError otherwise) — confirm.
        '''
        # Build levels using provided sequence dict
        levels = {}

        def level_of(a, path):
            # memoized recursive level computation; `path` detects cycles
            if a in levels:
                return levels[a]
            if a in path:
                raise ValueError(f"Cycle detected at '{a}'.")
            path.add(a)
            pres = self.action_sequence.get(a, [])
            if not pres:
                lv = 1
            else:
                # predecessors missing from the sequence dict count as level 1
                lv = 1 + max(level_of(p, path) if p in self.action_sequence else 1 for p in pres)
            levels[a] = lv
            return lv

        for name in self.action_sequence:
            level_of(name, set())

        max_level = max(levels.values(), default=1)
        groups = defaultdict(list)
        for n, lv in levels.items():
            groups[lv].append(n)
        # each level's duration = longest action within it
        level_dur = {lv: max(self.actions[a].duration for a in acts) for lv, acts in groups.items()}

        t = 0.0
        starts = {}
        for lv in range(1, max_level + 1):
            for n in groups.get(lv, []):
                starts[n] = t
            t += level_dur.get(lv, 0.0)

        self.actions_ti = starts
        self.duration = sum(level_dur.values())
        for a in self.actions.values():
            a.start_hr = starts.get(a.name, 0.0)  # unlisted actions default to t=0
            dur = float(getattr(a, 'duration', 0.0) or 0.0)
            a.end_hr = a.start_hr + dur
            a.period = (a.start_hr, a.end_hr)
            a.label_time = f'{dur:.1f}'
        self.tf = self.ti + self.duration

    def extractSeqYaml(self, output_file=None):
        """
        Extract the sequence of actions into a YAML file for user editing.

        Writes {task name: [{action, duration, roles, assets, dependencies}, ...]}
        so a user can edit durations/dependencies and feed the file back in
        through update_from_SeqYaml().

        Args:
            output_file (str): The name of the output YAML file
                (default: '<task name>_sequence.yaml').
        """
        # Write the sequence data to a YAML file
        if output_file is None:
            output_file = f"{self.name}_sequence.yaml"

        # Build the YAML:
        task_data = []
        for action_name, action in self.actions.items():
            # NOTE(review): this treats action.requirements/.dependencies/.assets
            # as dicts, while the schedulers above treat dependencies as a list
            # of names/objects — confirm the Action API before relying on this.
            roles = list(action.requirements.keys())
            deps = list(action.dependencies.keys())
            asset_types = []
            for role, asset in action.assets.items():
                asset_types.append(asset['type'])

            entry = {
                'action': action_name,
                'duration': round(float(action.duration), 2),
                'roles': roles,
                'assets': asset_types,
                'dependencies': deps,
            }
            task_data.append(entry)

        yaml_dict = {self.name: task_data}

        with open(output_file, 'w') as yaml_file:
            yaml.dump(yaml_dict, yaml_file, sort_keys=False)

        print(f"Task sequence YAML file generated: {output_file}")

    def update_from_SeqYaml(self, input_file=None):
        """
        Update the Task object based on a user-edited YAML file.

        Reads back the file written by extractSeqYaml(), applies any edited
        durations to matching actions (unknown action names are skipped with a
        message), then re-runs the scheduler and recomputes the cost roll-up.

        Args
        ----
        input_file : str, optional
            The name of the YAML file (default: '<task name>_sequence.yaml').
        """
        if input_file is None:
            input_file = f"{self.name}_sequence.yaml"

        # Load YAML content
        with open(input_file, "r") as yaml_file:
            seq_data = yaml.safe_load(yaml_file)

        if self.name not in seq_data:
            raise ValueError(f"Task name '{self.name}' not found in YAML file.")

        updated_actions = seq_data[self.name]

        # Reset internal attributes
        self.actions_ti = {}
        self.duration = 0.0
        self.cost = 0.0
        self.ti = 0.0
        self.tf = 0.0

        # Update each action from YAML
        for entry in updated_actions:
            a_name = entry["action"]
            if a_name not in self.actions:
                print(f"Skipping unknown action '{a_name}' (not in current task).")
                continue

            a = self.actions[a_name]

            # Update action duration (fall back to the current value if absent)
            a.duration = float(entry.get("duration", getattr(a, "duration", 0.0)))

            # TODO: Update dependencies
            # TODO: Update roles
            # TODO: Update assets
            # TODO: Update cost

        # ---- re-scheduling ----
        if self.strategy == 'levels':
            self._schedule_by_levels()
        else:
            self._schedule_by_earliest(enforce_resources=self.enforce_resources)

        # ---- re-roll-ups ----
        self.cost = sum(float(getattr(a, 'cost', 0.0) or 0.0) for a in self.actions.values())

        print(f"Task '{self.name}' successfully updated from YAML file: {input_file}")
diff --git a/famodel/irma/calwave_task1.py b/famodel/irma/calwave_task1.py
new file mode 100644
index 00000000..995202c7
--- /dev/null
+++ b/famodel/irma/calwave_task1.py
@@ -0,0 +1,216 @@
# calwave_task1.py
# Build CalWave Task 1 (Anchor installation) following the theory flow:
#   1) addAction → structure only (type, name, objects, deps)
#   2) evaluateAssets → assign vessels/roles (+ durations/costs)
#   3) (schedule/plot handled by your existing tooling)
import matplotlib.pyplot as plt
from famodel.project import Project
from calwave_irma import Scenario
import calwave_chart as chart
# from calwave_task import Task  # calwave_task module (Felipe)
from task import Task  # generic Task module ( Rudy )

import
matplotlib.pyplot as plt
# NOTE(review): matplotlib.pyplot is imported twice in this module — harmless
# but one of the two imports should be removed.

sc = Scenario()  # now sc exists in *this* session
# NOTE(review): module-level Scenario() runs on import and is shadowed by the
# one created under __main__ below — confirm it is intentional (interactive use).

# ---------- Core builder ----------
def build_task1_calwave(sc: Scenario, project: Project):
    """
    Creates Task 1 actions + dependencies (no scheduling/plotting here).

    Builds the pre-op (mobilize/linehaul), per-anchor on-site
    (transit/install/monitor), and post-op (linehaul home/demobilize) action
    graph on `sc` for every anchor in project.anchorList, and returns the
    created actions grouped by phase.
    """

    # --- Pre-ops ---
    mob_sd = sc.addAction('mobilize', 'mobilize_SanDiego')
    linehaul_convoy = sc.addAction(
        'transit_linehaul_tug', 'linehaul_to_site_convoy',
        dependencies=[mob_sd])

    mob_by = sc.addAction(
        'mobilize', 'mobilize_Beyster',
        #dependencies=[mob_sd]
        )
    linehaul_by = sc.addAction(
        'transit_linehaul_self', 'linehaul_to_site_Beyster',
        dependencies=[mob_sd])

    # --- Compute anchor centroid (x,y) for first onsite leg start ---
    anchors_all = list(project.anchorList.values())
    # anchors without an 'r' position are excluded from the centroid
    rs = [getattr(a, 'r', None) for a in anchors_all if getattr(a, 'r', None) is not None]
    xs = [float(r[0]) for r in rs]
    ys = [float(r[1]) for r in rs]
    anchor_centroid = (sum(xs)/len(xs), sum(ys)/len(ys)) if xs and ys else None
    try:
        print('[task1] anchor_centroid =', anchor_centroid)
    except Exception:
        # best-effort debug print only; never let it break the build
        pass

    # --- On-site (domain objects REQUIRED) ---
    installs, onsite_tug, onsite_by, monitors = [], [], [], []

    # first convoy leg starts after the linehaul convoy reaches site
    prev_for_next_tug = linehaul_convoy
    # Beyster's first in-field leg starts after her own linehaul
    prev_for_by = linehaul_by

    for i, (key, anchor) in enumerate(project.anchorList.items(), start=1):
        # 1) Onsite convoy (tug + barge) to this anchor
        a_tug = sc.addAction(
            'transit_onsite_tug', f'transit_convoy-{key}',
            objects=[anchor],
            dependencies=[prev_for_next_tug]  # first = linehaul_convoy; then = previous install
        )

        # 2) Beyster to this anchor (after previous monitor), independent of tug
        a_by = sc.addAction(
            'transit_onsite_self', f'transit_Beyster-{key}',
            objects=[anchor],
            dependencies=[prev_for_by, prev_for_next_tug]
        )

        # Inject centroid for the FIRST onsite legs only (centroid → first anchor)
        if i == 1 and anchor_centroid is not None:
            # 'or {}' guards against an existing meta of None
            a_by.meta = getattr(a_by, 'meta', {}) or {}
            a_by.meta['anchor_centroid'] = anchor_centroid
            a_tug.meta = getattr(a_tug, 'meta', {}) or {}
            a_tug.meta['anchor_centroid'] = anchor_centroid

        # 3) Install at this anchor (wait for both tug+barge and Beyster on station)
        a_inst = sc.addAction(
            'install_anchor', f'install_anchor-{key}',
            objects=[anchor],
            dependencies=[a_tug, a_by]
        )

        # 4) Monitor at this anchor (while anchor is installed)
        a_mon = sc.addAction(
            'monitor_installation', f'monitor_installation-{key}',
            objects=[anchor],
            dependencies=[a_tug]
        )

        # collect handles
        onsite_tug.append(a_tug)
        installs.append(a_inst)
        onsite_by.append(a_by)
        monitors.append(a_mon)

        # chain next legs:
        prev_for_next_tug = a_inst  # next convoy starts from this installed anchor
        prev_for_by = a_mon  # or set to a_inst if you want Beyster to move immediately post-install


    # --- Post-ops (objectless) ---
    linehome_convoy = sc.addAction(
        'transit_linehaul_tug', 'linehaul_to_home_convoy',
        dependencies=monitors)

    linehome_by = sc.addAction(
        'transit_linehaul_self', 'transit_to_home_Beyster',
        dependencies=monitors)

    # --- Post-ops ---
    demob_sd = sc.addAction(
        'demobilize', 'demobilize_SanDiego',
        dependencies=[linehome_convoy])

    demob_by = sc.addAction(
        'demobilize', 'demobilize_Beyster',
        dependencies=[linehome_by])

    # Return a simple list for downstream evaluate/schedule/plot steps
    return {
        'mobilize': [mob_sd, mob_by],
        'linehaul_to_site': [linehaul_convoy, linehaul_by],
        'install': installs,
        'onsite_tug': onsite_tug,
        'onsite_by': onsite_by,
        'monitor': monitors,
        'linehaul_to_home': [linehome_convoy, linehome_by],
        'demobilize': [demob_sd, demob_by]}

# ---------- Assignment step (assign vessels & durations) ----------
def assign_actions(sc: Scenario, actions: dict):
    """
    Assign vessels/roles and set durations where the evaluator doesn't.
    Keeps creation and evaluation clearly separated.

    Expects `actions` in the grouped-by-phase shape returned by
    build_task1_calwave(), and vessels named 'San_Diego', 'Jag' and 'Beyster'
    in sc.vessels (see calwave_vessels.yaml).
    """
    V = sc.vessels  # shorthand

    # Mobilize
    actions['mobilize'][0].assignAssets({'operator': V['San_Diego']})
    actions['mobilize'][1].assignAssets({'operator': V['Beyster']})

    # Transit to site
    convoy_to_site, beyster_to_site = actions['linehaul_to_site']
    convoy_to_site.assignAssets({'carrier': V['Jag'], 'operator': V['San_Diego']})
    beyster_to_site.assignAssets({'vessel': V['Beyster']})

    # Onsite convoy (tug+barge)
    for a_tug in actions['onsite_tug']:
        a_tug.assignAssets({'carrier': V['Jag'], 'operator': V['San_Diego']})

    # Install (Jag carries, San_Diego operates the install)
    for a_inst in actions['install']:
        a_inst.assignAssets({'carrier': V['Jag'], 'operator': V['San_Diego']})

    # Onsite self-propelled (Beyster)
    for a_by in actions['onsite_by']:
        a_by.assignAssets({'vessel': V['Beyster']})

    # Monitor (Beyster as support)
    for a_mon in actions['monitor']:
        a_mon.assignAssets({'support': V['Beyster']})

    # Transit to home
    convoy_to_home, beyster_to_home = actions['linehaul_to_home']
    convoy_to_home.assignAssets({'carrier': V['Jag'], 'operator': V['San_Diego']})
    beyster_to_home.assignAssets({'vessel': V['Beyster']})

    # Demobilize
    actions['demobilize'][0].assignAssets({'operator': V['San_Diego']})
    actions['demobilize'][1].assignAssets({'operator': V['Beyster']})


if __name__ == '__main__':
    # 1) Load ontology that mirrors the sample schema (mooring_systems + mooring_line_configs)
    project = Project(file='calwave_ontology.yaml', raft=False)
    project.getMoorPyArray(cables=1)

    # 2) Scenario with CalWave catalogs
    sc = Scenario()

    # 3) Build (structure only)
    actions = build_task1_calwave(sc, project)

    # 4) Assign (assign vessels/roles)
    assign_actions(sc, actions)

    # # 5) schedule once, in the Task
    # calwave_task1 = Task.from_scenario(
    #     sc,
    #     name='calwave_task1',
    #     strategy='earliest',      # 'earliest' or 'levels'
    #
enforce_resources=False, # keep single-resource blocking if you want it + # resource_roles=('vessel', 'carrier', 'operator')) + + + # 5) Build Task + task1 = Task(name='calwave_task1', actions=sc.actions, action_sequence='dependencies') + + # Check assets + # task1.checkAssets(sc.vessels) + + # task1.updateStartTime(newStart=0) + + # 6) Build the Gantt chart + task1.GanttChart(color_by='asset') + plt.show() + + # Old chart building code: + # 7) build the chart input directly from the Task and plot #TODO: Rudy / Improve this later (maybe include it in Task.py/Scenario and let it plot the absolute time instead of relative time) + chart_view = chart.view_from_task(task1, sc, title='CalWave Task 1 - Anchor installation plan') + chart.plot_task(chart_view, outpath='calwave_task1_chart.png') + + + diff --git a/famodel/irma/calwave_vessels.yaml b/famodel/irma/calwave_vessels.yaml new file mode 100644 index 00000000..f3e4111b --- /dev/null +++ b/famodel/irma/calwave_vessels.yaml @@ -0,0 +1,132 @@ +# This file defines vessels for the WEC CalWave as defined in doc Task 5.4 Comprehensive IO&M and Testing Plan + +San_Diego: + # Crane barge for anchor handling + name: San_Diego + type: crane_barge + transport: + homeport: national_city + route_length_m: 41114 # distance to site + cruise_speed_mps: 2.5 # ~5 kts, from doc + Hs_m: 3 + station_keeping: + type: tug_assist # not self-propelled + capabilities: + bollard_pull: + max_force_t: 30 + site_speed_mps: 0.5 + deck_space: + area_m2: 800 + max_load_t: 1500 + crane: + capacity_t: 150 + hook_height_m: 40 + winch: + max_line_pull_t: 150 + brake_load_t: 300 + speed_mpm: 20 + monitoring_system: + metrics: [load, angle] + sampling_rate_hz: 1 + actions: + mobilize: {} + demobilize: {} + load_cargo: {} + transit_linehaul_tug: {} + transit_onsite_tug: {} + install_anchor: {} + retrieve_anchor: {} + install_mooring: {} + day_rate: 60000 # USD/day estimate + +Jag: + # Pacific Maritime Group tugboat assisting DB San Diego + name: Jag + 
type: tug + transport: + homeport: national_city + route_length_m: 41114 # distance to site + cruise_speed_mps: 3.1 # + Hs_m: 3.5 + station_keeping: + type: conventional + capabilities: + engine: + power_hp: 300 + site_speed_mps: 1.0 + # bollard_pull: + # max_force_t: 30 + winch: + max_line_pull_t: 50 + brake_load_t: 100 + speed_mpm: 20 + actions: + tow: {} + transit_linehaul_tug: {} + transit_onsite_tug: {} + at_site_support: {} + day_rate: 25000 + +Beyster: + # Primary support vessel + name: Beyster + type: research_vessel + transport: + homeport: point_loma + route_length_m: 30558 # distance to site + cruise_speed_mps: 12.9 # 25 kts cruise, from doc + Hs_m: 2.5 + station_keeping: + type: DP1 # Volvo DPS system + capabilities: + engine: + power_hp: 300 + site_speed_mps: 0.5 + deck_space: + area_m2: 17.8 # from doc (192 ft²) + max_load_t: 5 + crane: + capacity_t: 0.30 # starboard knuckle crane, from doc + hook_height_m: 5.2 + # a_frame: + # capacity_t: 2.5 # stern A-frame, from doc + # hook_height_m: 5 # check doc + positioning_system: + accuracy_m: 1.0 + methods: [DPS, GPS] + monitoring_system: + metrics: [pressure, video, comms] + sampling_rate_hz: 5 + actions: + tow: {} + transit_linehaul_self: {} + transit_onsite_self: {} + lay_cable: {} + mooring_hookup: {} + install_wec: {} + monitor_installation: {} + day_rate: 15000 + +# Boston_Whaler: + # # 19 ft Boston Whaler, support vessel + # type: research_vessel + # transport: + # homeport: sio_pier + # route_length_m: 555 # distance to site + # transit_speed_mps: 10.3 # ~20 kts cruise + # Hs_m: 1.5 + # station_keeping: + # type: manual + # capabilities: + # deck_space: + # area_m2: 4 + # max_load_t: 1.1 # ~1134 kg payload, from doc + # # propulsion: + # # outboard_hp: 150 # from doc + # monitoring_system: + # metrics: [visual, diver_support] + # sampling_rate_hz: 1 + # actions: + # diver_support: {} + # tow: {} + # day_rate: 5000 diff --git a/famodel/irma/capabilities.yaml b/famodel/irma/capabilities.yaml new file 
mode 100644 index 00000000..822f5c8b --- /dev/null +++ b/famodel/irma/capabilities.yaml @@ -0,0 +1,193 @@ +# ====================================================================== +# capabilities.yaml +# ---------------------------------------------------------------------- +# This file defines standardized capabilities for vessels and equipment. +# The applicable specifications are listed beneach each capability type. +# Each entry needs numeric values per specific asset in vessels.yaml. +# Vessel actions will be checked against capabilities/actions for validation. +# A unit conversion capability exists so that vessel capability specs can have +# other units (denoted after the spec name with an underscore, e.g., capacity_t) + +# Each specification field has an entry describing its type: +# - capacity (adds, e.g. for deck space) +# - normal (takes maximum value, e.g. for rating) +# - minimum (takes minimum value, e.g. for accuracy) +# - bool (just whether it exists or not) + + +# --- Vessel (on-board) --- +engine: + # description: Engine on-board of the vessel + # fields: + power: capacity # power [W] + site_speed: normal # speed [m/s] + +bollard_pull: + # description: Towing/holding force capability + # fields: + max_force: capacity # bollard pull [N] + site_speed: normal # speed [m/s] + +deck_space: + # description: Clear usable deck area and allowable load + # fields: + area: capacity # usable area [m2] + max_load: normal # allowable deck load [N] + +chain_locker: + # description: Chain storage capacity + # fields: + volume: capacity # storage volume [m3] + +line_reel: + # description: Chain/rope storage on drum or carousel + # fields: + volume: capacity # storage volume [m3] + length_capacity: normal # total rope length storage [m] + +cable_reel: + # description: Cable storage on drum or carousel + # fields: + volume: capacity # storage volume [m3] + length_capacity: normal # total cable length stowable [m] + +winch: + # description: Deck winch pulling 
capability + # fields: + max_line_pull: capacity # continuous line pull [N] + brake_load: normal # static brake holding load [N] + speed: normal # payout/haul speed [m/a] + + +crane: + # description: Main crane lifting capability + # fields: + capacity: capacity # lifting force of crane [N] + hook_height: normal # max hook height [m] + speed: normal # crane speed [m/s] + +station_keeping_by_dynamic_positioning: + # description: DP vessel capability for station keeping + # fields: + type: + # TODO: maybe information about thrusters here? + +station_keeping_by_anchor: + # description: Anchor-based station keeping capability + # fields: + max_hold_force: normal # maximum holding force [N] + +station_keeping_by_bowt: + # description: Station keeping by bowt + # fields: + max_hold_force: normal # maximum holding force [N] + +stern_roller: + # description: Stern roller for overboarding/lowering lines/cables over stern + # fields: + width: normal # roller width [m] + +shark_jaws: + # description: Chain stoppers/jaws for holding chain under tension + # fields: + max_load: normal # maximum holding load [N] + + +# --- Equipment (portable) --- + +pump_surface: + # description: Surface-connected suction pump + # fields: + power: normal + pressure: normal + +pump_subsea: + # description: Subsea suction pump (electric/hydraulic) + # fields: + power: normal + pressure: normal + +pump_grout: + # description: Grout mixing and pumping unit + # fields: + power: normal + flow_rate: capacity + pressure: normal + +hydraulic_hammer: + # description: Impact hammer for pile driving + # fields: + power: normal + energy_per_blow_kJ: normal + +vibro_hammer: + # description: Vibratory hammer + # fields: + power: normal + centrifugal_force: normal + +drilling_machine: + # description: Drilling/rotary socket machine + # fields: + power: normal + +torque_machine: + # description: High-torque rotation unit + # fields: + power: normal + torque: normal + +cable_plough: + # description: + # fields: + 
power: normal + +rock_placement: + # description: System for controlled placement of rock for trench backfill, scour protection, and seabed stabilization. + # fields: + #placement_method: # e.g., fall_pipe, side_dump, grab + fall_pipe_method: bool # whether this is the method used + side_dump_method: bool # whether this is the method used + grab_method: bool # whether this is the method used + max_depth: normal # maximum operational water depth + accuracy_m: minimum # placement accuracy on seabed + rock_size_min: normal # min rock/gravel size + rock_size_max: minimum # max rock/gravel size + +container: + # description: Control/sensors container for power pack and monitoring + # fields: + weight: + dimensions_m: # LxWxH + +rov: + # description: Remotely Operated Vehicle + # fields: + class: # e.g., OBSERVATION, LIGHT, WORK-CLASS + depth_rating: normal + weight: + dimensions_m: # LxWxH + +divers: + # description: Diver support system + # fields: + max_depth: normal + diver_count: normal + +positioning_system: + # description: Seabed placement/positioning aids + # fields: + accuracy: minimum + methods: # e.g., [USBL, LBL, DVL, INS] + +monitoring_system: + # description: Installation performance monitoring + # fields: + metrics: # e.g., [pressure, flow, tilt, torque, bathymetry, berm_shape...] 
+ sampling_rate: normal + +sonar_survey: + # description: Sonar systems for survey and verification + # fields: + types: # e.g., [MBES, SSS, SBP] + resolution: minimum diff --git a/famodel/irma/irma.py b/famodel/irma/irma.py new file mode 100644 index 00000000..a08019ef --- /dev/null +++ b/famodel/irma/irma.py @@ -0,0 +1,893 @@ +"""Core code for setting up a IO&M scenario""" + +import os +import numpy as np +import matplotlib.pyplot as plt + +import moorpy as mp +from moorpy.helpers import set_axes_equal +from moorpy import helpers +import yaml +from copy import deepcopy +import string +try: + import raft as RAFT +except: + pass + +#from shapely.geometry import Point, Polygon, LineString +from famodel.mooring.mooring import Mooring +from famodel.platform.platform import Platform +from famodel.anchors.anchor import Anchor +from famodel.mooring.connector import Connector +from famodel.substation.substation import Substation +from famodel.cables.cable import Cable +from famodel.cables.dynamic_cable import DynamicCable +from famodel.cables.static_cable import StaticCable +from famodel.cables.cable_properties import getCableProps, getBuoyProps, loadCableProps,loadBuoyProps +from famodel.cables.components import Joint +from famodel.turbine.turbine import Turbine +from famodel.famodel_base import Node + +# Import select required helper functions +from famodel.helpers import (check_headings, head_adjust, getCableDD, getDynamicCables, + getMoorings, getAnchors, getFromDict, cleanDataTypes, + getStaticCables, getCableDesign, m2nm, loadYAML, + configureAdjuster, route_around_anchors) + +import networkx as nx +from action import Action, increment_name +from task import Task + +from assets import Vessel, Port +from scheduler import Scheduler + + + +def loadYAMLtoDict(info, already_dict=False): + '''Reads a list or YAML file and prepares a dictionary''' + + if isinstance(info, str): + + with open(info) as file: + data = yaml.load(file, Loader=yaml.FullLoader) + if not data: + 
raise Exception(f'File {info} does not exist or cannot be read. Please check filename.') + elif isinstance(info, list): + data = info + else: + raise Exception('loadYAMLtoDict must be passed a filename or list') + + # Go through contents and product the dictionary + info_dict = {} + + if already_dict: + # assuming it's already a dict + info_dict.update(data) + + else: # a list of dicts with name parameters + # So we will convert into a dict based on those names + for entry in data: + if not 'name' in entry: + print(entry) + raise Exception('This entry does not have a required name field.') + + if entry['name'] in info_dict: + print(entry) + raise Exception('This entry has the same name as an existing entry.') + + info_dict[entry['name']] = entry # could make this a copy operation if worried + + return info_dict + + +def printStruct(t, s=0): + '''Prints a nested list/dictionary data structure with nice indenting.''' + + if not isinstance(t,dict) and not isinstance(t,list): + print(" "*s+str(t)) + else: + for key in t: + if isinstance(t,dict) and not isinstance(t[key],dict) and not isinstance(t[key],list): + print(" "*s+str(key)+" : "+str(t[key])) + else: + print(" "*s+str(key)) + if not isinstance(t,list): + printStruct(t[key], s=s+2) + +#def storeState(project,...): + + +#def applyState(): + + +def unifyUnits(d): + '''Converts any capability specification/metric in supported non-SI units + to be in SI units. 
Converts the key names as well.''' + + # load conversion data from YAML (eventually may want to store this in a class) + with open('spec_conversions.yaml') as file: + data = yaml.load(file, Loader=yaml.FullLoader) + + keys1 = [] + facts = [] # conversion factors + keys2 = [] + + for line in data: + keys1.append(line[0]) + facts.append(float(line[1])) + keys2.append(line[2]) + + for name, asset in d.items(): # loop through each asset's dict + + capabilities = {} # new dict of capabilities to built up + + for cap_key, cap_val in asset['capabilities'].items(): + + # make the capability type sub-dictionary + capabilities[cap_key] = {} + + for key, val in cap_val.items(): # look at each capability metric + try: + i = keys1.index(key) # find if key is on the list to convert + + + if keys2[i] in cap_val.keys(): + raise Exception(f"Specification '{keys2[i]}' already exists") + + print(f"Converting from {key} to {keys2[i]}") + + capabilities[cap_key][keys2[i]] = val * facts[i] # make converted entry + #capability[keys2[i]] = val * facts[i] # create a new SI entry + #del capability[keys1[i]] # remove the original? + + except: + + capabilities[cap_key][key] = val # copy over original form + + # Copy over the standardized capability dict for this asset + asset['capabilities'] = capabilities + + +class Scenario(): + + def __init__(self): + '''Initialize a scenario object that can be used for IO&M modeling of + of an offshore energy system. Eventually it will accept user-specified + settings files. 
+ ''' + + # ----- Load database of supported things ----- + + actionTypes = loadYAMLtoDict('actions.yaml', already_dict=True) # Descriptions of actions that can be done + requirements = loadYAMLtoDict('requirements.yaml', already_dict=True) # Descriptions of requirements that can be done + capabilities = loadYAMLtoDict('capabilities.yaml', already_dict=True) + vessels = loadYAMLtoDict('vessels.yaml', already_dict=True) + objects = loadYAMLtoDict('objects.yaml', already_dict=True) + + unifyUnits(vessels) # (function doesn't work yet!) <<< + + # ----- Validate internal cross references ----- + + # Make sure vessels don't use nonexistent capabilities or actions + for key, ves in vessels.items(): + + #if key != ves['name']: + # raise Exception(f"Vessel key ({key}) contradicts its name ({ves['name']})") + + # Check capabilities + if not 'capabilities' in ves: + raise Exception(f"Vessel '{key}' is missing a capabilities list.") + + for capname, cap in ves['capabilities'].items(): + if not capname in capabilities: + raise Exception(f"Vessel '{key}' capability '{capname}' is not in the global capability list.") + + # Could also check the sub-parameters of the capability + for cap_param in cap: + if not cap_param in capabilities[capname]: + #raise Exception(f"Vessel '{key}' capability '{capname}' parameter '{cap_param}' is not in the global capability's parameter list.") + print(f"Warning: Vessel '{key}' capability '{capname}' parameter '{cap_param}' is not in the global capability's parameter list.") + + # Check actions + if not 'actions' in ves: + raise Exception(f"Vessel '{key}' is missing an actions list.") + + for act in ves['actions']: + if not act in actionTypes: + raise Exception(f"Vessel '{key}' action '{act}' is not in the global action list.") + + + # Make sure actions refer to supported object types/properties and capabilities + for key, act in actionTypes.items(): + + act['type'] = key + + #if key != act['name']: + # raise Exception(f"Action key ({key}) 
contradicts its name ({act['name']})") + + # Check capabilities + #if 'capabilities' in act: + # raise Exception(f"Action '{key}' is missing a capabilities list.") + + if 'capabilities' in act: + + for cap in act['capabilities']: + if not cap in capabilities: + raise Exception(f"Action '{key}' capability '{cap}' is not in the global capability list.") + + # Could also check the sub-parameters of the capability + #for cap_param in cap: + # if not cap_param in capabilities[cap['name']]: + # raise Exception(f"Action '{key}' capability '{cap['name']}' parameter '{cap_param}' is not in the global capability's parameter list.") + + if 'roles' in act: # look through capabilities listed under each role + for caps in act['roles'].values(): + for cap in caps: + if not cap in capabilities: + raise Exception(f"Action '{key}' capability '{cap}' is not in the global capability list.") + + + # Check objects + if not 'objects' in act: + raise Exception(f"Action '{key}' is missing an objects list.") + + for obj in act['objects']: + if not obj in objects: + raise Exception(f"Action '{key}' object '{obj}' is not in the global objects list.") + + # Could also check the sub-parameters of the object + if isinstance(act['objects'], dict): # if the object + for obj_param in act['objects'][obj]: + if not obj_param in objects[obj]: + raise Exception(f"Action '{key}' object '{obj}' parameter '{obj_param}' is not in the global object's parameter list.") + + + # Store some things + self.actionTypes = actionTypes + + self.requirements = requirements + self.capabilities = capabilities + self.vessels = vessels + self.objects = objects + + + # Initialize some things + self.actions = {} + self.tasks = {} + + + def registerAction(self, action): + '''Registers an already created action''' + + # this also handles creation of unique dictionary keys + + if action.name in self.actions: # check if there is already a key with the same name + raise Warning(f"Action '{action.name}' is already registered.") + 
print(f"Action name '{action.name}' is in the actions list so incrementing it...") + action.name = increment_name(action.name) + + # What about handling of dependencies?? <<< done in the action object, + # but could check that each is in the list already... + for dep in action.dependencies.values(): + if not dep in self.actions.values(): + raise Exception(f"New action '{action.name}' has a dependency '{dep.name}' this is not in the action list.") + + # Check that all the requirements of all actions conform to the + # options in requirements.yaml. + for reqname, req in action.requirements.items(): + if req['base'] in self.requirements: # ensure this requirement is listed + for cap in req['capabilities']: + if not cap in self.capabilities: + raise Exception(f"Requirement '{reqname}' capability '{cap}' is not in the global capability list.") + else: + raise Exception(f"Action {action.name} requirement {req['base']} is not in requirements.yaml") + + # Add it to the actions dictionary + self.actions[action.name] = action + + + def addAction(self, action_type_name, action_name, **kwargs): + '''Creates an action and adds it to the register''' + + if not action_type_name in self.actionTypes: + raise Exception(f"Specified action type name {'action_type_name'} is not in the list of loaded action types.") + + # Get dictionary of action type information + action_type = self.actionTypes[action_type_name] + + # Initialize full zero-valued dictionary of possible required capability specs + reqs = {} # Start a dictionary to hold the requirements -> capabilities -> specs + for req in action_type['requirements']: + + # make sure it's a valid requirement + if '-in' in req: + req_dir = 1 # this means the req is for storage and storage is being filled + req_base = req[:-3] # this is the name of the req as in requirements.yaml, no suffix + elif '-out' in req: + req_dir = -1 + req_base = req[:-4] + else: + req_dir = 0 + req_base = req + + # Make sure the requirement and its direction are 
supported in the requirements yaml + if not req_base in self.requirements: + raise Exception(f"Requirement '{req_base}' is not in the requirements yaml.") + if abs(req_dir) > 0 and ('directions' not in self.requirements[req_base] + or req_dir not in self.requirements[req_base]['directions']): + raise Exception(f"Requirement '{req_base}' direction '{req_dir}' is not supported in the requirements yaml.") + + # Make the new requirements entry + reqs[req] = {'base':req_base, 'direction':req_dir, 'capabilities':{}} + + # add the caps of the req + for cap in self.requirements[req_base]['capabilities']: + reqs[req]['capabilities'][cap] = {} + #print(f' {cap}') + # add the specs of the capability + for spec in self.capabilities[cap]: + reqs[req]['capabilities'][cap][spec] = 0 + #print(f' {spec} = 0') + # swap in the filled-out dict + action_type['requirements'] = reqs + + # Create the action + act = Action(action_type, action_name, **kwargs) + + # Register the action + self.registerAction(act) + + return act # return the newly created action object, or its name? + + + def addActionDependencies(self, action, dependencies): + '''Adds dependencies to an action, provided those dependencies have + already been registered in the action list. + ''' + + if not isinstance(dependencies, list): + dependencies = [dependencies] # get into list form if singular + + for dep in dependencies: + # Make sure the dependency is already registered + if dep in self.actions.values(): + action.addDependency(dep) + else: + raise Exception(f"New action '{action.name}' has a dependency '{dep.name}' this is not in the action list.") + + + def visualizeActions(self): + '''Generate a graph of the action dependencies. 
+ ''' + + # Create the graph + G = nx.DiGraph() + for item, data in self.actions.items(): + for dep in data.dependencies: + G.add_edge(dep, item, duration=data.duration) # Store duration as edge attribute + + # Compute longest path & total duration + longest_path = nx.dag_longest_path(G, weight='duration') + longest_path_edges = list(zip(longest_path, longest_path[1:])) # Convert path into edge pairs + total_duration = sum(self.actions[node].duration for node in longest_path) + if len(longest_path)>=1: + last_node = longest_path[-1] # Identify last node of the longest path + # Define layout + pos = nx.shell_layout(G) + # Draw all nodes and edges (default gray) + nx.draw(G, pos, with_labels=True, node_size=500, + node_color='skyblue', font_size=10, font_weight='bold', + font_color='black', edge_color='gray') + + # Highlight longest path in red + nx.draw_networkx_edges(G, pos, edgelist=longest_path_edges, edge_color='red', width=2) + + # Annotate last node with total duration in red + plt.text(pos[last_node][0], pos[last_node][1] - 0.1, f"{total_duration:.2f} hr", fontsize=12, color='red', fontweight='bold', ha='center') + else: + pass + plt.axis('equal') + + # Color first node (without dependencies) green + i = 0 + for node in G.nodes(): + if G.in_degree(node) == 0: # Check if the node has no incoming edges + nx.draw_networkx_nodes(G, pos, nodelist=[node], node_color='green', node_size=500, label='Action starters' if i==0 else None) + i += 1 + plt.legend() + return G + + + def registerTask(self, task): + '''Registers an already created task''' + + # this also handles creation of unique dictionary keys + + if task.name in self.tasks: # check if there is already a key with the same name + raise Warning(f"Action '{task.name}' is already registered.") + print(f"Task name '{task.name}' is in the tasks list so incrementing it...") + task.name = increment_name(task.name) + + # Add it to the actions dictionary + self.tasks[task.name] = task + + + def addTask(self, 
task_name, actions, action_sequence, **kwargs): + '''Creates a task and adds it to the register''' + + # Create the action + task = Task(task_name, actions, action_sequence, **kwargs) + + # Register the action + self.registerTask(task) + + return task + + + + def findCompatibleVessels(self): + '''Go through actions and identify which vessels have the required + capabilities (could be based on capability presence, or quantitative. + ''' + + pass + + + def figureOutTaskRelationships(self, time_interval=0.5): + '''Calculate time constraints + between tasks. + ''' + + # Figure out task durations (for a given set of asset assignments?) + #for task in self.tasks.values(): + #task.calcTiming() + + # Figure out timing constraints between tasks based on action dependencies + n = len(self.tasks) + dt_min = np.zeros((n,n)) # matrix of required time offsets between tasks + + for i1, task1 in enumerate(self.tasks.values()): + for i2, task2 in enumerate(self.tasks.values()): + # look at all action dependencies from tasks 1 to 2 and + # identify the limiting case (the largest time offset)... + dt_min_1_2, dt_min_2_1 = findTaskDependencies(task1, task2, time_interval=time_interval) + + # for now, just look in one direction + dt_min[i1, i2] = dt_min_1_2 + + return dt_min + + +def findTaskDependencies(task1, task2, time_interval=0.5): + '''Finds any time dependency between the actions of two tasks. + Returns the minimum time separation required from task 1 to task 2, + and from task 2 to task 1. 
I + ''' + + time_1_to_2 = [] + time_2_to_1 = [] + + # Look for any dependencies where act2 depends on act1: + #for i1, act1 in enumerate(task1.actions.values()): + # for i2, act2 in enumerate(task2.actions.values()): + for a1, act1 in task1.actions.items(): + for a2, act2 in task2.actions.items(): + + if a1 in act2.dependencies: # if act2 depends on act1 + time_1_to_2.append(task1.actions_ti[a1] + act1.duration + - task2.actions_ti[a2]) + + if a2 in act1.dependencies: # if act2 depends on act1 + time_2_to_1.append(task2.actions_ti[a2] + act2.duration + - task1.actions_ti[a1]) + + #print(time_1_to_2) + #print(time_2_to_1) + + # TODO: provide cleaner handling of whether or not there is a time constraint in either direction <<< + + # Calculate minimum times (rounded to nearest interval) + if time_1_to_2: + raw_dt_min_1_2 = min(time_1_to_2, key=abs) + dt_min_1_2 = np.round(raw_dt_min_1_2 / time_interval) * time_interval + else: + dt_min_1_2 = -np.inf + + if time_2_to_1: + raw_dt_min_2_1 = min(time_2_to_1, key=abs) + dt_min_2_1 = np.round(raw_dt_min_2_1 / time_interval) * time_interval + else: + dt_min_2_1 = -np.inf + + if dt_min_1_2 + dt_min_2_1 > 0: + print(f"The timing between these two tasks seems to be impossible...") + + #breakpoint() + return dt_min_1_2, dt_min_2_1 + + +def implementStrategy_staged(sc): + '''This sets up Tasks in a way that implements a staged installation + strategy where all of one thing is done before all of a next thing. 
+ ''' + + # ----- Create a Task for all the anchor installs ----- + + # gather the relevant actions + acts = [] + for action in sc.actions.values(): + if action.type == 'install_anchor': + acts.append(action) + + # create the task, passing in the sequence of actions + sc.addTask('install_all_anchors', acts, action_sequence='series') + + + # ----- Create a Task for all the mooring installs ----- + + # gather the relevant actions + acts = [] + # first load each mooring + for action in sc.actions.values(): + if action.type == 'load_mooring': + acts.append(action) + # next lay each mooring (eventually route logic could be added) + for action in sc.actions.values(): + if action.type == 'lay_mooring': + acts.append(action) + + # create the task, passing in the sequence of actions + sc.addTask('install_all_moorings', acts, action_sequence='series') + + + # ----- Create a Task for the platform tow-out and hookup ----- + + # gather the relevant actions + acts = [] + # first tow out the platform + acts.append(sc.actions['tow']) + # next hook up each mooring + for action in sc.actions.values(): + if action.type == 'mooring_hookup': + acts.append(action) + + # create the task, passing in the sequence of actions + sc.addTask('tow_and_hookup', acts, action_sequence='series') + + + + + + + + + + + + +if __name__ == '__main__': + '''This is currently a script to explore how some of the workflow could + work. Can move things into functions/methods as they solidify. 
+ ''' + + # ----- Load up a Project ----- + + from famodel.project import Project + + + #%% Section 1: Project without RAFT + print('Creating project without RAFT\n') + print(os.getcwd()) + # create project object + # project = Project(file='C:/Code/FAModel/examples/OntologySample200m_1turb.yaml', raft=False) # for Windows + project = Project(file='../../examples/OntologySample200m_1turb.yaml', raft=False) # for Mac + # create moorpy system of the array, include cables in the system + project.getMoorPyArray(cables=1) + # plot in 3d, using moorpy system for the mooring and cable plots + # project.plot2d() + # project.plot3d() + + ''' + # project.arrayWatchCircle(ang_spacing=20) + # save envelopes from watch circle information for each mooring line + for moor in project.mooringList.values(): + moor.getEnvelope() + + # plot motion envelopes with 2d plot + project.plot2d(save=True, plot_bathymetry=False) + ''' + + # Tally up some object properties (eventually make this built-in Project stuff) + for mooring in project.mooringList.values(): + # sum up mooring quantities of interest + L = 0 # length + m = 0 # mass + V = 0 # volume + + for sec in mooring.sections(): # add up the length of all sections in the mooring + L += sec['L'] + m += sec['L'] * sec['type']['m'] + V += sec['L'] * np.pi/4 * sec['type']['d_vol']**2 + + mooring.props = {} + mooring.props['length'] = L + mooring.props['pretension'] = 0 # <<< get this from MoorPy once this is moved into Mooring class? + mooring.props['weight'] = 9.8*(m - 1025*V) + mooring.props['mass'] = m + mooring.props['volume'] = V + + print("should do the same for platforms and anchors...") # <<< + + sc = Scenario() # class instance holding most of the info + + + # ----- Create the interrelated actions (including their individual requirements) ----- + print("===== Create Actions =====") + # When an action is created, its requirements will be calculated based on + # the nature of the action and the objects involved. 
+ + for akey, anchor in project.anchorList.items(): + + ## Test action.py for anchor install + + # add and register anchor install action(s) + a1 = sc.addAction('install_anchor', f'install_anchor-{akey}', objects=[anchor]) + #duration, cost = a1.evaluateAssets({'carrier' : sc.vessels["MPSV_01"], 'operator':sc.vessels["AHTS_alpha"]}) + #print(f'Anchor install action {a1.name} duration: {duration:.2f} days, cost: ${cost:,.0f}') + + # register the actions as necessary for the anchor <<< do this for all objects?? + anchor.install_dependencies = [a1] + + + hookups = [] # list of hookup actions + + for mkey, mooring in project.mooringList.items(): + + # note origin and destination + + # --- lay out all the mooring's actions (and their links) + + ## Test action.py for mooring load + + # create load vessel action + a2 = sc.addAction('load_mooring', f'load_mooring-{mkey}', objects=[mooring]) + #duration, cost = a2.evaluateAssets({'carrier2' : sc.vessels["HL_Giant"], 'carrier1' : sc.vessels["Barge_squid"], 'operator' : sc.vessels["HL_Giant"]}) + #print(f'Mooring load action {a2.name} duration: {duration:.2f} days, cost: ${cost:,.0f}') + + # create ship out mooring action + + # create lay mooring action + a3 = sc.addAction('lay_mooring', f'lay_mooring-{mkey}', objects=[mooring], dependencies=[a2]) + sc.addActionDependencies(a3, mooring.attached_to[0].install_dependencies) # in case of shared anchor + #print(f'Lay mooring action {a3.name} duration: {duration:.2f} days, cost: ${cost:,.0f}') + + # mooring could be attached to anchor here - or could be lowered with anchor!! + #(r=r_anch, mooring=mooring, anchor=mooring.anchor...) 
+ # the action creator can record any dependencies related to actions of the anchor + + # create hookup action + a4 = sc.addAction('mooring_hookup', f'mooring_hookup-{mkey}', + objects=[mooring, mooring.attached_to[1]], dependencies=[a3]) + #(r=r, mooring=mooring, platform=platform, depends_on=[a4]) + # the action creator can record any dependencies related to actions of the platform + + hookups.append(a4) + + + # add the FOWT install action + a5 = sc.addAction('tow', 'tow', objects=[list(project.platformList.values())[0]]) + for a in hookups: + sc.addActionDependencies(a, [a5]) # make each hookup action dependent on the FOWT being towed out + + + # ----- Do some graph analysis ----- + + #G = sc.visualizeActions() + + # ----- Generate tasks (sequences of Actions following specific strategies) ----- + print('Generating tasks') + # Call one of the task strategy implementers, which will create the tasks + # (The created tasks also contain information about their summed requirements) + implementStrategy_staged(sc) + + + # ----- Try assigning assets to the tasks ----- + print('Trying to assign assets to tasks') + for task in sc.tasks.values(): + print(f"--- Looking at task {task.name} ---") + if task.checkAssets([sc.vessels['AHTS_alpha']], display=1)[0]: + print('Assigned AHTS') + task.assignAssets([sc.vessels['AHTS_alpha']]) + elif task.checkAssets([sc.vessels['CSV_A']], display=1)[0]: + print('Assigned CSV_A') + task.assignAssets([sc.vessels['CSV_A']]) + else: + task.checkAssets([sc.vessels['AHTS_alpha'], sc.vessels['HL_Giant'], sc.vessels['CSV_A']], display=1) + + from task import combineCapabilities + asset_caps = combineCapabilities([sc.vessels['AHTS_alpha'], sc.vessels['HL_Giant'], sc.vessels['CSV_A']], display=1) + + breakpoint() + print('assigning the kitchen sink') + task.assignAssets([sc.vessels['AHTS_alpha'], sc.vessels['HL_Giant'], sc.vessels['CSV_A']]) + + # Calculation durations of the actions, and then of the task + for a in task.actions.values(): + 
a.calcDurationAndCost() + task.calcDuration() + + + # Example task time adjustment and plot + #sc.tasks['tow_and_hookup'].setStartTime(5) + #sc.tasks['tow_and_hookup'].chart() + + time_interval = 0.25 + + dt_min = sc.figureOutTaskRelationships(time_interval=time_interval) + + ''' + # inputs for scheduler + offsets_min = {} # min: 'taskA->taskB': offset max: 'taskA->taskB': (offset, 'exact') + for taskA_index, taskA_name in enumerate(sc.tasks.keys()): + for taskB_index, taskB_name in enumerate(sc.tasks.keys()): + if dt_min[taskA_index, taskB_index] != 0: + offsets_min[f'{taskA_name}->{taskB_name}'] = dt_min[taskA_index, taskB_index] + ''' + + # ----- Check tasks for suitable vessels and the associated costs/times ----- + ''' + # preliminary/temporary test of anchor install asset suitability + for akey, anchor in project.anchorList.items(): + for a in anchor.install_dependencies: # go through required actions (should just be the anchor install) + a.evaluateAssets([sc.vessels["MPSV_01"]]) # see if this example vessel can do it + + + # ----- Generate the task_asset_matrix for scheduler ----- + # UNUSED FOR NOW + task_asset_matrix = np.zeros((len(sc.tasks), len(sc.vessels), 2)) + for i, task in enumerate(sc.tasks.values()): + row = task.get_row(sc.vessels) + if row.shape != (len(sc.vessels), 2): + raise Exception(f"Task '{task.name}' get_row output has wrong shape {row.shape}, should be {(2, len(sc.vessels))}") + task_asset_matrix[i, :] = row + ''' + # ----- Call the scheduler ----- + # for timing with weather windows and vessel assignments + + tasks_scheduler = list(sc.tasks.keys()) + + for asset in sc.vessels.values(): + asset['max_weather'] = asset['transport']['Hs_m'] + assets_scheduler = list(sc.vessels.values()) + + # >>>>> TODO: make this automated to find all possible combinations of "realistic" asset groups + asset_groups_scheduler = [ + {'group1': ['AHTS_alpha']}, + {'group2': ['CSV_A']}, + {'group3': ['AHTS_alpha', 'CSV_A', 'HL_Giant']} + ] + + 
task_asset_matrix_scheduler = np.zeros([len(tasks_scheduler), len(asset_groups_scheduler), 2], dtype=int) + for i,task in enumerate(sc.tasks.values()): + for j,asset_group in enumerate(asset_groups_scheduler): + # Extract asset list from the dictionary - values() returns a list containing one list + asset_names = list(asset_group.values())[0] + asset_list = [sc.vessels[asset_name] for asset_name in asset_names] + #task.checkAssets([sc.vessels['AHTS_alpha'], sc.vessels['HL_Giant'], sc.vessels['CSV_A']], display=1) + if not task.checkAssets(asset_list, display=0)[0]: + task_asset_matrix_scheduler[i,j] = (-1, -1) + else: + task.assignAssets(asset_list) + task.calcDuration(duration_interval=time_interval) + task.calcCost() + duration_int = int(round(task.duration / time_interval)) + task_asset_matrix_scheduler[i,j] = (task.cost, duration_int) + task.clearAssets() + + + task_dependencies = {} + dependency_types = {} + offsets = {} + for i, task1 in enumerate(sc.tasks.values()): + for j, task2 in enumerate(sc.tasks.values()): + offset = dt_min[i,j] + if i != j and offset != -np.inf: + if task2.name not in task_dependencies: + task_dependencies[task2.name] = [] + task_dependencies[task2.name].append(task1.name) + dependency_types[task1.name + '->' + task2.name] = 'start_start' + offsets[task1.name + '->' + task2.name] = offset / time_interval + + for task in sc.tasks.values(): + task.calcDuration() # ensure the durations of each task are calculated + + task_start_times = {} + task_finish_times = {} + task_list = list(sc.tasks.keys()) + + for task_name in task_list: + # Find earliest start time based on dependencies + earliest_start = 0 + for i, t1_name in enumerate(task_list): + j = task_list.index(task_name) + if i != j and dt_min[i, j] != -np.inf: + # This task depends on t1 + earliest_start = max(earliest_start, + task_finish_times.get(t1_name, 0) + dt_min[i, j]) + + task_start_times[task_name] = earliest_start + task_finish_times[task_name] = earliest_start + 
sc.tasks[task_name].duration + + #weather = np.arange(0, max(task_finish_times.values())+ time_interval, time_interval) + weather = [int(x) for x in np.ones(int(max(task_finish_times.values()) / time_interval), dtype=int)] + + scheduler = Scheduler( + tasks=tasks_scheduler, + assets=assets_scheduler, + asset_groups=asset_groups_scheduler, + task_asset_matrix=task_asset_matrix_scheduler, + task_dependencies=task_dependencies, + dependency_types=dependency_types, + offsets=offsets, + weather=weather, + period_duration=time_interval, + wordy=1 + ) + + scheduler.set_up_optimizer() + + result = scheduler.optimize() + + a = 2 + + + ''' + records = [] + for task in sc.tasks.values(): + print('') + print(task.name) + for act in task.actions.values(): + print(f" {act.name}: duration: {act.duration:8.2f} start time: {task.actions_ti[act.name]:8.2f}") + # start = float(task.actions_ti[name]) # start time [hr] + # dur = float(act.duration) # duration [hr] + # end = start + dur + + # records.append({ + # 'task' : task.name, + # 'action' : name, + # 'duration_hr': dur, + # 'time_label' : f'{start:.1f}–{end:.1f} hr', + # 'periods' : [(start, end)], # ready for future split periods + # 'start_hr' : start, # optional but handy + # 'end_hr' : end + # }) + + # Example: + # for r in records: + # print(f"{r['task']} :: {r['action']} duration_hr={r['duration_hr']:.1f} " + # f"start={r['start_hr']:.1f} label='{r['time_label']}' periods={r['periods']}") + + ''' + # ----- Run the simulation ----- + ''' + for t in np.arange(8760): + + # run the actions - these will set the modes and velocities of things... + for a in actionList: + if a.status == 0: + pass + #check if the event should be initiated + elif a.status == 1: + a.timestep() # advance the action + # if status == 2: finished, then nothing to do + + # run the time integrator to update the states of things... + for v in self.vesselList: + v.timestep() + + # log the state of everything... 
+ ''' + + plt.show() + \ No newline at end of file diff --git a/famodel/irma/objects.yaml b/famodel/irma/objects.yaml new file mode 100644 index 00000000..8107c37d --- /dev/null +++ b/famodel/irma/objects.yaml @@ -0,0 +1,33 @@ +# list of object types and attributes, a directory to ensure consistency +# (Any object relations will be checked against this list for validity) + +mooring: # object name + - length # list of supported attributes... + - pretension + - weight + - mass + - volume + +platform: # can be wec + - mass + - draft + - wec + +anchor: + - mass + - length + +component: + - mass + - length + +turbine: + +cable: + +#mooring: +# install sequence: +# ship out mooring +# lay mooring +# attach mooring-anchor (ROV) +# hookup mooring-platform \ No newline at end of file diff --git a/famodel/irma/requirements.yaml b/famodel/irma/requirements.yaml new file mode 100644 index 00000000..5c1f255c --- /dev/null +++ b/famodel/irma/requirements.yaml @@ -0,0 +1,216 @@ +# ====================================================================== +# requirements.yaml +# ---------------------------------------------------------------------- +# This file maps requirements and optional capabilities to marine operations actions. +# Each entry lists optional capabilities that an asset can have to fulfil the requirement. +# A requirement's list of capabilities has "or" logic; only one of them needs +# to be satisfied. + +# Example Entry: +# chain_storage: +# description: "Storage capacity for equipment or materials" +# objects: [chain] +# capabilities: +# - chain_locker +# - deck_space + + +# --- Propulsion & Towage ------------------------------------------------ + +propulsion: + description: "Ability to provide self-propelled motion or maneuvering thrust." + capabilities: + - engine + +towing: + description: "Ability to exert or resist towline force during towing." 
+ capabilities: + - bollard_pull + +station_keeping: + description: "Ability to maintain position and heading against wind, wave, and current forces." + capabilities: + - station_keeping_by_dynamic_positioning + - station_keeping_by_anchor + - station_keeping_by_bowt + + +# --- Storage & Transport ------------------------------------------------ + +storage: + description: "General onboard deck or cargo storage capacity for components or equipment." + objects: [anchor, mooring, cable, platform, component] + directions : [-1, 1, 0] + capabilities: + - deck_space + +chain_storage: + description: "Dedicated storage capacity for chain sections in the mooring line." + objects: [mooring] + materials: [chain] + directions : [-1, 1, 0] + capabilities: + - chain_locker + - deck_space + +rope_storage: + description: "Dedicated storage capacity for rope sections in the mooring line." + objects: [mooring] + materials: [rope] + directions : [-1, 1, 0] + capabilities: + - line_reel + - deck_space + +cable_storage: + description: "Dedicated storage capacity for electrical cables or umbilicals on reels." + objects: [cable] + directions : [-1, 1, 0] + capabilities: + - cable_reel + - deck_space + +line_handling: + description: "Ability to deploy, recover, or tension mooring lines." + objects: [mooring] + capabilities: + - winch + - crane + # - shark_jaws <<< each capability should be an "or" option. Maybe these can be specs. + # - stern_roller + +cable_handling: + description: "Ability to deploy, recover, and control subsea cables under tension." + objects: [cable] + capabilities: + - winch + - crane + #- cable_reel + #- stern_roller + +lifting: + description: "Ability to lift and move heavy components vertically and horizontally." + objects: [anchor, mooring, cable, platform, component] + capabilities: + - crane + +anchor_overboarding: + description: "Capability to overboard an anchor." 
+ objects: [anchor] + capabilities: + - crane + - stern_roller + +anchor_lowering: + description: "Capability to lower an anchor to seabed." + objects: [anchor] + capabilities: + - crane + - winch + +anchor_orienting: + description: "Capability to orient an anchor during installation." + objects: [anchor] + capabilities: + - winch + - rov + - divers + +# anchor_handling: +# description: "Capability to overboard, lower, orient, deploy, or recover anchors." +# objects: [anchor] +# capabilities: +# - winch +# - crane +# - stern_roller + +anchor_embedding: + description: "Capability to embed anchors into seabed using mechanical, hydraulic, or suction means." + objects: [anchor] + capabilities: + - bollard_pull + - engine + - pump_subsea + - pump_surface + - hydraulic_hammer + - vibro_hammer + - torque_machine + - drilling_machine + +anchor_removal: + description: "Capability to extract anchors from seabed via reverse suction or pulling." + objects: [anchor] + capabilities: + - winch + - crane + - pump_subsea + - pump_surface + + +# --- Mooring Systems --------------------------------------------------- + +mooring_work: + description: "Specialized capability for mooring hookup, tensioning, and verification." + objects: [mooring, component] + capabilities: + - winch + - shark_jaws + - stern_roller + +subsea_connection: + description: "Capability to connect or disconnect a mooring component under water (such as to an anchor)." + objects: [mooring] + capabilities: + - rov + - divers + +# --- Platform Handling & Heavy Lift ------------------------------------ + +platform_handling: + description: "Capability to position, ballast, and secure floating structures." + objects: [platform] + capabilities: + - crane + - pumping + +# (removed pumping. requirements should be based on the purpose they serve) + + +# --- Cable & Subsea Work ------------------------------------------------ + +cable_laying: + description: "Capability to lay subsea cables." 
+ objects: [cable] + capabilities: + - winch + - cable_plough + - stern_roller + +rock_placement: + description: "Capability to place rock or gravel for cable protection, scour prevention, or trench backfill." + capabilities: + - rock_placement + - crane + + +# --- Monitoring & Survey ------------------------------------------------ + +monitoring_system: + description: "Capability to monitor parameters during installation (pressure, torque, position, etc.)." + capabilities: + - monitoring_system + - rov + - container + +positioning_system: + description: "Capability to provide high-accuracy seabed positioning or navigation." + capabilities: + - positioning_system + - sonar_survey + +survey: + description: "Capability for site or installation survey using sonar, ROV, or acoustic tools." + capabilities: + - sonar_survey + - rov + - monitoring_system diff --git a/famodel/irma/scheduler.py b/famodel/irma/scheduler.py new file mode 100644 index 00000000..f2c5525d --- /dev/null +++ b/famodel/irma/scheduler.py @@ -0,0 +1,1592 @@ +# author: @rdavies, 9-8-2025 + +# Scheduler class for managing actions and tasks +# WIP, to be merged into Irma later on + +''' +--- TODO List --- +- [] How to expand this to multiple assets per task? +- [] Eventually enable parallel tasks and multiple assets per task +- [] Convert input tasks and assets from dicts to Task and Asset objects + - [] When tasks and assets are converted from lists to objects, update the type hints for task and asset list at class initialization. +- [] Add a delay cost, i.e. a cost for each time period where X = 0 <-- do we want this? might not be needed +- [] Do we want to return any form of info dictionary? 
+- [] Figure out if this can be parallelized +- [] Consolidate the loops in the constraints building section +- [] Figure out how to determine which constraint is violated if the problem is infeasible +- [] Add testing +''' + +from famodel.irma.task import Task +from famodel.irma.assets import Asset +from scipy import optimize +from scipy.optimize import milp +import numpy as np +import os + +class Scheduler: + + # Inputs are strictly typed, as this is an integer programming problem (ignored by python at runtime, but helpful for readability and syntax checking). + def __init__(self, task_asset_matrix : np.ndarray, tasks : list[str] = [], assets : list[dict] = [], task_dependencies = {}, dependency_types = {}, offsets = {}, weather : list[int] = [], period_duration : float = 1, asset_groups : list[dict] = [], wordy=0, **kwargs): + ''' + Initializes the Scheduler with assets, tasks, and constraints. + + Inputs + ------ + task_asset_matrix : array-like + A 3D array of (cost, duration) tuples indicating the cost and duration for each asset group to perform each task. + Must be len(tasks) x len(asset_groups) x 2. NOTE: The duration must be in units of scheduling periods (same as weather period length). + tasks : list + A list of Task objects to be scheduled. + assets : list + A list of individual Asset objects. Used for pre-processing and conflict detection within asset groups. + task_dependencies : dict + A dictionary mapping each task to a list of its dependencies. 
+ dependency_types : dict + A dictionary mapping each task dependency pair to its type: + - "finish_start" (default): dependent task starts after prerequisite finishes + - "start_start": dependent task starts when prerequisite starts + - "finish_finish": dependent task finishes when prerequisite finishes + - "start_finish": dependent task finishes when prerequisite starts + - "offset": dependent task starts/finishes with time offset (requires offset value) + - "same_asset": dependent task must use same asset as prerequisite + offsets : dict + A dictionary mapping each task dependency pair to its time offset (in periods). + Can specify either minimum delays or exact timing requirements: + + **Format Options:** + 1. Simple: {"task1->task2": 3} - defaults to minimum offset + 2. Tuple: {"task1->task2": (3, "exact")} - specify offset type + 3. Dict: {"task1->task2": {"value": 3, "type": "minimum"}} + + **Offset Types:** + - "minimum": dependent task waits AT LEAST offset periods (default) + - "exact": dependent task waits EXACTLY offset periods + + weather : list + A list of weather windows. The length of this list defines the number of discrete time periods available for scheduling. + period_duration : float + The duration of each scheduling period. Used for converting from periods to real time. + asset_groups : list[dict] + A list of dictionaries defining asset groups. Each dictionary maps group names to lists of individual asset names. + Example: [{'group1': ['asset_0']}, {'group2': ['asset_0', 'asset_1']}] + The task_asset_matrix dimensions must match len(asset_groups). + kwargs : dict + Additional keyword arguments for future extensions. 
+ + Returns + ------- + None + ''' + self.wordy = wordy + + if self.wordy > 0: + print("Initializing Scheduler...") + + self.task_asset_matrix = task_asset_matrix + self.tasks = tasks + self.assets = assets # Individual assets for conflict detection + self.asset_groups = asset_groups # Asset groups for scheduling + self.weather = weather + self.task_dependencies = task_dependencies + self.dependency_types = dependency_types + self.offsets = offsets + self.period_duration = period_duration # duration of each scheduling period. Used for converting from periods to real time. + + # --- Check for valid inputs --- + + # check for valid task_asset_matrix dimensions (must be len(tasks) x len(asset_groups) x 2) + if self.task_asset_matrix.ndim != 3 or self.task_asset_matrix.shape[0] != len(self.tasks) or self.task_asset_matrix.shape[1] != len(self.asset_groups) or self.task_asset_matrix.shape[2] != 2: + raise ValueError(f"task_asset_matrix must be a 3D array with shape (len(tasks), len(asset_groups), 2). Expected: ({len(self.tasks)}, {len(self.asset_groups)}, 2), got: {self.task_asset_matrix.shape}") + + # check for integer matrix, try to correct + if self.task_asset_matrix.dtype != np.dtype('int'): + try: + self.task_asset_matrix = self.task_asset_matrix.astype(int) + except: + raise ValueError("task_asset_matrix must be a 3D array of integers with shape (len(tasks), len(asset_groups), 2).") + else: + print("Input task_asset_matrix was not integer. 
Converted to integer type.") + + # check for valid tasks and assets + if not all(isinstance(task, str) or isinstance(task, dict) for task in self.tasks): + raise ValueError("All elements in tasks must be strings.") + if not all(isinstance(asset, dict) for asset in self.assets): + raise ValueError("All elements in assets must be dictionaries.") + + # check for valid weather + if not all(isinstance(w, int) and w >= 0 for w in self.weather): + raise ValueError("All elements in weather must be non-negative integers representing weather severity levels.") + + # check period duration is valid + if self.period_duration <= 0: + raise ValueError("period_duration must be positive non-zero.") + + # --- Process inputs --- + + self.T = len(self.tasks) + self.A = len(self.asset_groups) # A now represents number of asset groups + self.P = len(weather) # number of scheduling periods + self.S = self.P # number of start times + + # Initialize asset group mappings for conflict detection + self._initialize_asset_groups() + + # Checks for negative duration and cost in task_asset_matrix (0 cost and duration permitted) + self.num_valid_ta_pairs = int(np.sum((self.task_asset_matrix[:,:,0] >=0) & (self.task_asset_matrix[:,:,1] >= 0))) # number of valid task-asset group pairs (cost and duration >= 0) + + # --- Debug helpers --- + # make a list of indices to help with building constraints + self.Xta_indices = [f"Xtag_[{t}][{ag}]" for t in range(self.T) for ag in range(self.A)] # task-asset group + self.Xtp_indices = [f"Xtp_[{t}][{p}]" for t in range(self.T) for p in range(self.P)] + self.Xap_indices = [f"Xagp_[{ag}][{p}]" for ag in range(self.A) for p in range(self.P)] # asset group-period + self.Xts_indices = [f"Xts_[{t}][{s}]" for t in range(self.T) for s in range(self.S)] + self.X_indices = self.Xta_indices + self.Xtp_indices + self.Xap_indices + self.Xts_indices + + if self.wordy > 0: + print(f"Scheduler initialized with {self.P} time periods, {self.T} tasks, {self.A} asset groups, and 
{self.S} start times.") + + + def _parse_offset(self, dep_key): + """ + Parse offset value and type from the offsets dictionary. + + Returns: + tuple: (offset_value, offset_type) where offset_type is 'minimum' or 'exact' + """ + if dep_key not in self.offsets: + return 0, 'minimum' # Default: no offset, minimum type + + offset_spec = self.offsets[dep_key] + + # Handle different input formats + if isinstance(offset_spec, (int, float)): + # Simple format: {"task1->task2": 3} + return int(offset_spec), 'minimum' + elif isinstance(offset_spec, tuple) and len(offset_spec) == 2: + # Tuple format: {"task1->task2": (3, "exact")} + value, offset_type = offset_spec + if offset_type not in ['minimum', 'exact']: + raise ValueError(f"Invalid offset type '{offset_type}'. Must be 'minimum' or 'exact'") + return int(value), offset_type + elif isinstance(offset_spec, dict): + # Dictionary format: {"task1->task2": {"value": 3, "type": "minimum"}} + if 'value' not in offset_spec: + raise ValueError(f"Offset specification for '{dep_key}' missing 'value' key") + value = int(offset_spec['value']) + offset_type = offset_spec.get('type', 'minimum') + if offset_type not in ['minimum', 'exact']: + raise ValueError(f"Invalid offset type '{offset_type}'. Must be 'minimum' or 'exact'") + return value, offset_type + else: + raise ValueError(f"Invalid offset specification for '{dep_key}'. Must be int, tuple, or dict") + + + def _initialize_asset_groups(self): + ''' + Initialize asset group mappings for conflict detection. 
+ + Creates mappings to track: + - Which individual assets belong to which asset groups + - Which asset groups each individual asset participates in + - Individual asset name to index mappings + ''' + # Create individual asset name to index mapping + self.individual_asset_name_to_index = {} + for i, asset in enumerate(self.assets): + asset_name = asset.get('name', f'Asset_{i}') + self.individual_asset_name_to_index[asset_name] = i + + # Create mapping: asset_group_id -> list of individual asset indices + self.asset_group_to_individual_assets = {} + # Create mapping: individual_asset_index -> list of asset_group_ids it belongs to + self.individual_asset_to_asset_groups = {i: [] for i in range(len(self.assets))} + + for group_id, group_dict in enumerate(self.asset_groups): + for group_name, individual_asset_names in group_dict.items(): + self.asset_group_to_individual_assets[group_id] = [] + + for asset_name in individual_asset_names: + if asset_name in self.individual_asset_name_to_index: + individual_asset_idx = self.individual_asset_name_to_index[asset_name] + self.asset_group_to_individual_assets[group_id].append(individual_asset_idx) + self.individual_asset_to_asset_groups[individual_asset_idx].append(group_id) + else: + print(f"Warning: Individual asset '{asset_name}' in group '{group_name}' not found in assets list") + + if self.wordy > 1: + print(f"Asset group mappings initialized:") + for group_id, individual_asset_indices in self.asset_group_to_individual_assets.items(): + individual_asset_names = [self.assets[i].get('name', f'Asset_{i}') for i in individual_asset_indices] + print(f" Asset Group {group_id}: {individual_asset_names}") + + def set_up_optimizer(self, goal : str = "cost"): + ''' + Workspace for building out an optimizer. Right now, assuming the goal is minimize cost. This could easily be reworked to minimize duration, or some other value. + + This is a binary-integer linear programming problem, which can be solved with scipy.optimize.milp. 
+ + Inputs + ------ + goal : str + The optimization goal, minimize either "cost" or "duration". Default is "cost". + + Returns + ------- + values : np.ndarray + The values vector for the optimization problem. + constraints : list + A list of constraints for the optimization problem. + integrality : np.ndarray + An array that sets decision variables as integers. + bounds : scipy.optimize.Bounds + The bounds for the decision variables (0-1). + ''' + + if self.wordy > 0: + print("Setting up the optimizer...") + + # Solves a problem of the form minimize: v^T * x + # subject to: A_ub * x <= b_ub + # A_eq * x == b_eq + # A_lb * x >= b_lb + # lb <= x <= ub # These are constrained as integers on range 0-1 + + # --- Check and Process Inputs --- + if goal == "cost": + goal_index = 0 + elif goal == "duration": + goal_index = 1 + else: + raise ValueError("goal must be either 'cost' or 'duration'.") + + # --- Build the objective function --- + + # v^T * x + + # Decision variables: + # Xta = task asset pairs + # Xtp = task period pairs + # Xap = period asset pairs + # Xts = task start-time pairs + #num_variables = (self.T * self.A) + (self.T * self.P) + (self.A * self.P) + (self.T * self.S) # number of decision variables + num_variables = (self.T * self.A) + (self.T * self.P) + (self.T * self.S) # number of decision variables + + self.Xta_start = 0 # starting index of Xta in the flattened decision variable vector + self.Xta_end = self.Xta_start + self.T * self.A # ending index of Xta in the flattened decision variable vector + self.Xtp_start = self.Xta_end # starting index of Xtp in the flattened decision variable vector + self.Xtp_end = self.Xtp_start + self.T * self.P # ending index of Xtp in the flattened decision variable vector + #self.Xap_start = self.Xtp_end # starting index of Xap in the flattened decision variable vector + #self.Xap_end = self.Xap_start + self.A * self.P # ending index of Xap in the flattened decision variable vector + self.Xts_start = self.Xtp_end # 
starting index of Xts in the flattened decision variable vector + self.Xts_end = self.Xts_start + self.T * self.S # ending index of Xts in the flattened decision variable vector + + # Values vector: In every planning period, the value of assigning asset a to task t is the same. Constraints determine which periods are chosen. + # Note: Intentionally using values here instead of "cost" to avoid confusion between the program 'cost' of a pairing (which could be financial cost, duration, or some other target metric for minimization) to the solver and the financial cost of a asset-task pairing. + values = np.zeros(num_variables, dtype=int) # NOTE: enforces discrete cost and duration + values[self.Xta_start:self.Xta_end] = self.task_asset_matrix[:, :, goal_index].flatten() # Set the cost or duration for the task-asset pair + + # Add small penalties for later start times (Constraint 7 implementation) + # This encourages the solver to choose earlier start times when possible + max_task_cost = np.max(self.task_asset_matrix[:, :, goal_index]) + early_start_penalty_factor = max_task_cost * 0.001 # Very small penalty (0.1% of max cost) + + for t in range(self.T): + for s in range(self.S): + # Add small penalty proportional to start time + # Later start times get higher penalties + penalty = int(early_start_penalty_factor * s) + values[self.Xts_start + t * self.S + s] = penalty + + # The rest of values (for period variables) remains zero because they do not impact cost or duration + + if self.wordy > 1: + print("Values vector of length " + str(values.shape[0]) + " created") + + # lb <= x <= ub + # Constrain decision variables to be 0 or 1 + bounds = optimize.Bounds(0, 1) # 0 <= x_i <= 1 + integrality = np.ones(num_variables, dtype=int) # x_i are int. So set integrality to 1 + + if self.wordy > 0: + print("Bounds and integrality for decision variables set. 
Beginning to build constraints...") + + # --- build the constraints --- + + # A_ub * x <= b_ub + # A_eq * x == b_eq + # A_lb * x >= b_lb + + ''' + A note on constraints: There are two constraint matrices, the equality constraints (A_eq, b_eq) and the upper bound constraints (A_ub, b_ub). + Each row in the coefficient matrices corresponds to a constraint, and each column corresponds to a decision variable. Thus the number of columns + is equal to the number of decision variables (T*A + T*P + T*S), and the number of rows is equal to the number of constraints. + Similarly, the length of the limits matrices (b_eq, b_ub) is equal to the number of constraints. + + The equality constraints are expressed in the form A_eq * x = b_eq. Where A_eq is the coefficient matrix and b_eq is the limits matrix. + For example, the constraints 5x+3y=15 and x-y=1 can be expressed as: + A_eq = [[5, 3], + [1, -1]] + b_eq = [15, 1] + + Similarly, the upper bound constraints are expressed in the form A_ub * x <= b_ub. Where A_ub is the coefficient matrix and b_ub is the limits matrix. + For example, the constraints 2x+3y<=12 and x+y<=5 can be expressed as: + A_ub = [[2, 3], + [1, 1]] + b_ub = [12, 5] + + The lower bound constraints (A_lb and b_lb) follow the same form as the upper bound constraints. + + The lower and upper bound constraints on the decision variables (lb <= x <= ub) are handled above, limiting them to integer values of 0 or 1.
+ + The indexing of decision variables are: + Xta = [Xta_00, ..., Xta_0A, Xta_10, ..., Xta_1A, ..., Xta_T0, ..., Xta_TA] # task asset pairs + Xtp = [Xtp_00, ..., Xtp_0P, Xtp_10, ..., Xtp_1P, ..., Xtp_T0, ..., Xtp_TP] # task period pairs + Xap = [Xap_00, ..., Xap_0P, Xap_10, ..., Xap_1P, ..., Xap_A0, ..., Xap_AP] # asset period pairs + Xts = [Xts_00, ..., Xts_0S, Xts_10, ..., Xts_1S, ..., Xts_T0, ..., Xts_TS] # task start-time pairs + + The global decision variable is then: + X = [Xta, Xtp, Xap, Xts] + + The starting indices of each section in this global variable are saved as self.Xta_start, self.Xtp_start, self.Xap_start, and self.Xts_start. + While the values vector is only nonzero for self.Xta_start:self.Xta_end, the constraints will leverage all decision variables. + + where: + - t is the task index (0 to T), a is the asset index (0 to A), p is the period index (0 to P), and s is the start time index (0 to S) + ''' + + # Empty list of constraint coefficient matrices + A_ub_list = [] + A_eq_list = [] + A_lb_list = [] + + # Empty list of constraint limit vectors + b_ub_list = [] + b_eq_list = [] + b_lb_list = [] + + # 1) asset can only be assigned to a task if asset is capable of performing the task (value of pairing is non-negative) + ''' + if task t cannot be performed by asset a, then Xta_ta = 0 + + (Xta_00 + ... 
+ Xta_TA) = 0 # for all tasks t in range(0:T) and assets a in range(0:A) where task_asset_matrix[t, a, goal_index] <= 0 + ''' + + # 1 row + rows = [] + for t in range(self.T): + for a in range(self.A): + if self.task_asset_matrix[t, a, goal_index] <= 0: # Invalid pairing + row = np.zeros(num_variables, dtype=int) + row[self.Xta_start + t * self.A + a] = 1 + rows.append(row) + + if rows: # Only create constraint if there are invalid pairings + self.A_eq_1 = np.vstack(rows) + self.b_eq_1 = np.zeros(self.A_eq_1.shape[0], dtype=int) + + if self.wordy > 1: + ''' + print("A_eq_1^T:") + for i in range(self.Xta_start,self.Xta_end): + pstring = str(self.X_indices[i]) + for column in self.A_eq_1.transpose()[i]: + pstring += f"{ column:5}" + print(pstring) + print("b_eq_1: ", self.b_eq_1) + ''' + print("Constraint 1 details:") + for i, row in enumerate(self.A_eq_1): + xta_idx = np.where(row == 1)[0][0] - self.Xta_start + t = xta_idx // self.A + a = xta_idx % self.A + print(f" Invalid pairing: Xta[{t},{a}] = 0") + + A_eq_list.append(self.A_eq_1) + b_eq_list.append(self.b_eq_1) + + if self.wordy > 0: + print("Constraint 1 built.") + + # 2) task dependencies must be respected (i.e., a task cannot start until all its dependencies have been satisfied) + ''' + This enforces task dependencies by ensuring that a task can only be assigned to a time period if all its dependencies have been completed in previous periods. + + Different dependency types: + - finish_start: Task B starts after Task A finishes + - start_start: Task B starts when Task A starts + - finish_finish: Task B finishes when Task A finishes + - start_finish: Task B finishes when Task A starts + - same_asset: Task B must use the same asset as Task A + + For finish_start dependencies (most common): + If task t depends on task d, then task t cannot start before task d finishes. 
+ + Using start times: Xts[t,s] = 1 implies Xts[d,sd] = 1 where sd + duration_d <= s + + Constraint: For all valid start times s for task t, if Xts[t,s] = 1, + then there must exist some start time sd for task d such that Xts[d,sd] = 1 + and sd + duration_d <= s + + Implementation: Xts[t,s] <= sum(Xts[d,sd] for sd where sd + duration_d <= s) + ''' + if self.task_dependencies: + + rows_2 = [] + vec_2 = [] + + # Convert task names to indices for easier processing + task_name_to_index = {task.get('name', task) if isinstance(task, dict) else task: i for i, task in enumerate(self.tasks)} + + for task_name, dependencies in self.task_dependencies.items(): + if task_name not in task_name_to_index: + continue # Skip if task not in our task list + + t = task_name_to_index[task_name] # dependent task index + + for dep_task_name in dependencies: + if dep_task_name not in task_name_to_index: + continue # Skip if dependency not in our task list + + d = task_name_to_index[dep_task_name] # dependency task index + + # Get dependency type (default to finish_start) + dep_key = f"{dep_task_name}->{task_name}" + dep_type = self.dependency_types.get(dep_key, "finish_start") + + # Parse offset value and type + offset_value, offset_type = self._parse_offset(dep_key) + + if dep_type == "finish_start": + # Handle both minimum and exact offset types for finish_start dependencies + + for a_d in range(self.A): + duration_d = self.task_asset_matrix[d, a_d, 1] + if duration_d <= 0: # Skip invalid asset-task pairings + continue + + for sd in range(self.S): # For each possible start time of dependency task + finish_time_d = sd + duration_d # When task d finishes + + if offset_type == "minimum": + # Task t cannot start before (finish_time_d + offset_value) + earliest_start_t = finish_time_d + offset_value + for s in range(min(earliest_start_t, self.S)): # All start times before minimum allowed + # Constraint: Xta[d,a_d] + Xts[d,sd] + Xts[t,s] <= 2 + row = np.zeros(num_variables, dtype=int) + 
row[self.Xta_start + d * self.A + a_d] = 1 # Xta[d,a_d] + row[self.Xts_start + d * self.S + sd] = 1 # Xts[d,sd] + row[self.Xts_start + t * self.S + s] = 1 # Xts[t,s] + rows_2.append(row) + vec_2.append(2) # At most 2 of these 3 can be 1 simultaneously + + elif offset_type == "exact": + # Task t must start exactly at (finish_time_d + offset_value) + exact_start_t = finish_time_d + offset_value + if exact_start_t < self.S: # Valid start time + # Constraint: if task d uses asset a_d and starts at sd, + # then task t must start at exact_start_t + # Constraint: Xta[d,a_d] + Xts[d,sd] - Xts[t,exact_start_t] <= 1 + row = np.zeros(num_variables, dtype=int) + row[self.Xta_start + d * self.A + a_d] = 1 # Xta[d,a_d] + row[self.Xts_start + d * self.S + sd] = 1 # Xts[d,sd] + row[self.Xts_start + t * self.S + exact_start_t] = -1 # -Xts[t,exact_start_t] + rows_2.append(row) + vec_2.append(1) # Xta[d,a_d] + Xts[d,sd] - Xts[t,exact_start_t] <= 1 + + # Also prevent task t from starting at any other time when d is active + for s_other in range(self.S): + if s_other != exact_start_t: + row = np.zeros(num_variables, dtype=int) + row[self.Xta_start + d * self.A + a_d] = 1 # Xta[d,a_d] + row[self.Xts_start + d * self.S + sd] = 1 # Xts[d,sd] + row[self.Xts_start + t * self.S + s_other] = 1 # Xts[t,s_other] + rows_2.append(row) + vec_2.append(2) # At most 2 of these 3 can be 1 simultaneously + + elif dep_type == "start_start": + # Handle both minimum and exact offset types for start_start dependencies + + for s_d in range(self.S): # For each possible start time of dependency task + if offset_type == "minimum": + # Task t cannot start before (task d start time + offset_value) + earliest_start_t = s_d + offset_value + for s_t in range(min(earliest_start_t, self.S)): # All start times before minimum allowed + # Constraint: Xts[d,s_d] + Xts[t,s_t] <= 1 + row = np.zeros(num_variables, dtype=int) + row[self.Xts_start + d * self.S + s_d] = 1 # Xts[d,s_d] + row[self.Xts_start + t * self.S + s_t] = 
1 # Xts[t,s_t] + rows_2.append(row) + vec_2.append(1) # At most 1 of these 2 can be 1 simultaneously + + elif offset_type == "exact": + # Task t must start exactly at (task d start time + offset_value) + exact_start_t = s_d + offset_value + if exact_start_t < self.S: # Valid start time + # Constraint: if task d starts at s_d, then task t must start at exact_start_t + # Constraint: Xts[d,s_d] - Xts[t,exact_start_t] <= 0 + row = np.zeros(num_variables, dtype=int) + row[self.Xts_start + d * self.S + s_d] = 1 # Xts[d,s_d] + row[self.Xts_start + t * self.S + exact_start_t] = -1 # -Xts[t,exact_start_t] + rows_2.append(row) + vec_2.append(0) # Xts[d,s_d] - Xts[t,exact_start_t] <= 0 + + # Also prevent task t from starting at any other time when d starts at s_d + for s_other in range(self.S): + if s_other != exact_start_t: + row = np.zeros(num_variables, dtype=int) + row[self.Xts_start + d * self.S + s_d] = 1 # Xts[d,s_d] + row[self.Xts_start + t * self.S + s_other] = 1 # Xts[t,s_other] + rows_2.append(row) + vec_2.append(1) # At most 1 of these 2 can be 1 simultaneously + + elif dep_type == "finish_finish": + # Task t finishes when task d finishes + offset periods + # This requires task t to finish at (task d finish time + offset) + for s_t in range(self.S): + for a_t in range(self.A): + duration_t = self.task_asset_matrix[t, a_t, 1] + if duration_t > 0: # Valid pairing for task t + end_time_t = s_t + duration_t + + # Find start times for task d that result in compatible end times + for s_d in range(self.S): + for a_d in range(self.A): + duration_d = self.task_asset_matrix[d, a_d, 1] + if duration_d > 0: # Valid pairing for task d + end_time_d = s_d + duration_d + + if end_time_t == end_time_d + offset: + # If task t starts at s_t with asset a_t AND task d starts at s_d with asset a_d, + # then they finish with correct offset (constraint satisfied) + continue + else: + # Prevent this combination if it doesn't satisfy offset + row = np.zeros(num_variables, dtype=int) + 
row[self.Xts_start + t * self.S + s_t] = 1 # Xts[t,s_t] + row[self.Xta_start + t * self.A + a_t] = 1 # Xta[t,a_t] + row[self.Xts_start + d * self.S + s_d] = 1 # Xts[d,s_d] + row[self.Xta_start + d * self.A + a_d] = 1 # Xta[d,a_d] + rows_2.append(row) + vec_2.append(3) # At most 3 of these 4 can be 1 simultaneously + + elif dep_type == "same_asset": + # Task t must use the same asset as task d + for a in range(self.A): + # If both tasks can use asset a + if (self.task_asset_matrix[t, a, 1] > 0 and + self.task_asset_matrix[d, a, 1] > 0): + row = np.zeros(num_variables, dtype=int) + row[self.Xta_start + t * self.A + a] = 1 # Xta[t,a] + row[self.Xta_start + d * self.A + a] = -1 # -Xta[d,a] + rows_2.append(row) + vec_2.append(0) # Xta[t,a] - Xta[d,a] <= 0, so if t uses a, then d must use a + + # Build constraint matrices if we have any dependency constraints + if rows_2: + self.A_ub_2 = np.vstack(rows_2) + self.b_ub_2 = np.array(vec_2, dtype=int) + A_ub_list.append(self.A_ub_2) + b_ub_list.append(self.b_ub_2) + + if self.wordy > 1: + print("Constraint 2 details:") + if hasattr(self, 'A_ub_2'): + for i, row in enumerate(self.A_ub_2): + # Find the variables that are non-zero in this constraint + xta_indices = np.where(row[self.Xta_start:self.Xta_start + self.T * self.A] != 0)[0] + xts_indices = np.where(row[self.Xts_start:self.Xts_start + self.T * self.S] != 0)[0] + + if len(xta_indices) > 0 or len(xts_indices) > 0: + constraint_parts = [] + + # Add Xta terms + for xta_idx in xta_indices: + coeff = row[self.Xta_start + xta_idx] + t = xta_idx // self.A + a = xta_idx % self.A + if coeff == 1: + constraint_parts.append(f"Xta[{t},{a}]") + elif coeff == -1: + constraint_parts.append(f"-Xta[{t},{a}]") + else: + constraint_parts.append(f"{coeff}*Xta[{t},{a}]") + + # Add Xts terms + for xts_idx in xts_indices: + coeff = row[self.Xts_start + xts_idx] + t = xts_idx // self.S + s = xts_idx % self.S + if coeff == 1: + constraint_parts.append(f"Xts[{t},{s}]") + elif coeff == -1: + 
constraint_parts.append(f"-Xts[{t},{s}]") + else: + constraint_parts.append(f"{coeff}*Xts[{t},{s}]") + + if constraint_parts: + constraint_eq = " + ".join(constraint_parts).replace("+ -", "- ") + bound = self.b_ub_2[i] + print(f" Dependency constraint: {constraint_eq} ≤ {bound}") + + if i >= 4: # Limit output to avoid too much detail + remaining = len(self.A_ub_2) - i - 1 + if remaining > 0: + print(f" ... and {remaining} more dependency constraints") + break + + if self.wordy > 0: + print("Constraint 2 built.") + + # 3) exactly one asset must be assigned to each task + ''' + Sum of all task-asset pairs must be = 1 for each task: + (Xta_00 + ... + Xta_0A) = 1 # for task 0 + (Xta_10 + ... + Xta_1A) = 1 # for task 1 + ... + (Xta_T0 + ... + Xta_TA) = 1 # for task T + + This ensures each task is assigned to exactly one asset group. + ''' + + # num_tasks rows + self.A_eq_3 = np.zeros((self.T, num_variables), dtype=int) + self.b_eq_3 = np.ones(self.T, dtype=int) + + for t in range (self.T): + # set the coefficient for each task t to one + self.A_eq_3[t, (self.Xta_start + t * self.A):(self.Xta_start + t * self.A + self.A)] = 1 # Set the coefficients for the Xta variables to 1 for each task t + + if self.wordy > 1: + ''' + print("A_eq_3^T:") + print(" T1 T2") # Header for 2 tasks + for i in range(self.Xta_start,self.Xta_end): + pstring = str(self.X_indices[i]) + for column in self.A_eq_3.transpose()[i]: + pstring += f"{ column:5}" + print(pstring) + print("b_eq_3: ", self.b_eq_3) + ''' + print("Constraint 3 details:") + for t in range(self.T): + asset_vars = [f"Xta[{t},{a}]" for a in range(self.A)] + print(f" Task {t} assignment: {' + '.join(asset_vars)} = 1") + + A_eq_list.append(self.A_eq_3) + b_eq_list.append(self.b_eq_3) + + if self.wordy > 0: + print("Constraint 3 built.") + + # 4) Individual asset conflict prevention within asset groups + ''' + We need to ensure that individual assets used within different asset groups + are not assigned to tasks that occur at the 
same time. + + For each individual asset, each period, and each pair of tasks that could + potentially use that individual asset (through their asset group assignments): + + We create constraints of the form: + Xta[task1,ag1] + Xta[task2,ag2] + Xtp[task1,period] + Xtp[task2,period] ≤ 3 + + Where: + - ag1 and ag2 are asset groups that both contain the same individual asset + - This prevents: task1 assigned to ag1 AND task2 assigned to ag2 AND + both tasks active in the same period (which would conflict on the shared individual asset) + + Examples: + - Xta[0,0] + Xta[1,0] + Xtp[0,p] + Xtp[1,p] ≤ 3 (same asset group) + - Xta[0,1] + Xta[1,0] + Xtp[0,p] + Xtp[1,p] ≤ 3 (different groups sharing heavy_asset) + ''' + + rows_4 = [] + bounds_4 = [] + + if self.wordy > 1: + print('Constraint 4 details:') + + # For each individual asset, create constraints to prevent conflicts + for individual_asset_idx in range(len(self.assets)): + individual_asset_name = self.assets[individual_asset_idx].get('name', f'Asset_{individual_asset_idx}') + + # Find all asset groups that contain this individual asset + asset_groups_containing_this_asset = self.individual_asset_to_asset_groups[individual_asset_idx] + + if len(asset_groups_containing_this_asset) > 0: + # For each time period + for period_idx in range(self.P): + + # Find all valid (task, asset_group) pairs that could use this individual asset + valid_task_asset_group_pairs = [] + + for task_idx in range(self.T): + for asset_group_idx in asset_groups_containing_this_asset: + # Check if this task can use this asset group (valid pairing) + if self.task_asset_matrix[task_idx, asset_group_idx, 1] > 0: # valid duration > 0 + valid_task_asset_group_pairs.append((task_idx, asset_group_idx)) + + # Create pairwise constraints between all combinations that could conflict + for i, (task1, ag1) in enumerate(valid_task_asset_group_pairs): + for j, (task2, ag2) in enumerate(valid_task_asset_group_pairs[i+1:], i+1): + + # Skip constraints that 
violate constraint 3 (same task, different asset groups) + # Constraint 3 already ensures exactly one asset group per task + if task1 == task2: + if self.wordy > 2: + print(f" Skipping redundant constraint: Task {task1} with groups {ag1} and {ag2} " + f"(already prevented by constraint 3)") + continue + + # Create constraint to prevent task1 and task2 from using this individual asset simultaneously + row = np.zeros(num_variables, dtype=int) + + # Add the four variables that create the conflict scenario + row[self.Xta_start + task1 * self.A + ag1] = 1 # Xta[task1,ag1] + row[self.Xta_start + task2 * self.A + ag2] = 1 # Xta[task2,ag2] + row[self.Xtp_start + task1 * self.P + period_idx] = 1 # Xtp[task1,period] + row[self.Xtp_start + task2 * self.P + period_idx] = 1 # Xtp[task2,period] + + rows_4.append(row) + bounds_4.append(3) # Sum ≤ 3 prevents all 4 from being 1 simultaneously + + if self.wordy > 1: + #print(f" Conflict constraint for {individual_asset_name} in period {period_idx}:") + print(f" Xta[{task1},{ag1}] + Xta[{task2},{ag2}] + Xtp[{task1},{period_idx}] + Xtp[{task2},{period_idx}] ≤ 3") + + # Create constraint matrix + if rows_4: + self.A_ub_4 = np.vstack(rows_4) + self.b_ub_4 = np.array(bounds_4, dtype=int) + else: + # If no individual asset conflicts possible, create empty constraint matrix + self.A_ub_4 = np.zeros((0, num_variables), dtype=int) + self.b_ub_4 = np.array([], dtype=int) + + ''' + if self.wordy > 1: + print("A_ub_4^T:") + print(" P1 P2 P3 P4 P5") # Header for 5 periods + for i in range(self.Xap_start,self.Xap_end): + pstring = str(self.X_indices[i]) + for column in self.A_ub_4.transpose()[i]: + pstring += f"{ column:5}" + print(pstring) + print("b_ub_4: ", self.b_ub_4) + ''' + + A_ub_list.append(self.A_ub_4) + b_ub_list.append(self.b_ub_4) + + if self.wordy > 0: + print("Constraint 4 built.") + + # 10) A task duration plus the start-time it is assigned to must be less than the total number of time periods available + ''' + This ensures that a 
task is not assigned to a period that would cause it to exceed the total number of periods available. + + (Xts * s + d_ta) <= P # for all tasks t in range(0:T) where d is the duration of task-asset pair ta + ''' + + rows = [] + for t in range(self.T): + for a in range(self.A): + duration = self.task_asset_matrix[t, a, 1] # duration of task t with asset a + if duration > 0: # If valid pairing, make constraint + for s in range(self.S): + if s + duration > self.P: + row = np.zeros(num_variables, dtype=int) + row[self.Xts_start + t * self.S + s] = 1 + row[self.Xta_start + t * self.A + a] = 1 + rows.append(row) + + self.A_ub_10 = np.vstack(rows) + self.b_ub_10 = np.ones(self.A_ub_10.shape[0], dtype=int) # Each infeasible combination: Xta + Xts <= 1 + + if self.wordy > 1: + ''' + print("A_ub_10^T:") + print(" T1A1 T1A2 T2A1") # Header for 3 task-asset pairs example with T2A2 invalid + for i in range(self.Xta_start,self.Xta_end): + pstring = str(self.X_indices[i]) + for column in self.A_ub_10.transpose()[i]: + pstring += f"{ column:5}" + print(pstring) + for i in range(self.Xts_start,self.Xts_end): + pstring = str(self.X_indices[i]) + for column in self.A_ub_10.transpose()[i]: + pstring += f"{ column:5}" + print(pstring) + print("b_ub_10: ", self.b_ub_10) + ''' + print("Constraint 10 details:") + for i, row in enumerate(self.A_ub_10): + # Find the Xta and Xts variables that are 1 in this row + xta_indices = np.where(row[self.Xta_start:self.Xta_start + self.T * self.A] == 1)[0] + xts_indices = np.where(row[self.Xts_start:self.Xts_start + self.T * self.S] == 1)[0] + + if len(xta_indices) > 0 and len(xts_indices) > 0: + xta_idx = xta_indices[0] + xts_idx = xts_indices[0] + t_ta = xta_idx // self.A + a = xta_idx % self.A + t_ts = xts_idx // self.S + s = xts_idx % self.S + duration = self.task_asset_matrix[t_ta, a, 1] + print(f" Task {t_ta} exceeds period limit: Xta[{t_ta},{a}] + Xts[{t_ts},{s}] ≤ 1 (start {s} + duration {duration} > {self.P})") + + 
A_ub_list.append(self.A_ub_10) + b_ub_list.append(self.b_ub_10) + + if self.wordy > 0: + print("Constraint 10 built.") + + # 11) The total number of task period pairs must be greater than or equal to the number of task-start time pairs + ''' + This ensures that the task start-time decision variable is non-zero if a task is assigned to any period. + + (Xtp_00 + ... + Xtp_TP) >= (Xts_00 + ... + Xts_TS) # for all tasks t in range(0:T) + ''' + """ + A_lb_11 = np.zeros((self.T, num_variables), dtype=int) + b_lb_11 = np.ones(self.T, dtype=int) * 2 + + for t in range(self.T): + A_lb_11[t, (self.Xtp_start + t * self.P):(self.Xtp_start + t * self.P + self.P)] = 1 + A_lb_11[t, (self.Xts_start + t * self.S):(self.Xts_start + t * self.S + self.S)] = 1 + + if self.wordy > 1: + print("A_lb_11^T:") + print(" T1 T2") # Header for 2 tasks + for i in range(self.Xtp_start,self.Xts_end): + pstring = str(self.X_indices[i]) + for column in A_lb_11.transpose()[i]: + pstring += f"{ column:5}" + print(pstring) + print("b_lb_11: ", b_lb_11) + + A_lb_list.append(A_lb_11) + b_lb_list.append(b_lb_11) + + if self.wordy > 0: + print("Constraint 11 built.") + """ + # 12) The period an asset is assigned to must match the period the task in the task-asset pair is assigned to + ''' + This ensures the chosen task and asset in a task asset pair are assigned to the same period. This means that if an asset + is assigned to a task, then the corresponding task-period and asset-period pairs must be equal. + + if Xta = 1, then Xtp = Xap, else if Xta = 0, then Xtp and Xap can be anything. 
This requires two constriants: + + Xtp[t, p] - Xap[a, p] <= 1 - Xta[t, a] --> Xtp[t, p] - Xap[a, p] + Xta[t, a] <= 1 + Xtp[t, p] - Xap[a, p] >= -(1 - Xta[t, a]) --> Xtp[t, p] - Xap[a, p] + Xta[t, a] >= 0 + + ''' + """ + A_12 = np.zeros((self.T * self.A * self.P, num_variables), dtype=int) + b_ub_12 = np.ones(self.T * self.A * self.P, dtype=int) + b_lb_12 = np.zeros(self.T * self.A * self.P, dtype=int) + + row = 0 + for t in range(self.T): + for a in range(self.A): + for p in range(self.P): + A_12[row, self.Xtp_start + t * self.P + p] = 1 + A_12[row, self.Xap_start + a * self.P + p] = -1 + A_12[row, self.Xta_start + t * self.A + a] = 1 + + row += 1 + """ + """ + rows_ub = [] + rows_lb = [] + + for t in range(self.T): + for a in range(self.A): + # Only create constraints for valid task-asset pairs + if self.task_asset_matrix[t, a, 1] > 0: # Valid pairing (duration > 0) + for p in range(self.P): + row = np.zeros(num_variables, dtype=int) + row[self.Xtp_start + t * self.P + p] = 1 # Xtp[t,p] + row[self.Xap_start + a * self.P + p] = -1 # -Xap[a,p] + row[self.Xta_start + t * self.A + a] = 1 # Xta[t,a] + + rows_ub.append(row.copy()) # Upper bound constraint + rows_lb.append(row.copy()) # Lower bound constraint + + if rows_ub: + A_ub_12 = np.vstack(rows_ub) + b_ub_12 = np.ones(len(rows_ub), dtype=int) + A_lb_12 = np.vstack(rows_lb) + b_lb_12 = np.zeros(len(rows_lb), dtype=int) + + A_ub_list.append(A_ub_12) + b_ub_list.append(b_ub_12) + A_lb_list.append(A_lb_12) + b_lb_list.append(b_lb_12) + + if self.wordy > 1: + print("A_12^T:") + for i in range(self.Xta_start,self.Xap_end): + pstring = str(self.X_indices[i]) + for column in A_12.transpose()[i]: + pstring += f"{ column:5}" + print(pstring) + print("b_ub_12: ", b_ub_12) + print("b_lb_12: ", b_lb_12) + + A_ub_list.append(A_12) + b_ub_list.append(b_ub_12) + A_lb_list.append(A_12) + b_lb_list.append(b_lb_12) + + if self.wordy > 0: + print("Constraint 12 built.") + """ + # 14) if a task-starttime pair is selected, the 
corresponding task-period pair must be selected for the period equal to the start time plus the duration of the task + ''' + This ensures that if a task is assigned a start time, the corresponding task-period pair for the period equal to the start time plus the duration of the task is also selected. + Xts[t, s] <= Xtp[t, s : s + d] # for all tasks t in range(0:T) and start times s in range(0:S) where d is the duration of task t with the asset assigned to it + ''' + + # TODO: commenting out this constraint allows the optimizer to find an optimal solution + + # TODO: this is very very close. The Xtp are being assigned blocks equal to the starttime + duration. But it is causing the optimizer to fail...? + rows_14a = [] + vec_14a = [] + rows_14b = [] + vec_14b = [] + #rows = [] + #vec = [] + + # 14a) Simple start time to period mapping: Xts[t,s] <= Xtp[t,s] + for t in range(self.T): + for s in range(self.S): + if s < self.P: # Only if start time is within valid periods + row = np.zeros(num_variables, dtype=int) + row[self.Xts_start + t * self.S + s] = 1 # Xts[t,s] + row[self.Xtp_start + t * self.P + s] = -1 # -Xtp[t,s] + rows_14a.append(row) + vec_14a.append(0) # Xts[t,s] - Xtp[t,s] <= 0 + + ''' + for t in range(self.T): + for a in range(self.A): + duration = self.task_asset_matrix[t, a, 1] + if duration > 0: # If valid pairing, make constraint + for s in range(min(self.S, self.P - duration + 1)): + row = np.zeros(num_variables, dtype=int) + row[self.Xta_start + t * self.A + a] = 1 + row[self.Xts_start + t * self.S + s] = -1 + start_idx = self.Xtp_start + t * self.P + s + end_idx = min(start_idx + duration, self.Xtp_start + (t + 1) * self.P) + row[start_idx:end_idx] = 1 + #row[self.Xtp_start + t * self.P + s : self.Xtp_start + t * self.P + s + duration] = 1 + rows.append(row) + vec.append(1) + ''' + # 14b) Duration enforcement: if task t uses asset a and starts at s, + # then it must be active for duration periods + for t in range(self.T): + for a in range(self.A): + 
duration = self.task_asset_matrix[t, a, 1] + if duration > 0: # Valid task-asset pairing + for s in range(min(self.S, self.P - duration + 1)): # Valid start times + for p in range(s, min(s + duration, self.P)): # Each period in duration + row = np.zeros(num_variables, dtype=int) + # If Xta[t,a] = 1 AND Xts[t,s] = 1, then Xtp[t,p] = 1 + # Constraint: Xta[t,a] + Xts[t,s] - Xtp[t,p] <= 1 + row[self.Xta_start + t * self.A + a] = 1 # Xta[t,a] + row[self.Xts_start + t * self.S + s] = 1 # Xts[t,s] + row[self.Xtp_start + t * self.P + p] = -1 # -Xtp[t,p] + rows_14b.append(row) + vec_14b.append(1) # Xta[t,a] + Xts[t,s] - Xtp[t,p] <= 1 + + + #A_lb_14 = np.vstack(rows) + #b_lb_14 = np.array(vec, dtype=int) + + if rows_14a: + self.A_ub_14a = np.vstack(rows_14a) + self.b_ub_14a = np.array(vec_14a, dtype=int) + A_ub_list.append(self.A_ub_14a) + b_ub_list.append(self.b_ub_14a) + + if rows_14b: + self.A_ub_14b = np.vstack(rows_14b) + self.b_ub_14b = np.array(vec_14b, dtype=int) + A_ub_list.append(self.A_ub_14b) + b_ub_list.append(self.b_ub_14b) + + if self.wordy > 1: + ''' + print("A_lb_14^T:") + print(" T1A1S1 T1A2S1 ...") # Header for 3 task-asset pairs example with T2A2 invalid + for i in range(self.Xta_start,self.Xta_end): + pstring = str(self.X_indices[i]) + for column in self.A_lb_14.transpose()[i]: + pstring += f"{ column:5}" + print(pstring) + for i in range(self.Xtp_start,self.Xtp_end): + pstring = str(self.X_indices[i]) + for column in self.A_lb_14.transpose()[i]: + pstring += f"{ column:5}" + print(pstring) + for i in range(self.Xts_start,self.Xts_end): + pstring = str(self.X_indices[i]) + for column in self.A_lb_14.transpose()[i]: + pstring += f"{ column:5}" + print(pstring) + print("b_lb_14: ", self.b_ub_14) + ''' + print("Constraint 14a details:") + if hasattr(self, 'A_ub_14a'): + for i, row in enumerate(self.A_ub_14a): + xts_indices = np.where(row[self.Xts_start:self.Xts_start + self.T * self.S] == 1)[0] + xtp_indices = np.where(row[self.Xtp_start:self.Xtp_start + 
self.T * self.P] == -1)[0] + if len(xts_indices) > 0 and len(xtp_indices) > 0: + xts_idx = xts_indices[0] + xtp_idx = xtp_indices[0] + t_ts = xts_idx // self.S + s = xts_idx % self.S + t_tp = xtp_idx // self.P + p = xtp_idx % self.P + print(f" Start-period mapping: Xts[{t_ts},{s}] - Xtp[{t_tp},{p}] ≤ 0") + + print("Constraint 14b details:") + if hasattr(self, 'A_ub_14b'): + for i, row in enumerate(self.A_ub_14b): + xta_indices = np.where(row[self.Xta_start:self.Xta_start + self.T * self.A] == 1)[0] + xts_indices = np.where(row[self.Xts_start:self.Xts_start + self.T * self.S] == 1)[0] + xtp_indices = np.where(row[self.Xtp_start:self.Xtp_start + self.T * self.P] == -1)[0] + + if len(xta_indices) > 0 and len(xts_indices) > 0 and len(xtp_indices) > 0: + xta_idx = xta_indices[0] + xts_idx = xts_indices[0] + xtp_idx = xtp_indices[0] + t_ta = xta_idx // self.A + a = xta_idx % self.A + t_ts = xts_idx // self.S + s = xts_idx % self.S + t_tp = xtp_idx // self.P + p = xtp_idx % self.P + print(f" Duration enforcement: Xta[{t_ta},{a}] + Xts[{t_ts},{s}] - Xtp[{t_tp},{p}] ≤ 1") + + if self.wordy > 0: + print("Constraint 14 built.") + + # 15) the number of task-starttime pairs must be equal to the number of tasks + ''' + This ensures that each task is assigned a start time. + + (Xts_00 + ... 
+ Xts_TS) = 1 + ''' + ''' + A_eq_15 = np.zeros((1, num_variables), dtype=int) + b_eq_15 = np.array([self.T], dtype=int) + + A_eq_15[0,self.Xts_start:self.Xts_end] = 1 + ''' + self.A_eq_15 = np.zeros((self.T, num_variables), dtype=int) + self.b_eq_15 = np.ones(self.T, dtype=int) + + for t in range(self.T): + self.A_eq_15[t, (self.Xts_start + t * self.S):(self.Xts_start + t * self.S + self.S)] = 1 + + if self.wordy > 1: + ''' + print("A_eq_15^T:") + for i in range(self.Xts_start,self.Xts_end): + pstring = str(self.X_indices[i]) + for column in self.A_eq_15.transpose()[i]: + pstring += f"{ column:5}" + print(pstring) + print("b_eq_15: ", self.b_eq_15) + ''' + print("Constraint 15 details:") + for t in range(self.T): + start_vars = [f"Xts[{t},{s}]" for s in range(self.S)] + print(f" Task {t} start assignment: {' + '.join(start_vars)} = 1") + + A_eq_list.append(self.A_eq_15) + b_eq_list.append(self.b_eq_15) + + if self.wordy > 0: + print("Constraint 15 built.") + + # 16) Each task must be active for exactly the duration of its assigned asset + ''' + This constraint works together with Constraint 14b to ensure proper duration handling: + - Constraint 14b: Ensures tasks are active during their assigned duration periods (lower bound) + - Constraint 16: Ensures tasks are active for exactly their total duration (upper bound) + + For each task t, the sum of Xtp periods must equal the duration of the assigned asset: + sum(Xtp[t,p] for p in P) = sum(Xta[t,a] * duration[t,a] for a in A) + ''' + rows_16 = [] + vec_16 = [] + + for t in range(self.T): + row = np.zeros(num_variables, dtype=int) + # Left side: sum of all periods for task t + for p in range(self.P): + row[self.Xtp_start + t * self.P + p] = 1 + # Right side: subtract duration * assignment for each asset + for a in range(self.A): + duration = self.task_asset_matrix[t, a, 1] + if duration > 0: + row[self.Xta_start + t * self.A + a] = -duration + + rows_16.append(row) + vec_16.append(0) # sum(Xtp) - sum(duration * Xta) = 
0 + + if rows_16: + self.A_eq_16 = np.vstack(rows_16) + self.b_eq_16 = np.array(vec_16, dtype=int) + A_eq_list.append(self.A_eq_16) + b_eq_list.append(self.b_eq_16) + + if self.wordy > 1: + print("Constraint 16 details:") + for t in range(self.T): + period_vars = [f"Xtp[{t},{p}]" for p in range(self.P)] + asset_terms = [] + for a in range(self.A): + duration = self.task_asset_matrix[t, a, 1] + if duration > 0: + asset_terms.append(f"{duration}*Xta[{t},{a}]") + if asset_terms: + print(f" Task {t} duration: {' + '.join(period_vars)} = {' + '.join(asset_terms)}") + + if self.wordy > 0: + print("Constraint 16 built.") + + # 17) Weather constraints: task-asset pairs cannot be assigned in periods with incompatible weather + ''' + Assets have maximum weather conditions they can operate in (stored as 'max_weather' in asset dict). + If the weather in period p exceeds an asset's max_weather capability, then no task can be + assigned to that asset in that period. + + For each asset a, period p, and task t: + If weather[p] > asset[a]['max_weather'], then Xta[t,a] + Xtp[t,p] <= 1 + + This prevents simultaneous assignment of both the task-asset pair AND the task-period pair + when weather conditions are incompatible. 
+ ''' + rows_17 = [] + vec_17 = [] + + for a in range(self.A): + # Determine the weather capability of asset group a + # An asset group can only operate in conditions that ALL its individual assets can handle + # So we take the minimum max_weather across all individual assets in the group + asset_group_max_weather = float('inf') # Start with no limit + + if a in self.asset_group_to_individual_assets: + individual_asset_indices = self.asset_group_to_individual_assets[a] + for individual_asset_idx in individual_asset_indices: + individual_max_weather = self.assets[individual_asset_idx].get('max_weather', float('inf')) + asset_group_max_weather = min(asset_group_max_weather, individual_max_weather) + + for p in range(self.P): + period_weather = self.weather[p] + + if period_weather > asset_group_max_weather: + # Weather in period p is too severe for asset group a + for t in range(self.T): + # Check if this task-asset pair is valid (positive duration and cost) + if (self.task_asset_matrix[t, a, 0] >= 0 and + self.task_asset_matrix[t, a, 1] > 0): + + # Prevent task t from using asset group a in period p due to weather + row = np.zeros(num_variables, dtype=int) + row[self.Xta_start + t * self.A + a] = 1 # Xta[t,a] + row[self.Xtp_start + t * self.P + p] = 1 # Xtp[t,p] + + rows_17.append(row) + vec_17.append(1) # Xta[t,a] + Xtp[t,p] <= 1 (can't have both = 1) + + # Build constraint matrices if we have any weather constraints + if rows_17: + self.A_ub_17 = np.vstack(rows_17) + self.b_ub_17 = np.array(vec_17, dtype=int) + A_ub_list.append(self.A_ub_17) + b_ub_list.append(self.b_ub_17) + + if self.wordy > 1: + print("Constraint 17 details:") + for i, row in enumerate(self.A_ub_17): + xta_indices = np.where(row[self.Xta_start:self.Xta_start + self.T * self.A] == 1)[0] + xtp_indices = np.where(row[self.Xtp_start:self.Xtp_start + self.T * self.P] == 1)[0] + + if len(xta_indices) > 0 and len(xtp_indices) > 0: + xta_idx = xta_indices[0] + xtp_idx = xtp_indices[0] + t_ta = xta_idx 
// self.A + a = xta_idx % self.A + t_tp = xtp_idx // self.P + p = xtp_idx % self.P + + # Get weather info + period_weather = self.weather[p] if p < len(self.weather) else 0 + asset_max_weather = self.asset_groups[a].get('max_weather', float('inf')) + + print(f" Weather constraint: Xta[{t_ta},{a}] + Xtp[{t_tp},{p}] ≤ 1 (weather {period_weather} > max {asset_max_weather})") + + if i >= 4: # Limit output to avoid too much detail + remaining = len(rows_17) - i - 1 + if remaining > 0: + print(f" ... and {remaining} more weather constraints") + break + + if self.wordy > 0: + print(f"Constraint 17 built with {len(rows_17)} weather restrictions.") + else: + if self.wordy > 0: + print("Constraint 17 built (no weather restrictions needed).") + + + # --- End Constraints --- + + if self.wordy > 0: + print("All constraints built. Stacking and checking constraints...") + + # --- Assemble the SciPy Constraints --- + # A series of linear constraints required by the solver by stacking the constraint matrices and limits vectors + # The number of rows in these matrices is equal to the number of constraints, so they can be vertically stacked + + # Check num columns of all constraint matrices matches number of decision variables before stacking + for i, A in enumerate(A_ub_list): + if A.size > 0 and A.shape[1] != num_variables: + raise ValueError(f"Upper bound constraint matrix {i} has incorrect number of columns. Expected {num_variables}, got {A.shape[1]}.") + for i, A in enumerate(A_eq_list): + if A.size > 0 and A.shape[1] != num_variables: + raise ValueError(f"Equality constraint matrix {i} has incorrect number of columns. Expected {num_variables}, got {A.shape[1]}.") + for i, A in enumerate(A_lb_list): + if A.size > 0 and A.shape[1] != num_variables: + raise ValueError(f"Lower bound constraint matrix {i} has incorrect number of columns. 
Expected {num_variables}, got {A.shape[1]}.") + + # Stack, check shapes of final matrices and vectors, and save the number of constraints for later use + if len(A_ub_list) > 0: + A_ub = np.vstack(A_ub_list) # upperbound coefficient matrix + b_ub = np.concatenate(b_ub_list) # upperbound limits vector + if A_ub.shape[0] != b_ub.shape[0]: + raise ValueError(f"A_ub and b_ub have inconsistent number of rows. A_ub has {A_ub.shape[0]}, b_ub has {b_ub.shape[0]}.") + self.num_ub_constraints = A_ub.shape[0] + else: + self.num_ub_constraints = 0 + + if len(A_eq_list) > 0: + A_eq = np.vstack(A_eq_list) # equality coefficient matrix + b_eq = np.concatenate(b_eq_list) # equality limits vector + if A_eq.shape[0] != b_eq.shape[0]: + raise ValueError(f"A_eq and b_eq have inconsistent number of rows. A_eq has {A_eq.shape[0]}, b_eq has {b_eq.shape[0]}.") + self.num_eq_constraints = A_eq.shape[0] + else: + self.num_eq_constraints = 0 + + if len(A_lb_list) > 0: + A_lb = np.vstack(A_lb_list) # lowerbound coefficient matrix + b_lb = np.concatenate(b_lb_list) # lowerbound limits vector + if A_lb.shape[0] != b_lb.shape[0]: + raise ValueError(f"A_lb and b_lb have inconsistent number of rows. 
A_lb has {A_lb.shape[0]}, b_lb has {b_lb.shape[0]}.") + self.num_lb_constraints = A_lb.shape[0] + else: + self.num_lb_constraints = 0 + + if self.wordy > 0: + print(f"Final constraint matrices built with {self.num_ub_constraints} upperbound constraints, {self.num_eq_constraints} equality constraints, and {self.num_lb_constraints} lowerbound constraints.") + + # Build constraint objects if they exist + constraints = [] + if self.num_ub_constraints > 0: + constraints.append(optimize.LinearConstraint(A = A_ub, ub = b_ub)) + if self.num_eq_constraints > 0: + constraints.append(optimize.LinearConstraint(A = A_eq, lb = b_eq, ub = b_eq)) # equality constraints have same lower and upper bounds (thuis equality) + if self.num_lb_constraints > 0: + constraints.append(optimize.LinearConstraint(A = A_lb, lb = b_lb)) + + # --- Save the optimization problem parameters for later use --- + self.values = values + self.constraints = constraints + self.integrality = integrality + self.bounds = bounds + + if self.wordy > 0: + print("Optimizer set up complete.") + + def optimize(self, threads = -1): + ''' + Run the optimizer + + Inputs + ------ + threads : int, None + Number of threads to use (<0 or None to auto-detect). 
+ + Returns + ------- + None + ''' + + # --- set up the optimizer --- + # if the optimizer has not been set up yet, set it up + if not hasattr(self, 'values') or not hasattr(self, 'constraints') or not hasattr(self, 'integrality') or not hasattr(self, 'bounds'): + self.set_up_optimizer() + + if self.wordy > 0: + print("Starting optimization...") + + # --- Check for valid inputs --- + if not isinstance(threads, int) and threads is not None: + raise ValueError("threads must be an integer or None.") + + # detect max number of threads on system if requested for passing into solver + if threads < 0 or threads is None: + threads = os.cpu_count() + if threads is None: + raise ValueError("Could not detect number of CPU threads on system.") + + # --- call the solver --- + res = milp( + c=self.values, # milp function doesnt not care about the shape of values, just that it is a 1D array + constraints=self.constraints, + integrality=self.integrality, # milp function doesnt not care about the shape of values, just that it is a 1D array + bounds=self.bounds + ) + + if self.wordy > 0: + print("Solver complete. Analyzing results...") + print("Results: \n", res) + + # --- process the results --- + if res.success: + # Reshape the flat result back into the (num_periods, num_tasks, num_assets) shape + + if self.wordy > 5: + print("Decision variable [periods][tasks][assets]:") + for i in range(len(self.X_indices)): + print(f" {self.X_indices[i]}: {int(res.x[i])}") + + if self.wordy > 0: + print("Optimization successful. 
The following schedule was generated:") + + x_opt = res.x # or whatever your result object is + Xta = x_opt[self.Xta_start:self.Xta_end].reshape((self.T, self.A)) + Xtp = x_opt[self.Xtp_start:self.Xtp_end].reshape((self.T, self.P)) + #Xap = x_opt[self.Xap_start:self.Xap_end].reshape((self.A, self.P)) + Xts = x_opt[self.Xts_start:self.Xts_end].reshape((self.T, self.S)) + + for p in range(self.P): + weather_condition = self.weather[p] + pstring = f"Period {p:2d} (weather {weather_condition:2d}): " + + for t in range(self.T): + if Xtp[t, p] > 0: + # Find assigned asset for this task + a_assigned = np.argmax(Xta[t, :]) # assumes only one asset per task + cost = self.task_asset_matrix[t, a_assigned, 0] + duration = self.task_asset_matrix[t, a_assigned, 1] + + # Get asset group information + asset_group = self.asset_groups[a_assigned] + if isinstance(asset_group, dict): + # Handle different asset group formats + if 'assets' in asset_group: + # Format: {'assets': ['asset1', 'asset2']} + asset_list = asset_group['assets'] + if isinstance(asset_list, list) and len(asset_list) > 0: + asset_name = f"Group({', '.join(asset_list)})" + else: + asset_name = f"Asset Group {a_assigned}" + else: + # Format: {'group_name': ['asset1', 'asset2']} + group_names = list(asset_group.keys()) + if group_names: + group_name = group_names[0] # Take first group name + asset_list = asset_group[group_name] + if isinstance(asset_list, list): + asset_name = f"{group_name}({', '.join(asset_list)})" + else: + asset_name = group_name + else: + asset_name = f"Asset Group {a_assigned}" + else: + asset_name = f"Asset Group {a_assigned}" + + # Format with fixed widths for proper alignment + task_info = f"{asset_name:<20} → Task {t:2d} (cost: {cost:6.0f}, dur: {duration:2d})" + pstring += f"{task_info:<55} | " + else: + # Empty slot with proper spacing + pstring += f"{'':55} | " + + print(pstring) + + # NEW: Compact Gantt-style visualization + if self.wordy > 0: + print("\n" + "="*80) + print("GANTT CHART 
VIEW (Tasks as rows, Periods as columns)") + print("="*80) + + # Header row with period numbers + header = "Task Name |" + for p in range(self.P): + header += f"{p%10}" + header += "| Asset Group" + print(header) + print("-" * len(header)) + + # Each task gets a row + for t in range(self.T): + task_name = self.tasks[t] if t < len(self.tasks) else f"Task{t}" + row = f"{task_name:<20}|" + + # Find which asset group is assigned to this task + a_assigned = np.argmax(Xta[t, :]) + + for p in range(self.P): + if Xtp[t, p] > 0: + # Use a character to indicate this task is active + row += "█" + else: + row += " " + row += "|" + + # Add asset group information at the end of the row + asset_group = self.asset_groups[a_assigned] + if isinstance(asset_group, dict): + group_names = list(asset_group.keys()) + if group_names: + group_name = group_names[0] + asset_list = asset_group[group_name] + if isinstance(asset_list, list): + row += f" {group_name}: {', '.join(asset_list)}" + else: + row += f" {group_name}" + else: + row += f" Group {a_assigned}" + else: + row += f" Group {a_assigned}" + + print(row) + + # Footer with weather conditions + weather_row = "Weather |" + for p in range(self.P): + weather_row += f"{self.weather[p]%10}" + weather_row += "|" + print("-" * len(header)) + print(weather_row) + print("="*80 + "\n") + + if self.wordy > 0: + print("Optimization function complete.") + + +if __name__ == "__main__": + + os.system("clear") # for clearing terminal on Mac + + # A simple dummy system to test the scheduler with weather constraints + + # Weather periods with varying conditions (1=calm, 2=moderate, 3=severe) + weather = [1, 1, 2, 3, 1] # 6 time periods with different weather conditions + + # Example tasks, assets, dependencies, and task_asset_matrix + tasks = [ + "task1", + "task2" + ] + assets = [ + {"name": "heavy_asset", "max_weather": 3}, # Can work in all weather conditions + {"name": "light_asset", "max_weather": 1} # Can only work in calm weather (1) + ] + 
asset_groups = [ + {'assets': ['heavy_asset']}, + {'assets': ['light_asset', 'heavy_asset']}, + ] + + # task dependencies + task_dependencies = { + "task1": [], # task1 has no dependencies + "task2": ["task1"] # task2 depends on task1 + } + + # dependency types (optional - defaults to "finish_start" if not specified) + dependency_types = { + "task1->task2": "finish_start" # task2 starts after task1 finishes + } + + # offsets (optional - defaults to 0 if not specified) + # In this example, no offset is needed - task2 can start immediately after task1 finishes + offsets = {} + + # cost and duration tuples for each task-asset pair. -1 indicates asset-task paring is invalid + task_asset_matrix = np.array([ + [(2000, 2), (1000, 3)], # task1: heavy_asset (expensive but fast), light_asset (cheap but slow) + [(1500, 3), (-1, -1)] # task2: both assets can do it, light_asset is cheaper + ]) + + # Expected behavior with weather constraints: + # - light_asset can only work in periods 0,1,5 (weather=1) + # - heavy_asset can work in any period (max_weather=3) + # - task2 depends on task1 (finish_start dependency) + + # Find the minimum time period duration based on the task_asset_matrix + min_duration = np.min(task_asset_matrix[:, :, 1][task_asset_matrix[:, :, 1] > 0]) # minimum non-zero duration + + # Sandbox for building out the scheduler + scheduler = Scheduler(task_asset_matrix, tasks, assets, task_dependencies, dependency_types, offsets, weather, min_duration, asset_groups=asset_groups) + scheduler.optimize() + + a = 2 + + + # # A more complex dummy system to test the scheduler (uncomment and comment out above to run) + + # # 10 weather periods = 10 time periods + # weather = [1]*5 + [2]*1 + [3]*1 + [1]*3 # Three weather types for now. Example weather windows. The length of each window is equal to min_duration + + # # Example tasks, assets, dependencies, and task_asset_matrix. Eventually try with more tasks than assets, more assets than tasks, etc. 
+ # tasks = [ + # "task1", + # "task2", + # "task3" + # ] + # assets = [ + # {"name": "asset1", "max_weather" : 3}, + # {"name": "asset2", "max_weather" : 2}, + # {"name": "asset3", "max_weather" : 1}, + # {"name": "asset4", "max_weather" : 1} + # ] + + # # task dependencies + # task_dependencies = { + # "task1": [], # task1 has no dependencies + # "task2": ["task1"], # task2 depends on task1 + # "task3": ["task1", "task2"] # task3 depends on both task1 and task2 + # } + + # # dependency types (optional - demonstrates different types) + # dependency_types = { + # "task1->task2": "finish_start", # task2 starts after task1 finishes (default) + # "task1->task3": "start_start", # task3 starts when task1 starts + # "task2->task3": "same_asset" # task3 must use same asset as task2 + # } + + # # random cost and duration tuples for each task-asset pair. -1 indicates asset-task paring is invalid + # task_asset_matrix = np.array([ + # [(3000, 2), (2000, 3), (1000, 4), (4000, 5)], # task 1: asset 1, asset 2, asset 3, asset 4 + # [(1200, 5), ( -1,-1), ( -1,-1), ( -1,-1)], # task 2: asset 1, asset 2, asset 3, asset 4 + # [(2500, 3), (1500, 2), ( -1,-1), ( -1,-1)] # task 3: asset 1, asset 2, asset 3, asset 4 + # ]) + + # # optimal assignment: task 1 with asset 1 in periods 1-2, task 2 with asset 1 in period 3 + + # # Find the minimum time period duration based on the task_asset_matrix + # min_duration = np.min(task_asset_matrix[:, :, 1][task_asset_matrix[:, :, 1] > 0]) # minimum non-zero duration + + # # Sandbox for building out the scheduler + # scheduler = Scheduler(task_asset_matrix, tasks, assets, task_dependencies, dependency_types, weather, min_duration) + # scheduler.optimize() \ No newline at end of file diff --git a/famodel/irma/schedulerREADME.md b/famodel/irma/schedulerREADME.md new file mode 100644 index 00000000..0a3f4747 --- /dev/null +++ b/famodel/irma/schedulerREADME.md @@ -0,0 +1,173 @@ +# Scheduler Mathematical Formulation (as implemented in scheduler.py) + 
+This document describes the mathematical formulation of the scheduling problem solved by the `Scheduler` class, using multiple decision variables and following the numbering and naming conventions in `scheduler.py`. + +## Sets and Indices +- $T$: Set of tasks, $t = 0, \ldots, T-1$ +- $A$: Set of assets, $a = 0, \ldots, A-1$ +- $P$: Set of periods, $p = 0, \ldots, P-1$ +- $S$: Set of possible start periods, $s = 0, \ldots, S-1$ ($S = P$) + +## Parameters +- $c_{t,a}$: Cost of assigning asset $a$ to task $t$ +- $d_{t,a}$: Duration (in periods) required for asset $a$ to complete task $t$ + +## Decision Variables +- $X_{t,a} \in \{0,1\}$: 1 if task $t$ is assigned to asset $a$, 0 otherwise +- $X_{t,p} \in \{0,1\}$: 1 if task $t$ is active in period $p$, 0 otherwise +- $X_{t,s} \in \{0,1\}$: 1 if task $t$ starts at period $s$, 0 otherwise + +$x = [X_{t,a} X_{t,p} X_{t,s}] $ + +## Objective Function +Minimize total cost (cost is only determined by task-asset assignment): + +$$ +\min \sum c_{t,a} x +$$ + +The $c$ vector also contains 'cost' penalties for later start times $(X_{t,s})$ to prioritize tasks starting as early as they can (used to be Constraint 7) + +## Constraints + +The below constraints are formulated such that they can be made into three giant matrices of upper and lower bounds and equalities. +When added together, these are the upper bound constraint, the lower bound constraint, and the equality constraint. The solver +attempts to solve the objective function subject to: + +$$ +\text{1) } A_{ub} \text{ } x \text{ } \leq b_{ub} \\ +\text{2) } A_{eq} \text{ } x \text{ } = b_{eq} \\ +\text{3) } A_{lb} \text{ } x \text{ } \geq b_{lb} \\ +\text{4) } 0 \leq \text{ } x \text{ } \leq 1 \\ +$$ + +### 1. Task-Asset Validity +Only valid task-asset pairs can be assigned: + +$$ +X_{t,a} = 0 \quad \forall t, a \text{ where } c_{t,a} < 0 \text{ or } d_{t,a} < 0 +$$ + +### 3. 
At Least One Asset Per Task +Sum of all task-asset pairs must be >= 1 for each task: + +$$ +\sum_{a=0}^{A-1} X_{t,a} \geq 1 \quad \forall t +$$ + +### 15. The number of task-start-time pairs must be equal to the number of tasks +This ensures that each task is assigned exactly 1 start time. + +$$ +\sum_{s=0}^{S-1} X_{t,s} = 1 \quad \forall t +$$ + +### 10. A task cannot start in a period where its duration would exceed the maximum number of time periods +This ensures that a task is not assigned to a period that would cause it to exceed the total number of periods available. + +$$ +X_{t,a}[t,a] + X_{t,s}[t,s] <= 1 \quad \forall t, a, s \text{ where } d_{t,a} > 0,\ s + d_{t,a} > P +$$ + +When a task-asset pair is assigned, then for each start time of that task, it $(X_{t,s})$ has to be zero under these conditions, where $s + d_{t,a} > P$ + +### 14a. A task must occupy the same period that it starts in +This ensures that the task start-time decision variable is non-zero if a task is assigned to any period. + +$$ +X_{t,p}[t,s] \geq X_{t,s}[t,s] \quad \forall t, s +$$ + +In every start time for each task, the corresponding period must be equal to that start time decision variable + +### 14b. A task-asset assignment must be active for the duration required by that assignment + +14a ensured that if a task is assigned a start time, the corresponding task-period pair for the period equal to the start time is selected. + +14b ensures that if a task is assigned a start time, the number of periods equal to the duration of the task are also turned on. + +$$ +X_{t,a}[t,a] + X_{t,s}[t,s] - X_{t,p}[t,p] <= 1 \quad \forall t,a,s,p \text{ where } d_{t,a} > 0 \text{ and } s \leq p < s + d_{t,a} +$$ + +If a task-asset pair is assigned and the task starts at $s$, then every period $p$ in the range $s \leq p < s + d_{t,a}$ must be turned on. + +### Weather +An asset cannot work on a task in a period whose weather exceeds the asset's capability: if weather[p] > asset[a]['max_weather'], then $X_{t,a}[t,a] + X_{t,p}[t,p] <= 1$ + +Meaning, do not turn on the task in a period when the period's weather is greater than the maximum allowable weather capability of the asset + +### 2. Task Dependencies +Tasks with dependencies must be scheduled according to their dependency rules. 
+ +We have a set of different dependency type options: +- Finish-Start: the dependent task starts after the prerequisite task finishes +- Start-Start: the dependent task starts when the prerequisite task starts +- Finish-Finish: the dependent task finishes when the prerequisite task finishes +- Same-Asset: the dependent task must use the same asset as the prerequisite task + +For all valid start times s for task t, if $X_{t,s}[t,s]=1$, then there is some other start time $s_d$ for task d so that $X_{t,s}[d,s_d]=1$ and $s_d + duration <= s$ + +$X_{t,s}[t,s] <= \sum X_{t,s}[d,s_d]$ from $s$ to $s_d + duration$ + +--- + +**Notes:** +- $d_{t,a}$ is the duration for asset $a$ assigned to task $t$. If multiple assets are possible, $X_{t,a}$ determines which duration applies. +- This approach separates assignment, activity, and start variables for clarity and easier constraint management. + +- Constraints can be extended for parallel tasks, multiple assets per task, or other requirements as needed. +- One of the better references to understand this approach is Irwan et al. 2017 (reference link lost in formatting — TODO restore URL) +- The `scheduler.py` file also has some TODOs, which are focused on software development. 
diff --git a/famodel/irma/scheduler_example.py b/famodel/irma/scheduler_example.py new file mode 100644 index 00000000..daa74376 --- /dev/null +++ b/famodel/irma/scheduler_example.py @@ -0,0 +1,79 @@ +from famodel.irma.scheduler import Scheduler +import numpy as np + + + +# weather +weather = [1, 1, 1, 1, 1] + +# tasks +tasks = [ +{ + 'name': "install_mooring", + 'requirements': ['mooring_reel', 'positioning'] +}, +{ + 'name': "install_anchor", + 'requirements': ['anchor_handling','positioning'] +} +] + +# assets +assets = [ +{ + 'name': 'AHTS', + 'capabilities': ['anchor_handling', 'mooring_reel', 'positioning'], + 'daily_cost': 50000, + 'max_weather': 2 +}, +{ + 'name': 'MPSV', + 'capabilities': ['mooring_reel', 'positioning'], + 'daily_cost': 25000, + 'max_weather': 1 +} +] + +# task-asset matrix +task_asset_matrix = np.array([ + [(2000, 2), (1000, 3), (2500, 3)], + [(1500, 3), (-1, -1), (4000, 2)] +]) + +# asset groups +asset_groups = [ +{ + 'assets': ['AHTS'], +}, +{ + 'assets': ['MPSV'], +}, +{ + 'assets': ['AHTS','MPSV'], +}, +] + +# task dependencies +task_dependencies = { +'install_mooring': ['install_anchor'] # Mooring installation depends on anchor installation +} + +# dependency types +dependency_types = { + 'install_anchor->install_mooring': 'start_start' # Mooring installation starts when anchor installation starts +} + +offsets = { + #'install_anchor->install_mooring': 1 # Mooring installation to start 1 period after Anchor installation + 'install_anchor->install_mooring': (1, 'exact') # Tuple format: (value, type) +} + +# calculate the minimum duration +min_duration = np.min(task_asset_matrix[:, :, 1][task_asset_matrix[:, :, 1] > 0]) # minimum non-zero duration + +# initialize and run the scheduler +scheduler = Scheduler(task_asset_matrix, tasks, assets, task_dependencies, dependency_types, offsets, weather, min_duration, asset_groups=asset_groups) +scheduler.optimize() + +a = 2 + diff --git a/famodel/irma/scheduler_tutorial.ipynb 
b/famodel/irma/scheduler_tutorial.ipynb new file mode 100644 index 00000000..abb7fe5b --- /dev/null +++ b/famodel/irma/scheduler_tutorial.ipynb @@ -0,0 +1,1337 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "a122639d", + "metadata": {}, + "source": [ + "# IRMA Scheduler Tutorial\n", + "\n", + "Welcome to the Interactive Tutorial for the **IRMA (Installation Resource Management & Analytics) Scheduler**!\n", + "\n", + "This notebook aims to guide new users/readers through the Mixed Integer Linear Programming (MILP) of the scheduler.\n", + "\n", + "## Introduction\n", + "Imagine you are planning the schedule of installation activities for the deployment of an offshore system. \n", + "\n", + "📋 **Many Tasks**: Discrete work activities that need to be completed (e.g., anchor installation, cable laying) \n", + "🚢 **Multiple Assets**: Resources (typically vessels) that can perform certain Tasks based on their capabilities (e.g., heavy-lift vessel, cable-laying vessel, support vessel) \n", + "⚙️ **Task-Asset Assignments**: Multiple Assets can be assigned to each Task, where each assignment has an associated cost and time duration \n", + "🌊 **Weather Windows**: Assets can only work in suitable sea conditions (e.g., wind speed limits, wave height limits) \n", + "💰 **Time and Cost**: The schedule should aim to minimize the total cost of all tasks performed\n", + "\n", + "How can the scheduler figure out what assets to assign to what tasks and when to perform each task to minimize time and/or cost?" + ] + }, + { + "cell_type": "markdown", + "id": "eb9a2e12", + "metadata": {}, + "source": [ + "### Mixed Integer Linear Programming (MILP)\n", + "\n", + "A type of optimization problem that uses a mixture of integer, binary, and continuous variables subject to linear constraints to minimize an objective. 
\n", + "\n", + "As an example, let's say you own a truck delivery company with 3 trucks and you need to decide which trucks to send out for delivery, where each truck ($x_i$) has a cost of delivery and a time duration. The goal is to minimize cost of delivery, under a constraint that the total delivery time needs to be at least 12 hours. The only decisions are to either send out the truck for delivery (1) or not (0).\n", + "\n", + "Minimize the cost function\n", + "$$ 500x_1 + 400x_2 + 300x_3 $$\n", + "\n", + "subject to specific time constraints (a little counterintuitive that we need a lower bound on time, but it'll help with the whole tutorial)\n", + "\n", + "$$ 7x_1 + 6x_2 + 4x_3 \\geq 12 $$\n", + "\n", + "A MILP solver will realize that it needs at least two trucks for delivery to satisfy the time constraint and it will also figure out that Truck 2 ($x_2$) and Truck 3 ($x_3$) will not satisfy the constraint and neither will Truck 1 ($x_1$) and Truck 3 ($x_3$). That leaves the options of Truck 1 and Truck 2, or all three trucks. It will choose only Truck 1 and Truck 2 since that minimizes cost. (This also assumes that each truck can only be used once).\n", + "\n", + "This tutorial only considers binary variables (for now),\n", + "\n", + "$$x \\in \\{0,1\\}$$\n", + "\n", + "to determine what decisions to make to minimize the objective of the scheduler." + ] + }, + { + "cell_type": "markdown", + "id": "dfbed4e7", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "First, let's import the necessary libraries and set up our environment." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "85932d75", + "metadata": {}, + "outputs": [], + "source": [ + "# Standard libraries\n", + "import numpy as np\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "import sys\n", + "import os\n", + "\n", + "# Add the FAModel path for imports\n", + "sys.path.append(os.path.join(os.getcwd(), 'famodel'))\n", + "\n", + "# Import the IRMA scheduler\n", + "from famodel.irma.scheduler import Scheduler\n", + "\n", + "# Set up plotting\n", + "plt.style.use('default')\n", + "plt.rcParams['figure.figsize'] = [12, 8]\n", + "\n", + "print(\"✅ Libraries imported successfully!\")\n", + "print(\"📊 Ready to explore the IRMA Scheduler!\")" + ] + }, + { + "cell_type": "markdown", + "id": "5fa0ce33", + "metadata": {}, + "source": [ + "## Simple Case: Two Tasks, Two Assets\n", + "\n", + "Let's start with the most basic scenario to understand the fundamentals:\n", + "\n", + "### Tasks\n", + "\n", + "Let's say that the installation of an offshore system requires two tasks: installing a mooring line, and installing an anchor, where each task has certain requirements that are needed to complete the task." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "96182f3d", + "metadata": {}, + "outputs": [], + "source": [ + "tasks = [\n", + " {\n", + " 'name': \"install_mooring\",\n", + " 'requirements': ['mooring_reel', 'positioning']\n", + " },\n", + " {\n", + " 'name': \"install_anchor\",\n", + " 'requirements': ['anchor_handling','positioning']\n", + " }\n", + "]\n", + "\n", + "# Display task information\n", + "task_df = pd.DataFrame(tasks)\n", + "print(task_df)" + ] + }, + { + "cell_type": "markdown", + "id": "3312e500", + "metadata": {}, + "source": [ + "### Assets\n", + "\n", + "And that there are two vessels (assets) that could potentially be used to perform these installations, each with their own set of capabilities, daily cost, and an integer value to represent what weather conditions it can operate in. For example, the Multi-Purpose Supply Vessel (MPSV) cannot operate in wave heights greater than 2 m, but the Anchor Handling Tug Supply Vessel (AHTS) can, but no greater than 4 m." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5f7436a7", + "metadata": {}, + "outputs": [], + "source": [ + "# Define our installation vessel\n", + "assets = [\n", + " {\n", + " 'name': 'AHTS', \n", + " 'capabilities': ['anchor_handling', 'mooring_reel', 'positioning'],\n", + " 'daily_cost': 50000,\n", + " 'max_weather': 2\n", + " },\n", + " {\n", + " 'name': 'MPSV', \n", + " 'capabilities': ['mooring_reel', 'positioning'],\n", + " 'daily_cost': 25000,\n", + " 'max_weather': 1\n", + " }\n", + "]\n", + "\n", + "print(\"🚢 Asset Definition:\")\n", + "asset_df = pd.DataFrame(assets)\n", + "print(asset_df)" + ] + }, + { + "cell_type": "markdown", + "id": "bb6e9b04", + "metadata": {}, + "source": [ + "### The Task-Asset Matrix\n", + "\n", + "Through a process that is still yet to be determined (TODO...Stein has something started), we can generate a **Task-Asset Matrix** that defines the cost and duration to perform each task by each set of assets.\n", + "\n", + "Each row of the task-asset matrix represents a different task and each column of the task-asset matrix represents a combination of assets.\n", + "\n", + "Entries with values of -1 represent task-asset pairs that are not feasible. Something like installing an anchor with a kayak." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "109e75be", + "metadata": {}, + "outputs": [], + "source": [ + "# | AHTS | MPSV | AHTS+MPSV |\n", + "# Install Mooring | (c, d) | (c, d) | (c, d) |\n", + "# Install Anchor | (c, d) | (c, d) | (c, d) |\n", + "task_asset_matrix = np.array([\n", + " [(2000, 2), (1000, 3), (2500, 3)],\n", + " [(1500, 3), (-1, -1), (4000, 2)]\n", + "])\n", + "\n", + "print(\"🔗 Task-Asset Compatibility:\")\n", + "print(task_asset_matrix)" + ] + }, + { + "cell_type": "markdown", + "id": "3f952cc3", + "metadata": {}, + "source": [ + "### Asset Groups\n", + "\n", + "Different combinations of assets can be used for each task, and each produce a different cost and duration to perform the task based on the capabilities of the assets and the requirements of the task.\n", + "\n", + "The matrix generation process will filter out asset combinations that do not make sense (i.e., overlapping capabilities, maximum number of assets involved, extremely high costs, etc.)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f52ed642", + "metadata": {}, + "outputs": [], + "source": [ + "asset_groups = [\n", + " {\n", + " 'assets': ['AHTS'], \n", + " },\n", + " {\n", + " 'assets': ['MPSV'], \n", + " },\n", + " {\n", + " 'assets': ['AHTS','MPSV'], \n", + " },\n", + "]\n", + "\n", + "print(\"🚢 Asset Groups\")\n", + "asset_group_df = pd.DataFrame(asset_groups)\n", + "print(asset_group_df)" + ] + }, + { + "cell_type": "markdown", + "id": "47d60811", + "metadata": {}, + "source": [ + "### Time Periods & Weather\n", + "\n", + "We can also define the planning horizon (timeline) as a set of time periods with given weather conditions. 
Time periods could be any duration of time (e.g., hours, days, weeks, etc.).\n", + "\n", + "Good weather is normally designated by a 1, OK weather is designated by a 2, and bad weather is designated by a 3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34d54959", + "metadata": {}, + "outputs": [], + "source": [ + "weather = [1, 1, 1, 1, 1] # Start by defining 5 time periods, each with good weather\n", + "\n", + "print(\"📅 Planning Horizon:\")\n", + "weather_df = pd.DataFrame({\n", + " 'Weather_Condition': weather,\n", + " 'Description': ['Good weather'] * len(weather)\n", + "})\n", + "print(weather_df)" + ] + }, + { + "cell_type": "markdown", + "id": "81284df5", + "metadata": {}, + "source": [ + "### Running the Simple Scheduler\n", + "\n", + "Now let's create and run our first scheduler instance, which simply sets up many variables within the Scheduler class." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c920c16f", + "metadata": {}, + "outputs": [], + "source": [ + "# Create the scheduler for our simple scenario\n", + "print(\"🔧 Creating scheduler for simple scenario...\")\n", + "\n", + "scheduler = Scheduler(\n", + " tasks=tasks,\n", + " assets=assets,\n", + " task_asset_matrix=task_asset_matrix,\n", + " weather=weather,\n", + " asset_groups=asset_groups,\n", + " wordy=0 # Enable some debug output\n", + ")\n", + "\n", + "print(\"✅ Scheduler created successfully!\")" + ] + }, + { + "cell_type": "markdown", + "id": "d9604377", + "metadata": {}, + "source": [ + "## Understanding the Mathematical Constraints\n", + "\n", + "Before we dive into the results of the scheduler, let's understand how the IRMA scheduler actually works under the hood\n", + "\n", + "### Decision Variables:\n", + "\n", + "The scheduler initialization uses three types of binary decision variables in the MILP optimization (each can be 0 or 1):\n", + "\n", + "**📋 Task-Asset Assignment Variables** `Xta[t,a]`: determines whether task $t$ is 
assigned to asset group $a$\n", + "- `Xta[0,1] = 1` means \"Task 0 is assigned to Asset 1\"\n", + "- `Xta[0,1] = 0` means \"Task 0 is NOT assigned to Asset 1\"\n", + "\n", + "**⏰ Task-Period Activity Variables** `Xtp[t,p]`: determines if task $t$ is active in period $p$\n", + "- `Xtp[0,3] = 1` means \"Task 0 is active during Period 3\" \n", + "- `Xtp[0,3] = 0` means \"Task 0 is NOT active during Period 3\"\n", + "\n", + "**🚀 Task Start Time Variables** `Xts[t,s]`: determines if task $t$ starts at period $s$\n", + "- `Xts[0,2] = 1` means \"Task 0 starts at Period 2\"\n", + "- `Xts[0,2] = 0` means \"Task 0 does NOT start at Period 2\"\n", + "\n", + "The only 'decisions' are whether certain asset groups are assigned to certain tasks $(X_{t,a})$, and when the task starts $(X_{t,s})$. The $X_{t,p}$ variables are also included to help organize the constraints in determining what periods each task occupies, based on the duration of the task-asset combination defined in the task-asset matrix.\n", + "\n", + "The full decision variable vector $x$ then follows the form of \n", + "\n", + "$$ x = [X_{t,a}, X_{t,p}, X_{t,s}] $$\n", + "\n", + "where the length depends on the number of tasks $T$, the number of asset groups $A$, and the number of periods $P$\n" + ] + }, + { + "cell_type": "markdown", + "id": "fab8e41a", + "metadata": {}, + "source": [ + "### Constraints\n", + "\n", + "Each constraint ensures the solution makes logical sense. In an MILP optimization, constraints are set to be linear and follow the form of \n", + "\n", + "$$\n", + " A_{ub} \\text{ } x \\text{ } \\leq b_{ub} \\\\\n", + " A_{eq} \\text{ } x \\text{ } = b_{eq} \\\\\n", + " A_{lb} \\text{ } x \\text{ } \\geq b_{lb} \\\\\n", + "$$\n", + "\n", + "where $A$ and $b$ represent large matrices and vectors that when multiplied by the binary decision variables of the $x$ vector, need to satisfy the constraint according to $b$." 
+ ] + }, + { + "cell_type": "markdown", + "id": "8178632c", + "metadata": {}, + "source": [ + "In the example at the top of this tutortial, we would structure the constraint as the following:\n", + "\n", + "$$\n", + "\\begin{bmatrix}\n", + "7 & 6 & 4\n", + "\\end{bmatrix}\n", + "\\begin{bmatrix}\n", + "x_1 \\\\ x_2 \\\\ x_3\n", + "\\end{bmatrix}\n", + "\\ge 12\n", + "$$\n", + "\n", + "This format is used to define many other constraints necessary for a logical sotion for the scheduler. Each constraint is explained below. But first, we run the `set_up_optimizer()` function to create all the constraints that we can then analyze through this tutorial." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "00766591", + "metadata": {}, + "outputs": [], + "source": [ + "scheduler.set_up_optimizer()" + ] + }, + { + "cell_type": "markdown", + "id": "8b8d4ec3", + "metadata": {}, + "source": [ + "#### Constraint 1: Task-Asset Validity 🔒\n", + "\n", + "**English**: An asset group can only be assigned to a task if the asset group can perform the task.\n", + "\n", + "**Impact**: Prevents impossible assignments (Xta variables that correspond to invalid entries in the task-asset matrix from being turned on) that would break the physics of the problem.\n", + "\n", + "**Math**: \n", + "$$\n", + "X_{t,a} = 0 \\quad \\text{for all } (t,a) \\text{ where } c_{t,a} < 0 \\text{ or } d_{t,a} < 0\n", + "$$\n", + "\n", + "**Implementation** for the current example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "41a2e1e6", + "metadata": {}, + "outputs": [], + "source": [ + "# Constraint 1: Task-Asset Validity Matrix Construction\n", + "print(\"🔒 Constraint 1: Task-Asset Validity\")\n", + "print()\n", + "\n", + "if hasattr(scheduler, 'A_eq_1'):\n", + " print(f\"A_eq_1 matrix equations (shape: {scheduler.A_eq_1.shape}):\")\n", + " for i, (row, b_val) in enumerate(zip(scheduler.A_eq_1, scheduler.b_eq_1)):\n", + " row_str = '[' + ' '.join([str(val) for 
val in row]) + ']'\n", + " print(f\"{row_str} x = {b_val}\")\n", + "else:\n", + " print(\"No invalid task-asset pairs found - no constraints needed\")" + ] + }, + { + "cell_type": "markdown", + "id": "325b590e", + "metadata": {}, + "source": [ + "In this example, we are constraining the system so that\n", + "\n", + "$$\n", + "X_{t,a}[1,1] = 0\n", + "$$\n", + "\n", + "since that is the only entry of the task-asset matrix that is infeasible" + ] + }, + { + "cell_type": "markdown", + "id": "15391d22", + "metadata": {}, + "source": [ + "#### Constraint 3: Exactly One Asset Per Task ⚖️\n", + "\n", + "**English**: Each task must be assigned to exactly one asset group\n", + "\n", + "**Impact**: Prevents a task from being unassigned (would never complete) or over-assigned (physically impossible).\n", + "\n", + "**Math**: \n", + "$$\n", + "\\sum_{a=0}^{A-1} X_{t,a} = 1 \\quad \\forall t \\in \\{0, 1, \\ldots, T-1\\}\n", + "$$\n", + "\n", + "**Implementation** for the current example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36b17bb8", + "metadata": {}, + "outputs": [], + "source": [ + "# Constraint 3: Exactly One Asset Per Task Matrix Construction\n", + "print(\"⚖️ Constraint 3: Exactly One Asset Per Task\")\n", + "print()\n", + "\n", + "print(f\"A_eq_3 matrix equations (shape: {scheduler.A_eq_3.shape}):\")\n", + "for i, (row, b_val) in enumerate(zip(scheduler.A_eq_3, scheduler.b_eq_3)):\n", + " row_str = '[' + ' '.join([str(val) for val in row]) + ']'\n", + " print(f\"{row_str} x = {b_val}\")" + ] + }, + { + "cell_type": "markdown", + "id": "2b1a8158", + "metadata": {}, + "source": [ + "In this example, we are constraining the system so that\n", + "\n", + "$$ X_{t,a}[0,0] + X_{t,a}[0,1] + X_{t,a}[0,2] = 1 $$ and $$ X_{t,a}[1,0] + X_{t,a}[1,1] + X_{t,a}[1,2] = 1 $$\n", + "\n", + "which doesn't allow more than 1 asset group assignment per task\n" + ] + }, + { + "cell_type": "markdown", + "id": "a4283498", + "metadata": {}, + "source": [ + 
"#### Constraint 15: Each Task Must Have A Start Time 🚀\n", + "\n", + "**English**: Every task must start exactly once\n", + "\n", + "**Impact**: Tasks must start to be completed, and they can only start once.\n", + "\n", + "**Math**: \n", + "$$\n", + "\\sum_{s=0}^{S-1} X_{t,s} = 1 \\quad \\forall t \\in \\{0, 1, \\ldots, T-1\\}\n", + "$$\n", + "\n", + "**Implementation** for the current example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "50d925d8", + "metadata": {}, + "outputs": [], + "source": [ + "# Constraint 15: Each Task Must Have A Start Time Matrix Construction\n", + "print(\"🚀 Constraint 15: Each Task Must Have A Start Time\")\n", + "print()\n", + "\n", + "print(f\"A_eq_15 matrix equations (shape: {scheduler.A_eq_15.shape}):\")\n", + "for i, (row, b_val) in enumerate(zip(scheduler.A_eq_15, scheduler.b_eq_15)):\n", + " row_str = '[' + ' '.join([str(val) for val in row]) + ']'\n", + " print(f\"{row_str} x = {b_val}\")" + ] + }, + { + "cell_type": "markdown", + "id": "78cec84a", + "metadata": {}, + "source": [ + "In this example, we are constraining the system so that\n", + "\n", + "$$ X_{t,s}[0,0] + X_{t,s}[0,1] + X_{t,s}[0,2] + X_{t,s}[0,3] + X_{t,s}[0,4] = 1 $$ and $$ X_{t,s}[1,0] + X_{t,s}[1,1] + X_{t,s}[1,2] + X_{t,s}[1,3] + X_{t,s}[1,4] = 1 $$\n", + "\n", + "which doesn't allow more than 1 start time per task" + ] + }, + { + "cell_type": "markdown", + "id": "828e65f8", + "metadata": {}, + "source": [ + "#### Constraint 10: Task Duration Must Not Exceed Planning Horizon 📅\n", + "\n", + "**English**: A task cannot start at a time where its duration would extend beyond the available planning periods.\n", + "\n", + "**Impact**: Prevents scheduling tasks that would run past the end of the planning window, ensuring all work completes within the defined timeframe.\n", + "\n", + "**Math**: \n", + "$$\n", + "X_{t,a}[t,a] + X_{t,s}[t,s] \\leq 1 \\quad \\forall t, a, s \\quad \\text{ where } \\quad d_{t,a} > 0 \\text{ and } s + 
d_{t,a} > P\n", + "$$\n", + "\n", + "**Implementation** for the current example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "37f32f59", + "metadata": {}, + "outputs": [], + "source": [ + "# Constraint 10: Task Duration Must Not Exceed Planning Horizon Matrix Construction\n", + "print(\"📅 Constraint 10: Task Duration Must Not Exceed Planning Horizon\")\n", + "print()\n", + "\n", + "if hasattr(scheduler, 'A_ub_10'):\n", + " print(f\"A_ub_10 matrix equations (shape: {scheduler.A_ub_10.shape}):\")\n", + " for i, (row, b_val) in enumerate(zip(scheduler.A_ub_10, scheduler.b_ub_10)):\n", + " row_str = '[' + ' '.join([str(val) for val in row]) + ']'\n", + " print(f\"{row_str} x ≤ {b_val}\")\n", + "else:\n", + " print(\"No duration violations found - all tasks can complete within planning horizon\")" + ] + }, + { + "cell_type": "markdown", + "id": "82b1b5fd", + "metadata": {}, + "source": [ + "In this example, we are constraining the system so that \n", + "\n", + "$ X_{t,a}[0,0] + X_{t,s}[0,4] \\leq 1 \\quad X_{t,a}[0,1] + X_{t,s}[0,3] \\leq 1 \\quad X_{t,a}[0,1] + X_{t,s}[0,4] \\leq 1 $\n", + "\n", + "which says that if the first asset group is assigned to the first task, then the first task cannot start in the last period (because its duration is 2 periods). Similarly, if the second asset group is assigned to the first task, then the first task cannot start in the fourth or fifth periods (because its duration is 3 periods)." 
+ ] + }, + { + "cell_type": "markdown", + "id": "547fcd6a", + "metadata": {}, + "source": [ + "#### Constraint 14a: Task Must Be Active When It Starts ⏰\n", + "\n", + "**English**: If a task starts in a specific period, it must also be active in that same period.\n", + "\n", + "**Impact**: Links start time decisions to activity periods - ensures that when a task starts, it's immediately active.\n", + "\n", + "**Math**: \n", + "$$\n", + "X_{t,s}[t,s] \\leq X_{t,p}[t,p] \\quad \\forall t, s \\quad \\text{ where } \\quad s = p \\text{ and } s < P\n", + "$$\n", + "\n", + "**Implementation** for the current example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "864b9a1e", + "metadata": {}, + "outputs": [], + "source": [ + "# Constraint 14a: Task Must Be Active When It Starts Matrix Construction\n", + "print(\"⏰ Constraint 14a: Task Must Be Active When It Starts\")\n", + "print()\n", + "\n", + "if hasattr(scheduler, 'A_ub_14a'):\n", + " print(f\"A_ub_14a matrix equations (shape: {scheduler.A_ub_14a.shape}):\")\n", + " for i, (row, b_val) in enumerate(zip(scheduler.A_ub_14a, scheduler.b_ub_14a)):\n", + " row_str = '[' + ' '.join([str(val) for val in row]) + ']'\n", + " print(f\"{row_str} x ≤ {b_val}\")\n", + "else:\n", + " print(\"No start-time to period mapping constraints needed\")" + ] + }, + { + "cell_type": "markdown", + "id": "4be12261", + "metadata": {}, + "source": [ + "In this example, we are constraining the system so that \n", + "\n", + "$ -X_{t,p}[0,0] + X_{t,s}[0,0] \\leq 0 \\quad -X_{t,p}[0,1] + X_{t,s}[0,1] \\leq 0 \\quad -X_{t,p}[0,2] + X_{t,s}[0,2] \\leq 0 $\n", + "\n", + "which says that if the first task starts in the first time period, then the Xtp variable that corresponds to that start period must also be turned on." 
+ ] + }, + { + "cell_type": "markdown", + "id": "0d13c74a", + "metadata": {}, + "source": [ + "#### Constraint 14b: Task Activity Must Match Duration ⏱️\n", + "\n", + "**English**: If a task is assigned to an asset and starts at a specific time, it must be active for exactly the duration required by that task-asset combination.\n", + "\n", + "**Impact**: Ensures tasks run for their complete required duration based on the chosen asset assignment and start time.\n", + "\n", + "**Math**: \n", + "$$\n", + "X_{t,a}[t,a] + X_{t,s}[t,s] - X_{t,p}[t,p] \\leq 1 \\quad \\forall t, a, s, p \\quad \\text{ where } \\quad d_{t,a} > 0 \\text{ and } s \\leq p < s + d_{t,a}\n", + "$$\n", + "\n", + "**Implementation** for the current example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b8fdfa7d", + "metadata": {}, + "outputs": [], + "source": [ + "# Constraint 14b: Task Activity Must Match Duration Matrix Construction\n", + "print(\"⏱️ Constraint 14b: Task Activity Must Match Duration\")\n", + "print()\n", + "\n", + "if hasattr(scheduler, 'A_ub_14b'):\n", + " print(f\"A_ub_14b matrix equations (shape: {scheduler.A_ub_14b.shape}):\")\n", + " for i, (row, b_val) in enumerate(zip(scheduler.A_ub_14b, scheduler.b_ub_14b)):\n", + " row_str = '[' + ' '.join([str(val) for val in row]) + ']'\n", + " print(f\"{row_str} x ≤ {b_val}\")\n", + "else:\n", + " print(\"No duration enforcement constraints needed\")" + ] + }, + { + "cell_type": "markdown", + "id": "5a86ce07", + "metadata": {}, + "source": [ + "In this example, we are constraining the system so that \n", + "\n", + "$ X_{t,a}[0,0] - X_{t,p}[0,0] + X_{t,s}[0,0] \\leq 1 \\quad X_{t,a}[0,0] - X_{t,p}[0,1] + X_{t,s}[0,0] \\leq 1 \\quad X_{t,a}[0,0] - X_{t,p}[0,1] + X_{t,s}[0,1] \\leq 1$\n", + "\n", + "which ensures that for each case when a task-asset group starts in a certain time period, the Xtp variables that would align with the start time period and the duration of the task are turned on. 
The list of constraints follows a pattern where it loops through all task-asset group options, and then loops through all start time options, and uses a -1 coefficient on the Xtp variables that equate to the duration of the task. Constraint 14a only does the first Xtp variable corresponding to the start time. Constraint 14b ensures the Xtp variables that align with the duration of the task are also turned on." + ] + }, + { + "cell_type": "markdown", + "id": "2140b7de", + "metadata": {}, + "source": [ + "#### Constraint 16: Each Task Active For Exactly Its Duration ⚖️\n", + "\n", + "**English**: The total number of periods a task is active must exactly equal the duration required by its assigned asset group.\n", + "\n", + "**Impact**: Prevents tasks from being active for longer or shorter than required, working with Constraint 14b to ensure precise duration matching.\n", + "\n", + "**Math**: \n", + "$$\n", + "\\sum_{p=0}^{P-1} X_{t,p} = \\sum_{a=0}^{A-1} X_{t,a} \\cdot d_{t,a} \\quad \\forall t\n", + "$$\n", + "\n", + "**Note**: Constraint 14b ensures tasks are active during their assigned periods, but doesn't necessarily prevent periods from being active outside of the ones that it specifies in this constraint. For example, if Task 0 uses Asset 1 with duration=3 and starts in Period 2, Constraint 14b ensures Periods 2, 3, and 4 are turned on, but doesn't prevent Task 0 from being active in Periods 0, 1, or 5. Constraint 16 ensures the sum of Xtp variables equals the duration exactly. 
This method does not seem the cleanest right now, but no other methods were found that were cleaner.\n", + "\n", + "**Implementation** for the current example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1bc91077", + "metadata": {}, + "outputs": [], + "source": [ + "# Constraint 16: Each Task Active For Exactly Its Duration Matrix Construction\n", + "print(\"⚖️ Constraint 16: Each Task Active For Exactly Its Duration\")\n", + "print()\n", + "\n", + "if hasattr(scheduler, 'A_eq_16'):\n", + " print(f\"A_eq_16 matrix equations (shape: {scheduler.A_eq_16.shape}):\")\n", + " for i, (row, b_val) in enumerate(zip(scheduler.A_eq_16, scheduler.b_eq_16)):\n", + " row_str = '[' + ' '.join([str(val) for val in row]) + ']'\n", + " print(f\"{row_str} x = {b_val}\")\n", + "else:\n", + " print(\"No duration matching constraints needed\")" + ] + }, + { + "cell_type": "markdown", + "id": "e47ebb3a", + "metadata": {}, + "source": [ + "In this example, we are constraining the system so that \n", + "\n", + "$$ -2X_{t,a}[0,0] - 3X_{t,a}[0,1] -3X_{t,a}[0,2] + X_{t,p}[0,0] + X_{t,p}[0,1] + X_{t,p}[0,2] + X_{t,p}[0,3] + X_{t,p}[0,4] = 0 $$\n", + "$$ -3X_{t,a}[1,0] - 3X_{t,a}[1,2] + X_{t,p}[1,0] + X_{t,p}[1,1] + X_{t,p}[1,2] + X_{t,p}[1,3] + X_{t,p}[1,4] = 0 $$\n", + "\n", + "which ensures that no matter which Xta pair is selected, per task, that sum of the Xtp variables must equal the corresponding coefficient. If the first asset group is assigned to the first task, then there needs to be only two Xtp[0,p] variables turned on. Constraint 3 ensures that multiple Xta variables per task are not selected. This constraint is used because while Constraint 14b ensures the proper Xtp variables are turned on based on the task's duration, it has no control over the other Xtp variables outside of the start time period plus duration. This constraint provides the upper bound on those Xtp variables." 
+ ] + }, + { + "cell_type": "markdown", + "id": "3261ea5b", + "metadata": {}, + "source": [ + "#### Constraint 4: Asset Conflict Prevention 🚫\n", + "\n", + "**English**: Individual assets cannot be used by multiple tasks simultaneously, even when those assets are part of different asset groups.\n", + "\n", + "**Impact**: Prevents physical resource conflicts where the same vessel would need to be in two places at once, ensuring realistic scheduling.\n", + "\n", + "**Math**: \n", + "$$\n", + "X_{t,a}[t_1,a_1] + X_{t,a}[t_2,a_2] + X_{t,p}[t_1,p] + X_{t,p}[t_2,p] \\leq 3 \\quad \\forall t_1, t_2, a_1, a_2, p \\text{ where individual assets overlap in asset groups}\n", + "$$\n", + "\n", + "**Implementation** for the current example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "506a0fbd", + "metadata": {}, + "outputs": [], + "source": [ + "# Constraint 4: Asset Conflict Prevention Matrix Construction\n", + "print(\"🚫 Constraint 4: Asset Conflict Prevention\")\n", + "print()\n", + "\n", + "if hasattr(scheduler, 'A_ub_4'):\n", + " print(f\"A_ub_4 matrix equations (shape: {scheduler.A_ub_4.shape}):\")\n", + " for i, (row, b_val) in enumerate(zip(scheduler.A_ub_4, scheduler.b_ub_4)):\n", + " row_str = '[' + ' '.join([str(val) for val in row]) + ']'\n", + " print(f\"{row_str} x ≤ {b_val}\")\n", + "else:\n", + " print(\"No individual asset conflicts found - no constraints needed\")" + ] + }, + { + "cell_type": "markdown", + "id": "ef463b1c", + "metadata": {}, + "source": [ + "In this example, we are constraining the system so that \n", + "\n", + "$ X_{t,a}[0,0] + X_{t,a}[1,0] + X_{t,p}[0,0] + X_{t,p}[1,0] \\leq 3 $\n", + "\n", + "which ensures that if different tasks use the same asset (that are included in different asset groups), then that asset can only be used for one time period. 
In this case, if both tasks use the first asset group (which has the same assets between each other), then only one task can have their Xtp variables turned on because assets can't be doing two things at once. We also have other constraints like\n", + "\n", + "$ X_{t,a}[0,0] + X_{t,a}[1,2] + X_{t,p}[0,0] + X_{t,p}[1,0] \\leq 3 $\n", + "\n", + "which is used because the third asset group has at least one of the same assets that the first asset group has, and so the same rules apply. The implementation checks for similar assets in different asset groups and creates the constraints based on those overlaps. The bound value of 3 in this example is a function of the number of tasks (1 + T)." + ] + }, + { + "cell_type": "markdown", + "id": "4482266b", + "metadata": {}, + "source": [ + "### Constraint Summary 🔗\n", + "\n", + "1. **Constraint 1** ensures only valid assignments are possible\n", + "2. **Constraint 3** ensures every task gets exactly one asset\n", + "3. **Constraint 15** ensures every task starts exactly once \n", + "4. **Constraint 10** ensures the task duration does not exceed planning horizon\n", + "5. **Constraint 14a** links start time to activity period\n", + "6. **Constraint 14b** links start times to activity periods with correct duration\n", + "7. **Constraint 16** equates total activity periods to correct duration\n", + "8. **Constraint 4** prevents resource conflicts between asset groups" + ] + }, + { + "cell_type": "markdown", + "id": "4736b501", + "metadata": {}, + "source": [ + "### Objectives and other MILP Inputs\n", + "\n", + "Beyond the constraint matrices, the MILP optimizer requires three additional key inputs: the objective values vector, variable bounds, and integrality specifications.\n", + "\n", + "#### 1. Objective Values Vector\n", + "\n", + "The `values` vector defines the coefficients for the objective function that the optimizer seeks to minimize. 
In IRMA's scheduler, this represents the cost or penalty associated with each decision variable.\n", + "\n", + "Each element corresponds to a decision variable and represents the \"cost\" of setting that variable to 1.\n", + "\n", + "$$\\text{minimize } \\mathbf{c}^T \\mathbf{x} = \\sum_{i} c_i x_i$$\n", + "\n", + "where $c_i$ is the cost coefficient and $x_i$ is the binary decision variable.\n", + "\n", + "The costs of each task-asset combination are included as entries to the values vector for each corresponding Xta variable.\n", + "\n", + "Penalties are also included for each Xts variable to incentivize earlier Xts variables to be selected rather than later." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "193c7cc0", + "metadata": {}, + "outputs": [], + "source": [ + "print(scheduler.values)" + ] + }, + { + "cell_type": "markdown", + "id": "bf52c3c9", + "metadata": {}, + "source": [ + "### 2. Bounds\n", + "\n", + "The `bounds` parameter defines the lower and upper limits for each decision variable. In our case, all decision variables are binary (0 or 1), so bounds are set as:\n", + "```python\n", + "bounds = optimize.Bounds(0, 1) # 0 ≤ x_i ≤ 1\n", + "```\n", + "\n", + "**Mathematical Form**: For each variable $x_i$:\n", + "$$0 \\leq x_i \\leq 1$$\n", + "\n", + "\n", + "### 3. Integrality\n", + "\n", + "The `integrality` parameter specifies which variables must take integer values. It forces variables to be integers rather than continuous. 
In our case, we are only working with binary integers so integrality is set to 1 for all variables:\n", + "```python\n", + "integrality = np.ones(num_variables, dtype=int) # All variables are integers\n", + "```\n", + "\n", + "**Mathematical Form**: Each variable $x_i$ must satisfy:\n", + "$$x_i \\in \\{0, 1\\}$$\n", + "\n", + "Combined with bounds [0,1], this creates binary decision variables that are either \"not selected\" (0) or \"selected\" (1).\n" + ] + }, + { + "cell_type": "markdown", + "id": "2c0bb658", + "metadata": {}, + "source": [ + "## Running the Code\n", + "\n", + "When you run `scheduler.optimize()`, the optimization engine:\n", + "\n", + "1. **Builds** the constraint matrices (A_ub, A_eq, b_ub, b_eq) and MILP inputs\n", + "2. **Solves** the MILP problem using scipy.optimize.milp\n", + "3. **Returns** optimal values for all decision variables\n", + "4. **Decodes** the solution into human-readable schedules\n", + "\n", + "An example of how the optimizer is called:" + ] + }, + { + "cell_type": "markdown", + "id": "5c4a053c", + "metadata": {}, + "source": [ + "```python\n", + "from scipy.optimize import milp\n", + "\n", + "res = milp(\n", + " c=values, # Objective function coefficients\n", + " constraints=constraints, # List of LinearConstraint objects\n", + " integrality=integrality, # Integer specification for each variable\n", + " bounds=bounds # Variable bounds (0 to 1 for binary)\n", + ")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "eee8140e", + "metadata": {}, + "source": [ + "This formulates and solves the complete Mixed Integer Linear Programming problem:\n", + "- **Minimize**: $\\mathbf{c}^T \\mathbf{x}$ (total cost)\n", + "- **Subject to**: All constraint equations\n", + "- **Where**: $x_i \\in \\{0, 1\\}$ for all $i$ (binary decisions)\n", + "\n", + "Let's see this in action with our simple example!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "06088598", + "metadata": {}, + "outputs": [], + "source": [ + "# Solve the optimization problem\n", + "print(\"🚀 Solving the optimization problem...\\n\")\n", + "\n", + "result = scheduler.optimize()" + ] + }, + { + "cell_type": "markdown", + "id": "8fea43f1", + "metadata": {}, + "source": [ + "The results of the optimization provide a schedule for installation that minimizes cost and satisfies all constraints!\n", + "\n", + "- The optimization follows our penalty of starting tasks as soon as possible\n", + "- It decides to schedule the \"Install Mooring\" task in the first 3 periods using the MPSV asset\n", + "- It decides to schedule the \"Install Anchor\" task also in the first 3 periods but using a separate vessel, the AHTS asset, which is allowed.\n", + "- This combination of task-asset group assignments minimized cost and kept all of our logical constraints honored.\n", + "\n", + "Now let's adjust the weather to see how that impacts the schedule and the limits of each asset group" + ] + }, + { + "cell_type": "markdown", + "id": "72cf987f", + "metadata": {}, + "source": [ + "### Constraint 17: Weather Restrictions 🌊\n", + "\n", + "**English**: Asset groups cannot be assigned to tasks during periods when weather conditions exceed their operational limits.\n", + "\n", + "**Impact**: Prevents scheduling tasks during unsuitable weather conditions, ensuring safety and operational limits are respected.\n", + "\n", + "**Math**:\n", + "$$\n", + "X_{t,a}[t,a] + X_{t,p}[t,p] \\leq 1 \\quad \\forall t, a, p \\quad \\text{ where } \\quad w_p > \\text{max\\_weather}_a\n", + "$$\n", + "\n", + "where:\n", + "- $w_p$ = weather severity in period $p$\n", + "- $\\text{max\\_weather}_a$ = minimum weather capability across all individual assets in asset group $a$\n", + "\n", + "**Implementation** for our current example (WHILE UPDATING OUR WEATHER CONDITIONS):" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "id": "96ec542e", + "metadata": {}, + "outputs": [], + "source": [ + "#scheduler.weather = [1, 2, 3, 3, 1]\n", + "scheduler.weather = [2, 3, 1, 1, 1]\n", + "scheduler.set_up_optimizer()\n", + "\n", + "# Constraint 17: Weather Restrictions Matrix Construction\n", + "print(\"🌊 Constraint 17: Weather Restrictions\")\n", + "print()\n", + "\n", + "if hasattr(scheduler, 'A_ub_17'):\n", + " print(f\"A_ub_17 matrix equations (shape: {scheduler.A_ub_17.shape}):\")\n", + " for i, (row, b_val) in enumerate(zip(scheduler.A_ub_17, scheduler.b_ub_17)):\n", + " row_str = '[' + ' '.join([str(val) for val in row]) + ']'\n", + " print(f\"{row_str} x ≤ {b_val}\")\n", + "else:\n", + " print(\"No weather restrictions needed - all asset groups can work in all conditions\")" + ] + }, + { + "cell_type": "markdown", + "id": "ddf4089c", + "metadata": {}, + "source": [ + "In this example, we have adjusted the weather conditions of the scenario (2, 3, 1, 1, 1), which adds additional constraints: \n", + "\n", + "$ X_{t,a}[0,0] + X_{t,p}[0,1] \\leq 1 $\n", + "\n", + "which does not allow the first task-asset group combination to be active in the second period, since that period has weather conditions that exceed the allowances of the minimum asset in that asset group. Similarly, there are other constraints like\n", + "\n", + "$ X_{t,a}[0,2] + X_{t,p}[0,0] \\leq 1 \\quad X_{t,a}[0,2] + X_{t,p}[0,1] \\leq 1 $\n", + "\n", + "which does not allow the third asset group combined with the first task to be active in the first or second periods, since those periods have weather conditions that exceed the minimum allowance of the asset in that asset group.\n", + "\n", + "Now, let's rerun the scheduler and see what impact it has." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83598fda", + "metadata": {}, + "outputs": [], + "source": [ + "# Solve the optimization problem\n", + "print(\"🚀 Solving the optimization problem with adjusted weather conditions\\n\")\n", + "\n", + "result = scheduler.optimize()" + ] + }, + { + "cell_type": "markdown", + "id": "3721fb07", + "metadata": {}, + "source": [ + "The results of this new optimization show how the weather impacts the schedule!\n", + "\n", + "- Bad weather in period 1 does not allow any task-asset combination from being scheduled in that period\n", + "- This means that the tasks have to happen in periods 2, 3, and 4\n", + "\n", + "Now let's adjust the weather back to the original calm status and add in dependency constraints" + ] + }, + { + "cell_type": "markdown", + "id": "caba0c18", + "metadata": {}, + "source": [ + "### Constraint 2: Task Dependencies 🔗\n", + "\n", + "**English**: Tasks must be completed in a specific order based on their dependencies. 
For example, a task cannot start until all its prerequisite tasks have been completed.\n", + "\n", + "**Impact**: Ensures logical sequencing of installation activities - for example, anchors must be installed before mooring lines can be connected to them.\n", + "\n", + "**Math**: For finish_start dependency (most common type):\n", + "$$\n", + "X_{t,s}[t,s] \\leq \\sum_{s_d=0}^{s-d_{min}} X_{t,s}[d,s_d] \\quad \\forall t, s \\quad \\text{ where task } t \\text{ depends on task } d\n", + "$$\n", + "\n", + "where:\n", + "- $d_{min}$ = minimum duration of dependency task $d$ across all possible assets\n", + "- Task $t$ can only start at time $s$ if task $d$ started early enough to finish before time $s$\n", + "\n", + "**Implementation** for our example, which requires re-initializing the scheduler:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ef062653", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's create a new scheduler instance with task dependencies\n", + "print(\"🔗 Constraint 2: Task Dependencies\")\n", + "print()\n", + "\n", + "# Define task dependencies: mooring installation depends on anchor installation\n", + "task_dependencies = {\n", + " 'install_mooring': ['install_anchor'] # Mooring installation depends on anchor installation\n", + "}\n", + "\n", + "dependency_types = {\n", + " 'install_anchor->install_mooring': 'finish_start' # Anchor must finish before mooring starts\n", + "}\n", + "\n", + "print(\"📋 Task Dependencies:\")\n", + "print(f\" install_mooring depends on: {task_dependencies['install_mooring']}\")\n", + "print(f\" Dependency type: {dependency_types['install_anchor->install_mooring']}\")\n", + "print()\n", + "\n", + "# Create scheduler with dependencies\n", + "scheduler_with_deps = Scheduler(\n", + " tasks=tasks,\n", + " assets=assets,\n", + " task_asset_matrix=task_asset_matrix,\n", + " weather=[1, 1, 1, 1, 1], # Reset to good weather\n", + " asset_groups=asset_groups,\n", + " 
task_dependencies=task_dependencies,\n", + " dependency_types=dependency_types,\n", + " wordy=0\n", + ")\n", + "\n", + "scheduler_with_deps.set_up_optimizer()\n", + "\n", + "if hasattr(scheduler_with_deps, 'A_ub_2'):\n", + " print(f\"\\nA_ub_2 matrix equations (shape: {scheduler_with_deps.A_ub_2.shape}):\")\n", + " for i, (row, b_val) in enumerate(zip(scheduler_with_deps.A_ub_2, scheduler_with_deps.b_ub_2)):\n", + " row_str = '[' + ' '.join([str(val) for val in row]) + ']'\n", + " print(f\"{row_str} x ≤ {b_val}\")\n", + " \n", + " print(f\"\\nConstraint 2 ensures that the mooring installation task cannot start\")\n", + " print(f\"until the anchor installation task has been completed, maintaining logical\")\n", + " print(f\"installation sequencing.\")\n", + "else:\n", + " print(\"No task dependencies defined - no constraints needed\")" + ] + }, + { + "cell_type": "markdown", + "id": "02b1e346", + "metadata": {}, + "source": [ + "In this example, we have updated the dependencies to ensure that Task 0 (Install Mooring) is only done after Task 1 (Install Anchor) is completed, which adds many additional constraints, like:\n", + "\n", + "$ X_{t,a}[1,0] + X_{t,s}[0,0] + X_{t,s}[1,0] \\leq 2 \\quad X_{t,a}[1,0] + X_{t,s}[0,1] + X_{t,s}[1,0] \\leq 2 \\quad X_{t,a}[1,0] + X_{t,s}[0,2] + X_{t,s}[1,0] \\leq 2$\n", + "\n", + "which ensures that if the second task is active with the first asset group, then the first task cannot start in periods 0, 1, or 2 if the second task starts in period 0. (The names of first and second are confusing here, but it's written correctly).\n", + "\n", + "This same logic applies for all other start times for each feasible task-asset combination for the dependent task, blocking it from occupying time periods that would violate the constraint." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0ea6a663", + "metadata": {}, + "outputs": [], + "source": [ + "scheduler_with_deps.optimize()" + ] + }, + { + "cell_type": "markdown", + "id": "1fff95eb", + "metadata": {}, + "source": [ + "### Constraint 2+: Task Dependencies with Offset Types\n", + "\n", + "The IRMA scheduler now supports two types of time offsets: **minimum** and **exact**.\n", + "\n", + "- **Minimum**: Task must wait **at least** the specified number of periods\n", + "- **Exact**: Task must start/finish **exactly** the specified number of periods later\n", + "\n", + "Format Options:\n", + "\n", + "```python\n", + "# Option 1: Simple format (defaults to minimum)\n", + "offsets = {\n", + " 'task1->task2': 3 # Minimum 3 periods\n", + "}\n", + "\n", + "# Option 2: Tuple format (specify type)\n", + "offsets = {\n", + " 'task1->task2': (3, 'exact'), # Exactly 3 periods\n", + " 'task1->task3': (2, 'minimum') # At least 2 periods\n", + "}\n", + "\n", + "# Option 3: Dictionary format (most explicit)\n", + "offsets = {\n", + " 'task1->task2': {'value': 3, 'type': 'exact'},\n", + " 'task1->task3': {'value': 2, 'type': 'minimum'}\n", + "}\n", + "```\n", + "\n", + "Let's demonstrate the difference with a practical example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4d924838", + "metadata": {}, + "outputs": [], + "source": [ + "# Define task dependencies: mooring installation depends on anchor installation\n", + "task_dependencies = {\n", + " 'install_mooring': ['install_anchor'] # Mooring installation depends on anchor installation\n", + "}\n", + "\n", + "dependency_types = {\n", + " 'install_anchor->install_mooring': 'start_start' # Mooring starts relative to when anchor starts\n", + "}\n", + "\n", + "# Test 1: Minimum offset (default behavior)\n", + "print(\"\\n📋 Test 1: MINIMUM Offset\")\n", + "print(\"Task setup: mooring starts AT LEAST 1 period after anchor starts\")\n", + "\n", + "offsets_min = {\n", 
+ " 'install_anchor->install_mooring': 1 # Simple format defaults to minimum\n", + "}\n", + "\n", + "scheduler_min = Scheduler(\n", + " tasks=tasks,\n", + " assets=assets,\n", + " task_asset_matrix=task_asset_matrix,\n", + " task_dependencies=task_dependencies,\n", + " dependency_types=dependency_types,\n", + " offsets=offsets_min,\n", + " weather=[1, 1, 1, 1, 1],\n", + " asset_groups=asset_groups,\n", + " wordy=0\n", + ")\n", + "\n", + "scheduler_min.set_up_optimizer()\n", + "\n", + "if hasattr(scheduler_min, 'A_ub_2'):\n", + " print(f\"\\nA_ub_2 matrix equations (shape: {scheduler_min.A_ub_2.shape}):\")\n", + " for i, (row, b_val) in enumerate(zip(scheduler_min.A_ub_2, scheduler_min.b_ub_2)):\n", + " row_str = '[' + ' '.join([str(val) for val in row]) + ']'\n", + " print(f\"{row_str} x ≤ {b_val}\")\n", + " \n", + " print(f\"\\nConstraint 2 can ensure that the mooring installation task cannot start\")\n", + " print(f\"until AT LEAST 1 period after the anchor installation task has been\")\n", + " print(f\"completed, maintaining logical installation sequencing.\")\n", + "else:\n", + " print(\"No task dependencies defined - no constraints needed\")\n", + "\n", + "result_min = scheduler_min.optimize()" + ] + }, + { + "cell_type": "markdown", + "id": "38f7e788", + "metadata": {}, + "source": [ + "In this example, we have additional inputs that specify that Task 0 (Install Mooring) must be done AT LEAST 1 period (offset) after Task 1 (Install Anchor) starts. The constraints that it makes are like the following:\n", + "\n", + "$ X_{t,s}[0,0] + X_{t,s}[1,0] \\leq 1 \\quad X_{t,s}[0,0] + X_{t,s}[1,1] \\leq 1 \\quad X_{t,s}[0,1] + X_{t,s}[1,1] \\leq 1 $\n", + "\n", + "which says that if Task 1 starts in period 0, then Task 0 cannot start in period 0 (but it could start anywhere else). 
Similarly, if Task 1 starts in period 1, then Task 0 cannot start in period 0 or 1.\n", + "\n", + "**Notes**:\n", + "- Task 0 does not have to start exactly 1 period after Task 1 starts (for this case), but it will try to, since there are penalty objective values on later start times\n", + "- Using the dependency type of 'finish_start' creates 1's in Xta variables in the constraints because the duration of the 'finish' depends on the duration of the specific task-asset group combination. Using 'start_start' does not include 1's in the constraint row because it does not require information about the duration." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aa908935", + "metadata": {}, + "outputs": [], + "source": [ + "# Test 2: Exact offset\n", + "print(\"\\n📋 Test 2: EXACT Offset\")\n", + "print(\"Task setup: mooring starts EXACTLY 1 period after anchor starts\")\n", + "\n", + "offsets_exact = {\n", + " 'install_anchor->install_mooring': (1, 'exact') # Tuple format: (value, type)\n", + "}\n", + "\n", + "scheduler_exact = Scheduler(\n", + " tasks=tasks,\n", + " assets=assets,\n", + " task_asset_matrix=task_asset_matrix,\n", + " task_dependencies=task_dependencies,\n", + " dependency_types=dependency_types,\n", + " offsets=offsets_exact,\n", + " weather=[1, 1, 1, 1, 1],\n", + " asset_groups=asset_groups,\n", + " wordy=0\n", + ")\n", + "\n", + "scheduler_exact.set_up_optimizer()\n", + "\n", + "if hasattr(scheduler_exact, 'A_ub_2'):\n", + " print(f\"\\nA_ub_2 matrix equations (shape: {scheduler_exact.A_ub_2.shape}):\")\n", + " for i, (row, b_val) in enumerate(zip(scheduler_exact.A_ub_2, scheduler_exact.b_ub_2)):\n", + " row_str = '[' + ' '.join([str(val) for val in row]) + ']'\n", + " print(f\"{row_str} x ≤ {b_val}\")\n", + " \n", + " print(f\"\\nConstraint 2 can also ensure that the mooring installation task cannot start\")\n", + " print(f\"until EXACTLY 1 period after the anchor installation task has been completed, \")\n", + 
print(f\"maintaining logical installation sequencing.\")\n", + "else:\n", + " print(\"No task dependencies defined - no constraints needed\")\n", + "\n", + "result_exact = scheduler_exact.optimize()" + ] + }, + { + "cell_type": "markdown", + "id": "2969cb3f", + "metadata": {}, + "source": [ + "In this example, we run the same scenario but specify that Task 0 MUST start EXACTLY 1 period after the start of Task 1. The constraints are made like the following\n", + "\n", + "$$ -X_{t,s}[0,1] + X_{t,s}[1,0] \\leq 0 $$\n", + "\n", + "$$ X_{t,s}[0,0] + X_{t,s}[1,0] \\leq 1 \\quad X_{t,s}[0,2] + X_{t,s}[1,0] \\leq 1 \\quad X_{t,s}[0,3] + X_{t,s}[1,0] \\leq 1 $$\n", + "\n", + "which says in the first equation first, if Task 1 starts in period 0, then Task 0 must start in period 1. And in the second set of equations, if Task 1 starts in period 0, then Task 0 cannot start in any other period (0, 2, 3, 4)." + ] + }, + { + "cell_type": "markdown", + "id": "677af285", + "metadata": {}, + "source": [ + "Can add more explanations/examples for different dependency types and offsets" + ] + }, + { + "cell_type": "markdown", + "id": "5e056bfb", + "metadata": {}, + "source": [ + "## A More Complicated Case\n", + "\n", + "Provide an example of a more involved schedule problem with many more assets and tasks..." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "famodel-env", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/famodel/irma/spec_conversions.yaml b/famodel/irma/spec_conversions.yaml new file mode 100644 index 00000000..86d958f7 --- /dev/null +++ b/famodel/irma/spec_conversions.yaml @@ -0,0 +1,28 @@ +# This file specifies optional capability specification keys that can be used +# to support inputs in common industry units rather than SI units. + +# format: key name with common unit, conversion factor from common->SI, fundamental key name (SI unit), +- [ area_sqf , 0.092903 , area ] +- [ area_m2 , 1.0 , area ] +- [ max_load_t , 9806.7 , max_load ] +- [ volume_m3 , 1.0 , volume ] +- [ volume_cf , 0.028317 , volume ] +- [ width_ft , -1 , width ] # width [ft]; NOTE(review): -1 looks like a placeholder — ft->m would be 0.3048; confirm +- [ max_line_pull_t , 9806.7 , max_line_pull ] # continuous line pull [t] +- [ brake_load_t , 9806.7 , brake_load ] # static brake holding load [t] +- [ site_speed_mps , 1.0 , site_speed ] +- [ speed_mpm , 0.01667 , speed ] # payout/haul speed [m/min] -> m/s +- [ max_hold_force_t , 9806.7 , max_hold_force ] # +- [ max_force_t , 9806.7 , max_force ] # bollard pull [t] +- [ length_capacity_m , 1.0 , length_capacity ] # length a reel can store, etc. 
+- [ capacity_t , 9806.7 , capacity ] # SWL at specified radius [t] +- [ hook_height_m , 1.0 , hook_height ] # max hook height [m] +- [ hook_height_ft , 0.3048 , hook_height ] # max hook height [m] +- [ towing_pin_rating_t , 9806.7 , towing_pin_rating ] # rating of towing pins [t] (optional) +- [ power_hp , -1 , power ] # W; NOTE(review): -1 looks like a placeholder — hp->W would be 745.7; confirm +- [ power_kW , 1000.0 , power ] # W +- [ pressure_bar , 1.0e5 , pressure ] # Pa +- [ flow_rate_m3hr , 0.00027777,flow_rate ] # cubic m per hour to per s +- [ weight_t , 9806.7 , weight ] # N or should this be mass in kg? +- [ centrifugal_force_kN, 1000.0 , centrifugal_force ] +- [ torque_kNm , 1000.0 , torque ] diff --git a/famodel/irma/task.py b/famodel/irma/task.py new file mode 100644 index 00000000..8c66a2f9 --- /dev/null +++ b/famodel/irma/task.py @@ -0,0 +1,1557 @@ +"""Task base class""" + +import numpy as np +import matplotlib.pyplot as plt + +import moorpy as mp +from moorpy.helpers import set_axes_equal +from moorpy import helpers +import yaml +from copy import deepcopy +import networkx as nx + +# Import select required helper functions +from famodel.helpers import (check_headings, head_adjust, getCableDD, getDynamicCables, + getMoorings, getAnchors, getFromDict, cleanDataTypes, + getStaticCables, getCableDesign, m2nm, loadYAML, + configureAdjuster, route_around_anchors) + + +class Task(): + ''' + A Task is a general representation of a set of marine operations + that follow a predefined sequence/strategy. There can be multiple + tasks that achieve the same end, each providing an alternative strategy. + Each Task consists of a set of Actions with internal dependencies. + + + For now, we'll assume each Task must be port-to-port, + i.e. its vessel(s) must start and end at port over the course of the task. + + + + ''' + + def __init__(self, name, actions, action_sequence='series', **kwargs): + '''Create a task object... + It must be given a name and a list of actions. 
+ The action list should be by default coherent with actionTypes dictionary. + + Parameters + ---------- + name : string + A name for the action. It may be appended with numbers if there + are duplicate names. + actions : list + A list of all actions that are part of this task. + action_sequence : string or dict, optional + If a dictionary, each key is the name of each action, and the values are + each a list of which actions (by name) must be completed before the current + one. + If a string, indicates which approach is used for automatically + setting the sequence of actions: + 'series': one after the other based on the order in actions (default), + 'dependencies': based on the dependencies of each action. + kwargs + Additional arguments may depend on the task type. + + ''' + + + self.name = name + print(f" Initializing Task '{self.name}") + + # Save the task's dictionary of actions + if isinstance(actions, dict): + self.actions = actions + elif isinstance(actions, list): # turn list into a dict based on name + self.actions = {a.name: a for a in actions} + + # --- Set up the sequence of actions --- + # key is action name, value is a list of what action names are to be completed before it + + + if isinstance(action_sequence, dict): # Full dict provided (use directly) + self.action_sequence = {k: list(v) for k, v in action_sequence.items()} + + elif isinstance(action_sequence, str): + self.action_sequence = {} + + if action_sequence == 'series': # Puts the actions in linear sequence + actions = list(self.actions.values()) + for i in range(len(actions)): + if i==0: # first action has no dependencies + self.action_sequence[actions[i].name] = [] + else: # previous action must be done first + self.action_sequence[actions[i].name] = [ actions[i-1].name ] + + elif action_sequence == 'dependencies': # Sequences based on the dependencies of each action + + def getDeps(action): + deps = [] + for dep in action.dependencies: + deps.append(dep) + return deps + + 
self.action_sequence = {self.actions[name].name: getDeps(self.actions[name]) for name in self.actions} + else: + raise Exception("Action_sequence must be either 'series' or 'dependencies', or a dict.") + else: + raise Exception("Action_sequence must be either a string or dict.") + + + # Initialize some task variables + self.status = 0 # 0, waiting; 1=running; 2=finished + self.actions_ti = {} # relative start time of each action [h] + + self.duration = 0.0 # duration must be calculated based on lengths of actions + self.cost = 0.0 # cost must be calculated based on the cost of individual actions. + self.ti = 0.0 # task start time [h?] + self.tf = 0.0 # task end time [h?] + ''' + # Calculate duration and cost + self.calcDuration() # organizes actions and calculates duration + self.calcCost() + + print(f"---------------------- Initializing Task '{self.name} ----------------------") + print(f"Task '{self.name}' initialized with duration = {self.duration:.2f} h.") + print(f"Task '{self.name}' initialized with cost = ${self.cost:.2f} ") + ''' + + # --- Make a list that conveys the action sequence (similar format as Mooring subcomponents) + act_sequence = dependenciesToSequence(self.action_sequence) + # (contents of sequence or action names/keys) + + # >>> temporarily hard coded here >>> + # Here's a list of specs we might want to take the max of instead of sum: Add more as needed + specs_to_max = ['hook_height_m', 'depth_rating_m', + 'max_depth_m', 'accuracy_m', + 'speed_mpm', 'capacity_t'] + + + # ----- Get Task requirements ----- + + # Go through each series step in the task action sequence and figure out its requirements + # (storage capacities will add, for example) + + # A Task-level dependency dict that describes the overally requirements + # when all actions are combined in sequence + self.requirements = {} + + #req_bases = {} # accumulation of all the requirements over the task's action sequence (maxes, etc.) 
+ # + req_sequences = [[]] # sequence of totalled up requirements at each step + # capacity specs will add/subtract, while others will be instantaneous + # req_sequences is as nested list-list-dict-dict-dict of breakdown -> i_step -> req -> capacity* -> spec + # Whenever there are multiply capacities in a req, the number of + # breakdowns multiplies (branches) by the number of capacities. + # For storage-y reqs, there will only be one capacity listed in the final data. + + for i, step in enumerate(act_sequence): # go through each step in the action sequence + + #print(f" === Step {i} ==== ({len(req_sequences)} breakdowns)") + + for j in range(len(req_sequences)): + #if i == 0: # first step, start with a blank dict + req_sequences[j].append({}) # for the reqs to log at this step + #else: # copy over the last step's requirements as a starting point + # req_sequences[j].append(deepcopy(req_sequences[j][i-1])) + + # ----- Parallel actions case ----- + if isinstance(step, list): # parallel actions + # Currently, this approach just sums up the requirements/capabilities across + # parallel actions. This has the implication that one requirement must be + # fulfilled by only one type of capability. I.e. chain storage can't be + # divided between both deck space and chain locker. + + # A constraint to be considered is for parallel actions to be performed + # by separate vessels. That would require more thought, then implementation. + + for step_act in step: + + act = self.actions[step_act] + + # Go through requirements of the single action at this step + for req in act.requirements.values(): + + #print(f" Requirement {req['base']}") + + # is this requirement related to storage? 
+ storey = req['base'] in ['storage','chain_storage','rope_storage','cable_storage'] + + nold = len(req_sequences) # number of possible breakdowns SO FAR + + # Iterate for each possible requirements breakdown + for j in range(nold): + #print(f" j={j}") + + # Add an entry for this requirement if one doesn't already exist from last step + if not req['base'] in req_sequences[j][i]: + req_sequences[j][i][req['base']] = {} # add entry for this requirement + + ncaps = len(req['capabilities']) + + for k, cap in enumerate(req['capabilities']): # go through capabilities in the req + + #print(f" k={k} - capability is {cap}") + + # force to use the same storage as established previously if unloading + if storey and req['direction'] == -1: # if unloading + # look through prevous actions... + doNothing = True + for iprev in range(i-1, -1, -1): + if isinstance(act_sequence[iprev], list): # if parallel actions here + for act2_name in act_sequence[iprev]: + act2 = self.actions[act_sequence[iprev]] + if act.dependsOn(act2): # if dependency, look for related action + for req2 in act2.requirements.values(): + # if the same storage requirement gets added to or loaded + if req['base'] == req2['base'] and req2['direction']==1: + # Check if the current capability is what was loaded to + if cap in req_sequences[j][iprev][req['base']]: + doNothing = False # flag that this is the case to keep + break + else: + act2 = self.actions[act_sequence[iprev]] + if act.dependsOn(act2): # if dependency, look for related action + for req2 in act2.requirements.values(): + # if the same storage requirement gets added to or loaded + if req['base'] == req2['base'] and req2['direction']==1: + # Check if the current capability is what was loaded to + if cap in req_sequences[j][iprev][req['base']]: + doNothing = False # flag that this is the case to keep + break + + # this must mean we aren't unloading from a prevoiusly loaded capacity in this + # particular loop, so skip it + if doNothing: + continue # skip 
the rest of this + + # make a copy of things if it's a storage-y requirement and being added to + # (there are only options when adding to storage, not when removing) + if k < ncaps-1 and storey and req['direction'] == 1: + # I guess we need to make a copy of everything that happened before this, + this_req_sequence = deepcopy(req_sequences[j]) + + else: # otherwise (i.e. on the last one) work with the current sequence + this_req_sequence = req_sequences[j] + + # If this capacity isn't already stored at this req in this step (i.e. this is parallel action 0) + #if not cap in this_req_sequence[i][req['base']]: + new_cap = {} # add an entry for the capacity's specs + #else: # otherwise use the existing one + + + if i==0: # first step (starts off the values) + + for spec, val in req['capabilities'][cap].items(): + new_cap[spec] = val # add the specs + + else: # subsequent steps (accumulates some things) + + # -- add/subtract capability specs depending on direction -- + + last_specs = {} + + if storey: # if it's a storage related spec, make sure to work with prevous values + for iprev in range(i-1, -1, -1): # look for last time this requirement's capacity came up + if req['base'] in req_sequences[j][iprev] and cap in req_sequences[j][iprev][req['base']]: + last_specs = req_sequences[j][iprev][req['base']][cap] # cap value in previous step + break + + for spec, val in req['capabilities'][cap].items(): # go through specs of the capability + + if spec in this_req_sequence[i][req['base']][cap]: # check if it's already here (from a parallel action) + last_val = this_req_sequence[i][req['base']][cap][spec] + elif spec in last_specs: # otherwise use the previous value if available, so that we add to it + last_val = last_specs[spec] + else: + last_val = 0 + + # note: this logic should be good for storagey reqs, but unsure for others, e.g. 
cranes + + if req['direction'] == 0 or spec in specs_to_max: + new_cap[spec] = max(last_val, val) + + elif req['direction'] == 1: # add to the previous value + new_cap[spec] = last_val + val # add to previous value + + elif req['direction'] == -1: # subtract from the previous value + new_cap[spec] = last_val - val # subtract from previous value + + else: + raise Exception("Invalid direction value (must be 0, 1, or -1).") + + + this_req_sequence[i][req['base']][cap] = new_cap # add this req's info (may overwrite in parallel case) + + # Append this as a new possible sequence + if k < ncaps-1 and storey and req['direction'] == 1: + req_sequences.append(this_req_sequence) + # Note: if k==0 then the existing req sequence has already been adjusted + + + # ----- normal case, single action ----- + else: + act = self.actions[step] + + # Go through requirements of the single action at this step + for req in act.requirements.values(): + + #print(f" Requirement {req['base']}") + + # is this requirement related to storage? 
+ storey = req['base'] in ['storage','chain_storage','rope_storage','cable_storage'] + + nold = len(req_sequences) # number of possible breakdowns SO FAR + + # >>> bifurcate the current branch of the req_sequences, + # adding n-1 new branches where n is the number of capabilities + # (each which represents one possibility for satisfying the req) + #n = len(req['capabilities']) + + # Iterate for each possible requirements breakdown + for j in range(nold): + #print(f" j={j}") + + # Add an entry for this requirement if one doesn't already exist from last step + #if not req['base'] in req_sequences[j][i]: + req_sequences[j][i][req['base']] = {} # add entry for this requirement + + ncaps = len(req['capabilities']) + + for k, cap in enumerate(req['capabilities']): # go through capabilities in the req + # for k in range(len(req['capabilities'])-1, -1, -1): # go through capabilities in the req + #cap req['capabilities'].keys() + + #print(f" k={k} - capability is {cap}") + + # force to use the same storage as established previously if unloading: + if storey and req['direction'] == -1: # if unloading + keepThisCapability = False + for iprev in range(i-1, -1, -1): # look through prevous actions... + act2 = self.actions[act_sequence[iprev]] + if act.dependsOn(act2): # do something special here? 
+ + for req2 in act2.requirements.values(): + # if the same storage requirement gets added to or loaded + if req['base'] == req2['base'] and req2['direction']==1: + # Check if the current capability is what was loaded to + if cap in req_sequences[j][iprev][req['base']]: + #if i==4 and j==1: + # breakpoint() + keepThisCapability = True # flag that this is the case to keep + break + + # this must mean we aren't unloading from a prevoiusly loaded capacity in this + # particular loop, so kip it + if not keepThisCapability: + #if act.name=='lay_mooring-fowt0a': + # breakpoint() + #print(f"WARNING - action {act.name} involves unloading storage but the prior load action was not found") + continue # skip adding this capability + + # >>> still need to add support for parallel actions <<< + + # make a copy of things if it's a storage-y requirement and being added to + # (there are only options when adding to storage, not when removing) + #if k < ncaps-1 and storey and req['direction'] == 1: <<< old one + if k < ncaps-1 and not (storey and req['direction'] == -1): + # I guess we need to make a copy of everything that happened before this, + if j>20000: + breakpoint() + this_req_sequence = deepcopy(req_sequences[j]) + + else: # otherwise (i.e. 
on the last one) work with the current sequence + this_req_sequence = req_sequences[j] + + new_cap = {} # add an entry for the capacity's specs + + if i==0: # first step (starts off the values) + + for spec, val in req['capabilities'][cap].items(): + new_cap[spec] = val # add the specs + + else: # subsequent steps (accumulates some things) + + # -- add/subtract capability specs depending on direction -- + # if this req and cap already exist + + last_specs = {} + + if storey: # if it's a storage related spec, make sure to work with prevous values + for iprev in range(i-1, -1, -1): # look for last time this requirement's capacity came up + if req['base'] in req_sequences[j][iprev] and cap in req_sequences[j][iprev][req['base']]: + last_specs = req_sequences[j][iprev][req['base']][cap] # cap value in previous step + break + + #if not cap in req_sequences[j][i][req['base']]: # if capacity doesn't exist in past + # req_sequences[j][i][req['base']][cap] = {} # add a blank for it + + for spec, val in req['capabilities'][cap].items(): # go through specs of the capability + + if spec in last_specs: + last_val = last_specs[spec] + #if spec in req_sequences[j][i][req['base']][cap]: + # last_val = req_sequences[j][i][req['base']][cap][spec] + else: + last_val = 0 + + if req['direction'] == 0 or spec in specs_to_max: + new_cap[spec] = max(last_val, val) + + elif req['direction'] == 1: # add to the previous value + new_cap[spec] = last_val + val # add to previous value + + elif req['direction'] == -1: # subtract from the previous value + new_cap[spec] = last_val - val # subtract from previous value + + else: + raise Exception("Invalid direction value (must be 0, 1, or -1).") + + #print(f" {act.name} {req['base']} {cap} {spec} ") + + #... also check if a spec value is going to go below zero, leave at zero ^^^ + # also distinguish between stock and flow specs, e.g. 
some to max vs add/subtract ^^^ + + #if act.name == 'mooring_hookup-fowt0a': + # breakpoint() + + this_req_sequence[i][req['base']][cap] = new_cap # add this req's info + + #if j > 40: + # breakpoint() + + # Append this as a new possible sequence + #if k < ncaps-1 and storey and req['direction'] == 1: + if k < ncaps-1 and not (storey and req['direction'] == -1): + req_sequences.append(this_req_sequence) + # Note: if k==0 then the existing req sequence has already been adjusted + + print(f"Task requirements processed. There are {len(req_sequences)} possible combinations.") + + + # Go through the requirements sequence and find the maximum values + # These become the overall requirements of the task. + task_reqs = [] + for j in range(len(req_sequences)): + task_reqs.append({}) # An empty dictionary of requirements for this breakdown + + for i, rs in enumerate(req_sequences[j]): + + for req, caps in rs.items(): + + # if req not already in the list, add it + if not req in task_reqs[j]: + task_reqs[j][req] = {} + + # go through req capabilities + for cap, specs in caps.items(): + if not cap in task_reqs[j][req]: # if cap not in the list, + task_reqs[j][req][cap] = {} # add it + + # go through capability specs and take the maxes + for spec, val in specs.items(): + if spec in task_reqs[j][req][cap]: + last_val = task_reqs[j][req][cap][spec] + else: + last_val = 0 + + # Retain the max value of the spec + task_reqs[j][req][cap][spec] = max(last_val, val) + + if len(req_sequences) > 20000: + breakpoint() + print("there's a lot of options") + + # Save things + self.act_sequence = act_sequence + self.req_sequences = req_sequences + self.task_reqs = task_reqs + + + + def checkAssets(self, assets, display=0): + ''' + Checks if a specified set of assets has sufficient capabilities and + specs to fulfill all requirements of this task. + + Parameters + ---------- + assets : list of assets + ''' + + # this should evaluate the assets w.r.t. 
self.task_reqs + + + + # Sum up the asset capabilities and their specs (not sure this is useful/valid) + + # Here's a list of specs we might want to take the max of instead of sum: Add more as needed + ''' + specs_to_max = ['hook_height_m', 'depth_rating_m', + 'max_depth_m', 'accuracy_m', + 'speed_mpm', 'capacity_t'] # capacity_t is here because it doesn't make sense to have two cranes to lift a single anchor. + ''' + asset_caps = combineCapabilities(assets, display=display-1) + + # <<< maybe instead of all this we should do an approach that looks by asset + # because that could then also be used to decide asset assignment + # to each requirement >>> + + + # See if summed asset capabilities satisfy any of the n task_req breakdowns + # .>>> an output of this could also be assigning assets to action requirements!! >>> + + requirements_met = [] + assignable = [] + + for i in range(len(self.task_reqs)): + + if display > 2: print(f"Task {self.name} requirements breakdown #{i}:") + + requirements_met.append({}) + + requirements_met[i] = doCapsMeetRequirements(asset_caps, self.task_reqs[i]) + ''' + + for req, caps in self.task_reqs[i].items(): # go through each requirement + + requirements_met[i][req] = False # start assume it is not met + + # Let's check if each capability is sufficiently provided for + capable = True # starting with optimism... 
+ + for cap, specs in caps.items(): # go throuch each capability of the requirement + + if cap not in asset_caps: # assets don't have this capability, fail + capable = False + if display > 2: print(f"Warning: capability '{cap}' is missing from the assets.") + break + + for key, val in specs.items(): # go through each spec for this capability + + if val == 0: # if zero value, no spec required, move on + continue + if key not in asset_caps[cap]: # if the spec is missing, fail + capable = False + if display > 2: print(f"Warning: capability '{cap}' does not have spec '{key}'.") + break + if asset_caps[cap][key] < val: # if spec is too small, fail + capable = False + if display > 2: print(f"Warning: capability '{cap}' does not meet spec '{key}' requirement of {val:.2f} (has {asset_caps[cap][key]:.2f}).") + break + + # Final call on whether requirement can be met + if capable: + requirements_met[i][req] = True + else: + requirements_met[i][req] = False + if display > 1: print(f"Requirement '{req}' is not met by asset(s):") + if display > 2: print(f"{assets}.") + break + ''' + # Check if all requirements are met by the assets for this breakdown + assignable.append(all(requirements_met[i].values())) + if display > 1: print(f" Suitability is {assignable[i]}.") + + if display > 0: + print(f"Finished checking assets. 
{sum(assignable)} of {len(assignable)} requirement breakdowns are feasible.") + ''' + if self.name =='install_all_anchors': + for i in range(len(self.task_reqs)): + + + print(doCapsMeetRequirements(asset_caps, self.task_reqs[i])) + + if not 'divers' in self.task_reqs[i]['anchor_orienting']: + print(i) + printStruct(self.task_reqs[i]) + + breakpoint() + ''' + ''' (Older method that looks at any capability being satisfied) + requirements_met = {} + for req, vals in self.requirements.items(): # go through each requirement + + caps = vals['capabilities'] + dir = vals['direction'] + + # The following logic should mark a requirement as met if any one of + # the requirement's needed capabilities has all of its specs by the + # combined spec values of the assets + + requirements_met[req] = False # start assume it is not met + + for cap, specs in caps.items(): # go throuch capability of the requirement + if cap not in asset_caps: # assets don't have this capability, move on + continue + + # Let's check if this capability is sufficient + capable = True + for key, val in specs.items(): # go through each spec for this capability + + if val == 0: # if zero value, no spec required, move on + continue + if key not in asset_caps[cap]: # if the spec is missing, fail + capable = False + print(f"Warning: capability '{cap}' does not have metric '{key}'.") + break + if asset_caps[cap][key] < val: # if spec is too small, fail + # note: may need to add handling for lists/strings, or standardize specs more + capable = False + print(f"Warning: capability '{cap}' does not meet metric '{key}' requirement of {val:.2f} (has {asset_caps[cap][key]:.2f}).") + break + + if capable: + requirements_met[req] = True # one capability fully satisfies the requirement + break # no need to check other capabilities for this requirement + + if not requirements_met[req]: + print(f"Requirement '{req}' is not met by asset(s): {assets}.") + + assignable = all(requirements_met.values()) + + # message: + if 
assignable: + message = "Asset meets all required capabilities." + else: + unmet = [req for req, met in requirements_met.items() if not met] + detailed = [] + for req in unmet: + expected = [cap for cap in self.requirements[req].keys()] + detailed.append(f"- {req}: {expected}.") + detailed_msg = "\n".join(detailed) + + message = "Asset does not meet the following required capabilities:\n" + detailed_msg + ''' + + + # return bool of if any req breakdowns can be satisfied, and a list of which ones + return any(assignable), assignable + + + def assignAssets(self, assets, display=0): + '''Figures out an assignment of the asset capabilities to the task's + steps' requirements, including each action's requirements.''' + + doable, indices = self.checkAssets(assets) + + if doable: + + # sum up combined asset capabilities + asset_caps = combineCapabilities(assets) + + # take the first requirement breakdown that works + ind = indices.index(True) # get the index of the first true value + + # Select that breakdown (update Task's/actions' requirements) <<< can be turned into method + # Go through and delete any requirements in the actions that don't correspond to this breakdown + # traverse action sequence + for i, step in enumerate(self.act_sequence): # go through each step in the action sequence + + if isinstance(step, list): # Parallel actions case + for j in len(step): # each parallel action + pass # (we don't actually know how to handle this yet) <<< + + else: # normal case (one action at a time) + + action = self.actions[self.act_sequence[i]] # this step's action + reqs = self.req_sequences[ind][i] # altered/active requirements at this step + + for areq in action.requirements.values(): + if not areq['base'] in reqs: + raise Exception(f"Action {action.name} somehow has a req that isn't in the task's req_sequence") + + # Create selected_capabilities (or clear it if it already exists) + areq['selected_capability'] = {} + + for acap, aspecs in areq['capabilities'].items(): # 
cycle through action's reqs capability keys + if acap in reqs[areq['base']]: # if this capability is listed, it means we plan to use it + areq['selected_capability'][acap] = aspecs # so copy it over + # (there should only be one capability selected per requirement) + + # Note which asset(s) are planned to fill this req + for ass in assets: # try individual assets + met = checkCapability(areq['selected_capability'], [ass], acap) + if met: + areq['assigned_assets'] = [ass] + break + + if not met: # try ALL assets combined + met = checkCapability(areq['selected_capability'], assets, acap) + if met: + areq['assigned_assets'] = assets + else: + raise Exception(f"Task {self.name} could not satisfy action {action.name} capability {acap} with the available assets.") + + + action.assignAssets(assets) + + self.assetList = assets + + if display > 0: + print(f"For the task {self.name}, assigned the assets:") + print([asset['name'] for asset in assets]) + else: + print("This asset assignment is not feasible for the task.") + + + def getSequenceGraph(self, action_sequence=None, plot=True): + '''Generate a multi-directed graph that visalizes action sequencing within the task. + Build a MultiDiGraph with nodes: + Start -> CP1 -> CP2 -> ... -> End + + Checkpoints are computed from action "levels": + level(a) = 1 if no prerequisites. + level(a) = 1 + max(level(p) for p in prerequisites) 1 + the largest level among a’s prerequisites. + Number of checkpoints = max(level) - 1. + ''' + if action_sequence is None: + action_sequence = self.action_sequence + # Compute levels + levels: dict[str, int] = {} + def level_of(a: str, b: set[str]) -> int: + '''Return the level of action a. b is the set of actions currently being explored''' + + # If we have already computed the level, return it + if a in levels: + return levels[a] + + if a in b: + raise ValueError(f"Cycle detected in action sequence at '{a}' in task '{self.name}'. 
The action cannot be its own prerequisite.") + + b.add(a) + + # Look up prerequisites for action a. + pres = action_sequence.get(a, []) + if not pres: + lv = 1 # No prerequisites, level 1 + else: + # If a prerequisites name is not in the dict, treat it as a root (level 1) + lv = 1 + max(level_of(p, b) if p in action_sequence else 1 for p in pres) + # b.remove(a) # if you want to unmark a from the explored dictionary, b, uncomment this line. + levels[a] = lv + return lv + + for a in action_sequence: + level_of(a, set()) + + max_level = max(levels.values(), default=1) + num_cps = max(0, max_level - 1) + + H = nx.MultiDiGraph() + + # Add the Start -> [checkpoints] -> End nodes + H.add_node("Start") + for i in range(1, num_cps + 1): + H.add_node(f"CP{i}") + H.add_node("End") + + shells = [["Start"]] + if num_cps > 0: + # Middle shells + cps = [f"CP{i}" for i in range(1, num_cps + 1)] + shells.append(cps) + shells.append(["End"]) + + pos = nx.shell_layout(H, nlist=shells) + + xmin, xmax = -2.0, 2.0 # maybe would need to change those later on. + pos["Start"] = (xmin, 0) + pos["End"] = (xmax, 0) + + # Add action edges + # Convention: + # level 1 actions: Start -> CP1 (or Start -> End if no CPs) + # level L actions (2 <= L < max_level): CP{L-1} -> CP{L} + # level == max_level actions: CP{num_cps} -> End + for action, lv in levels.items(): + action = self.actions[action] + if num_cps == 0: + # No checkpoints: all actions from Start to End + H.add_edge("Start", "End", key=action, duration=action.duration, cost=action.cost) + else: + if lv == 1: + H.add_edge("Start", "CP1", key=action, duration=action.duration, cost=action.cost) + elif lv < max_level: + H.add_edge(f"CP{lv-1}", f"CP{lv}", key=action, duration=action.duration, cost=action.cost) + else: # lv == max_level + H.add_edge(f"CP{num_cps}", "End", key=action, duration=action.duration, cost=action.cost) + # 3. 
Compute cumulative start time for each level + level_groups = {} + for action, lv in levels.items(): + level_groups.setdefault(lv, []).append(action) + + level_durations = {lv: max(self.actions[a].duration for a in acts) + for lv, acts in level_groups.items()} + + + task_duration = sum(level_durations.values()) + level_start_time = {} + elapsed = 0.0 + cp_string = [] + for lv in range(1, max_level + 1): + level_start_time[lv] = elapsed + elapsed += level_durations.get(lv, 0.0) + # also collect all actions at this level for title + acts = [a for a, l in levels.items() if l == lv] + if acts and lv <= num_cps: + cp_string.append(f"CP{lv}: {', '.join(acts)}") + elif acts and lv > num_cps: + cp_string.append(f"End: {', '.join(acts)}") + # Assign to self: + self.duration = task_duration + self.actions_ti = {a: level_start_time[lv] for a, lv in levels.items()} + self.sequence_graph = H + title_str = f"Task {self.name}. Duration {self.duration:.2f} : " + " | ".join(cp_string) + if plot: + fig, ax = plt.subplots() + # pos = nx.shell_layout(G) + nx.draw(H, pos, with_labels=True, node_size=500, node_color="lightblue", edge_color='white') + + label_positions = {} # to store label positions for each edge + # Group edges by unique (u, v) pairs + for (u, v) in set((u, v) for u, v, _ in H.edges(keys=True)): + # get all edges between u and v (dict keyed by edge key) + edge_dict = H.get_edge_data(u, v) # {key: {attrdict}, ...} + n = len(edge_dict) + + # curvature values spread between -0.3 and +0.3 [helpful to visualize multiple edges] + if n==1: + rads = [0] + offsets = [0.5] + else: + rads = np.linspace(-0.3, 0.3, n) + offsets = np.linspace(0.2, 0.8, n) + + # draw each edge + durations = [d.get("duration", 0.0) for d in edge_dict.values()] + scale = max(max(durations), 0.0001) # avoid div by zero + width_scale = 4.0 / scale # normalize largest to ~4px + + for rad, offset, (k, d) in zip(rads, offsets, edge_dict.items()): + nx.draw_networkx_edges( + H, pos, edgelist=[(u, v)], ax=ax, 
+ connectionstyle=f"arc3,rad={rad}", + arrows=True, arrowstyle="-|>", + edge_color="gray", + width=max(0.5, d.get("duration", []) * width_scale), + ) + label_positions[(u, v, k)] = offset # store position for edge label + + ax.set_title(title_str, fontsize=12, fontweight="bold") + ax.axis("off") + plt.tight_layout() + + return H + + + def calcDuration(self, duration_interval=0.5): + '''Organizes the actions to be done by this task into the proper order + based on the action_sequence. This is used to fill out + self.actions_ti, self.ti, and self.tf. This method assumes that action.duration + have already been evaluated for each action in self.actions. + ''' + # Initialize dictionaries to hold start and finish times + starts = {} + finishes = {} + + # Iterate through actions in the sequence + for action, dep_actions in self.action_sequence.items(): + # Calculate start time as the max finish time of dependencies + # (set as zero if the action does not depend on other actions in the task) + starts[action] = max((finishes[dep] for dep in dep_actions), default=0) + + # get duration from actions + duration = self.actions[action].duration # in hours + + # Calculate finish time + finishes[action] = starts[action] + duration + + # Update self.actions_ti with relative start times + self.actions_ti = starts + + # Task duration (rounded to nearest interval) + raw_duration = max(finishes.values()) + self.duration = np.round(raw_duration / duration_interval) * duration_interval + + # Update task finish time + self.tf = self.ti + self.duration + + + def calcCost(self): + '''Calculates the total cost of the task based on the costs of individual actions. + Updates self.cost accordingly. This method assumes that action.cost has + already been evaluated for each action in self.actions. 
+ ''' + total_cost = 0.0 + for action in self.actions.values(): + total_cost += action.cost + self.cost = total_cost + return self.cost + + + def setStartTime(self, start_time): + '''Update the start time of all actions based on a new task start time. + This requires that the task's duration and relative action start times are + already calculated. + + Parameters + ---------- + newStart : float + The new start time for the task. All action start times will be adjusted accordingly. + ''' + + # Update task start and finish times + self.ti = start_time + self.tf = start_time + self.duration + + # Update action times + for name, action in self.actions.items(): + action.setStartTime(start_time + self.actions_ti[name]) + + + def clearAssets(self): + ''' + Clear all assigned assets from all actions in the task. + This resets the asset assignments and re-evaluates the actions. + ''' + for action in self.actions.values(): + action.clearAssets() + + # Reinitialize duration and cost after clearing assets. + self.duration = 0 + self.cost = 0 + + + def get_row(self, assets): + '''Get a matrix of (cost, duration) tuples for each asset to perform this task. Will be a row in the task_asset matrix. + + Parameters + ---------- + assets : list + A list of all assets available to perform the task. + + Returns + ------- + matrix : array-like + A 2D array of (cost, duration) tuples indicating the cost and duration for each asset to perform this task. + Must be 2x len(assets). + + ''' + + matrix = np.zeros((len(assets), 2)) + # TODO: build row of matrix that holds (cost, duration) tuple of asset / assets (?) to complete task + + # Could look something like... + ''' + for i, asset in enumerate(assets): + for action in self.actions: # can we do this without the double loop? 
+ if asset in action.roles: + action = self.actions[asset.name] + matrix[i, 0] = action.cost + matrix[i, 1] = action.duration + else: + matrix[i, 0] = -1 # negative cost/duration means asset cannot perform task + matrix[i, 1] = -1 + ''' + + return np.zeros((len(assets), 2)) # placeholder, replace with actual matrix + + + def GanttChart(self, start_at_zero=True, color_by=None): + '''Generate a Gantt chart for the task showing the schedule of actions. + + Returns + ------- + fig : matplotlib.figure.Figure + The figure object containing the Gantt chart. + ax : matplotlib.axes.Axes + The axes object containing the Gantt chart. + ''' + + # --- color palette --- + colors = [ + "lime", "orange", "magenta", "blue", + "red", "yellow", "cyan", "purple" + ] + + fig, ax = plt.subplots(figsize=(6, 6)) + + # Prepare data for Gantt chart + action_names = list(self.actions.keys()) + start_times = [self.actions_ti[name] for name in action_names] + durations = [self.actions[name].duration for name in action_names] + + # Get asset information from action.assets + all_assets = [asset['name'] for asset in self.assetList] # list of asset names + ''' + all_assets = set() + #all_roles = set() + for action in self.actions.values(): + for asset in action.assetList: + all_assets.add(asset) + #all_roles.add(role) + ''' + # Assign colors + if color_by == 'asset': + color_dict = {asset: colors[i] for i, asset in enumerate(all_assets)} + ''' + elif color_by == 'role': + # Flip the colors + colors = colors[::-1] + role_list = list(all_roles) + color_dict = {role: colors[i] for i, role in enumerate(role_list)} + ''' + # Generate vertical lines to indicate the start and finish of the whole task + ax.axvline(x=self.ti, ymin=0, ymax=len(action_names), color='black', linestyle='-', linewidth=2.0) + ax.axvline(x=self.tf, ymin=0, ymax=len(action_names), color='black', linestyle='-', linewidth=2.0) + + # Create bars for each action + ht = 0.4 + for i, (name, start, duration) in 
enumerate(zip(action_names, start_times, durations)): + opp_i = len(action_names) - i - 1 # to have first action on top + action = self.actions[name] + assets = list({asset['name'] for asset in action.assetList}) + #roles = list({role for role in action.assets.keys()}) + + assets = list(set(assets)) # Remove duplicates from assets + + n_assets = len(assets) + #n_roles = len(roles) + + if color_by is None: + ax.barh(opp_i, duration, color='cyan', left=start, height=ht, align='center') + elif color_by == 'asset': + # Compute vertical offsets if multiple assets + if n_assets == 0: + # No assets info + ax.barh(i, duration, left=start, height=ht, color='cyan', align='center') + else: + sub_ht = ht / n_assets + for j, asset in enumerate(assets): + bottom = opp_i - ht/2 + j * sub_ht + color = color_dict.get(asset, 'gray') + ax.barh(bottom + sub_ht/2, duration, left=start, height=sub_ht * 0.9, + color=color, edgecolor='k', linewidth=0.3, align='center') + ''' + elif color_by == 'role': + # Compute vertical offsets if multiple roles + if n_roles == 0: + # No roles info + ax.barh(opp_i, duration, left=start, height=ht, color='cyan', align='center') + else: + sub_ht = ht / n_roles + for j, role in enumerate(roles): + bottom = opp_i - ht/2 + j * sub_ht + color = color_dict.get(role, 'gray') + ax.barh(bottom + sub_ht/2, duration, left=start, height=sub_ht * 0.9, + color=color, edgecolor='k', linewidth=0.3, align='center') + ''' + else: + color_by = None + raise Warning(f"color_by option '{color_by}' not recognized. Use 'asset', 'role'. 
None will be used") + + ax.text(self.ti, opp_i, f' {name}', va='center', ha='left', color='black') + ax.axhline(y=opp_i - ht/2, xmin=0, xmax=self.tf, color='gray', linestyle='--', linewidth=0.5) + ax.axhline(y=opp_i + ht/2, xmin=0, xmax=self.tf, color='gray', linestyle='--', linewidth=0.5) + ax.axvline(x=start, ymin=0, ymax=len(action_names), color='gray', linestyle='--', linewidth=0.5) + + # Set y-ticks and labels + ax.set_yticks(range(len(action_names))) + ax.set_yticklabels([]) + + # Set labels and title + ax.set_xlabel('time (hrs.)') + ax.set_title(f'Gantt Chart for Task: {self.name}') + + if color_by == 'asset': + handles = [plt.Rectangle((0, 0), 1, 1, color=color_dict[a]) for a in all_assets] + ax.legend(handles, all_assets, title='Assets', bbox_to_anchor=(1.02, 1), loc='upper right') + ''' + elif color_by == 'role': + handles = [plt.Rectangle((0, 0), 1, 1, color=color_dict[a]) for a in all_roles] + ax.legend(handles, all_roles, title='Roles', bbox_to_anchor=(1.02, 1), loc='upper right') + ''' + if start_at_zero: + ax.set_xlim(0, self.tf + 1) + # Create a grid and adjust layout + # ax.grid(True) + plt.tight_layout() + return fig, ax + + + def chart(self, start_at_zero=True): + '''Generate a chart grouped by asset showing when each asset is active across all actions. + + Parameters + ---------- + start_at_zero : bool, optional + If True, the x-axis starts at zero. Defaults to True. + + Returns + ------- + fig : matplotlib.figure.Figure + The figure object containing the Gantt chart. + ax : matplotlib.axes.Axes + The axes object containing the Gantt chart. + ''' + pass + + + def chart(self, title='', outpath='', dpi=200): + ''' + Render a Gantt-like chart for a single ChartTask with one axes and one horizontal lane per vessel. + • Vessel names as y-tick labels + • Baseline arrows, light span bars, circle bubbles with time inside, title above, + and consistent font sizes. 
    def chart(self, title='', outpath='', dpi=200):
        '''
        Render a Gantt-like chart for a single ChartTask with one axes and one horizontal lane per vessel.
        • Vessel names as y-tick labels
        • Baseline arrows, light span bars, circle bubbles with time inside, title above,
          and consistent font sizes.
        • Horizontal placement uses Bubble.period when available; otherwise cumulative within vessel.
        • Bubbles are colored by Bubble.category (legend added).

        Show an action on multiple lanes if it uses multiple assets.
        Skip actions with dur<=0 or with no resolvable lanes.

        Parameters
        ----------
        title : str, optional
            Title drawn above the axes (omitted when empty).
        outpath : str, optional
            If given, the figure is saved to this path instead of being shown.
        dpi : int, optional
            Figure resolution for display and for the saved image.
        '''

        # MH: unsure how much of this up-front stuff is needed

        from dataclasses import dataclass
        from typing import List, Optional, Dict, Tuple
        import matplotlib.pyplot as plt

        # Data structures

        @dataclass
        class Bubble:
            # One plotted circle: a single action occurrence on a vessel lane
            action: str
            duration_hr: float
            label_time: str
            period: Optional[Tuple[float, float]] = None
            category: Optional[str] = None  # new: action category for coloring

        @dataclass
        class VesselTimeline:
            # One lane: a vessel and its ordered bubbles
            vessel: str
            bubbles: List[Bubble]

        # Color palette + categorization

        # User-requested color scheme
        ACTION_TYPE_COLORS: Dict[str, str] = {
            'Mobilization': '#d62728',  # red
            'Towing & Transport': '#2ca02c',  # green
            'Mooring & Anchors': '#0056d6',  # blue
            'Heavy Lift & Installation': '#ffdd00',  # yellow
            'Cable Operations': '#9467bd',  # purple
            'Survey & Monitoring': '#ff7f0e',  # orange
            'Other': '#1f77b4'}  # fallback color (matplotlib default)


        # Keyword buckets → chart categories (first matching bucket wins)
        CAT_KEYS = [
            ('Mobilization', ('mobilize', 'demobilize')),
            ('Towing & Transport', ('transit', 'towing', 'tow', 'convoy', 'linehaul')),
            ('Mooring & Anchors', ('anchor', 'mooring', 'pretension', 'pre-tension')),
            ('Survey & Monitoring', ('monitor', 'survey', 'inspection', 'rov', 'divers')),
            ('Heavy Lift & Installation', ('install_wec', 'install device', 'install', 'heavy-lift', 'lift', 'lower', 'recover_wec', 'recover device')),
            ('Cable Operations', ('cable', 'umbilical', 'splice', 'connect', 'wet-mate', 'dry-mate'))]



        # MH: making a vessels dict to fit with what this was looking for (quick temporary solution)
        vessels = { ves['name'] : ves for ves in self.assetList }

        # reverse lookup for identity → key
        id2key = {id(obj): key for key, obj in vessels.items()}

        # unique type → key (used only if type is unique in catalog)
        type_counts = {}
        for k, obj in vessels.items():
            t = obj.get('type') if isinstance(obj, dict) else getattr(obj, 'type', None)
            if t:
                type_counts[t] = type_counts.get(t, 0) + 1
        unique_type2key = {}
        for k, obj in vessels.items():
            t = obj.get('type') if isinstance(obj, dict) else getattr(obj, 'type', None)
            if t and type_counts.get(t) == 1:
                unique_type2key[t] = k

        # buckets maps lane key (vessel name) -> list of Bubbles to draw there
        buckets = {}

        for a in self.actions.values():
            if a.duration <= 0.0:
                continue  # skip if no duration

            aa = getattr(a, 'assets', {}) or {}  # NOTE(review): currently unused — confirm

            # collect ALL candidate roles → multiple lanes allowed
            lane_keys = set()
            for v in a.assetList:

                # resolve lane key: identity first, then name, then unique type
                lane = id2key.get(id(v))
                if lane is None and isinstance(v, dict):
                    nm = v.get('name')
                    if isinstance(nm, str) and nm in vessels:
                        lane = nm
                    else:
                        t = v.get('type')
                        if t in unique_type2key:
                            lane = unique_type2key[t]
                if lane:
                    lane_keys.add(lane)

            if not lane_keys:
                continue

            # Color code for action categories based on CAT_KEYS
            def cat_for(act):
                s = f"{getattr(act, 'type', '')} {getattr(act, 'name', '')}".lower().replace('_', ' ')
                for cat, keys in CAT_KEYS:
                    if any(k in s for k in keys):
                        return cat
                return 'Other'

            # one bubble per lane (same fields)
            for lane in lane_keys:
                b = Bubble(
                    action=a.name,
                    duration_hr=a.duration,
                    label_time=getattr(a, 'label_time', f'{a.duration:.1f}'),
                    period=( a.ti, a.tf ),
                    category=cat_for(a))

                buckets.setdefault(lane, []).append(b)
            #breakpoint()
            #print('hi')

        # preserve sc.vessels order; only include lanes with content
        lanes = []
        for vname in vessels.keys():
            blist = sorted(buckets.get(vname, []), key=lambda b: b.period[0])
            if blist:
                lanes.append(VesselTimeline(vessel=vname, bubbles=blist))

        # Core plotter (single-axes, multiple lanes)

        from matplotlib.lines import Line2D
        from matplotlib.patches import Circle

        # --- figure geometry ---
        nrows = max(1, len(lanes))
        fig_h = max(3.0, 0.8 + 1*nrows)
        fig_w = 5

        plt.rcdefaults()
        plt.close('all')
        fig, ax = plt.subplots(figsize=(fig_w, fig_h), dpi=dpi)

        # --- y lanes (top -> bottom keeps given order) ---
        vessels_top_to_bottom = lanes  # <<< this seems to just make a duplicate reference for the same data
        nrows = max(1, len(lanes))
        y_positions = list(range(nrows))[::-1]
        name_to_y = {vt.vessel: y_positions[i] for i, vt in enumerate(vessels_top_to_bottom[::-1])}

        ax.set_yticks(y_positions)
        ax.set_yticklabels([])
        ax.tick_params(axis='y', labelrotation=0)

        if len(title) > 0:
            ax.set_title(title, loc='left', pad=6)

        # --- gather periods, compute x-range ---
        x_min, x_max = 0.0, 0.0
        per_row: Dict[str, List[Tuple[float, float, Bubble]]] = {vt.vessel: [] for vt in lanes}

        for vt in lanes:
            t_cursor = 0.0  # cumulative fallback when a bubble has no period
            for b in vt.bubbles:
                if b.period:
                    s, e = float(b.period[0]), float(b.period[1])
                else:
                    s = t_cursor
                    e = s + float(b.duration_hr or 0.0)
                per_row[vt.vessel].append((s, e, b))
                x_min = min(x_min, s)
                x_max = max(x_max, e)
                t_cursor = e

        # --- drawing helpers ---
        def _draw_lane_baseline(y_val: float):
            # horizontal arrow spanning the full time range of the chart
            ax.annotate('', xy=(x_max, y_val), xytext=(x_min, y_val),
                        arrowprops=dict(arrowstyle='-|>', lw=2))

        def _draw_span_hint(s: float, e: float, y_val: float):
            # faint bar showing the action's actual duration span
            ax.plot([s, e], [y_val, y_val], lw=4, alpha=0.15, color='k')

        def _bubble_face_color(b: Bubble) -> str:
            cat = b.category or 'Other'
            return ACTION_TYPE_COLORS.get(cat, ACTION_TYPE_COLORS['Other'])

        def _text_color_for_face(face: str) -> str:
            # black text on yellow for contrast, white elsewhere
            return 'black' if face.lower() in ('#ffdd00', 'yellow') else 'white'

        def _draw_bubble(s: float, e: float, y_val: float, b: Bubble, i_in_row: int):
            # circle centered on the span midpoint, labeled with the duration
            xc = 0.5*(s + e)
            face = _bubble_face_color(b)
            txtc = _text_color_for_face(face)
            ax.plot(xc, y_val, 'o', ms=22, color=face, zorder=3)
            ax.text(xc, y_val, f'{b.label_time}', ha='center', va='center', fontsize=10,
                    color=txtc, weight='bold')
            # alternate title height so adjacent labels don't collide
            title_offset = 0.30 if (i_in_row % 2) else 0.20
            ax.text(xc, y_val + title_offset, b.action, ha='center', va='bottom', fontsize=6)
            # caps_txt = _capabilities_to_text(b.capabilities)
            # if caps_txt:
            #     ax.text(xc, y_val - 0.26, caps_txt, ha='center', va='top', fontsize=8, wrap=True)

        # --- draw per lane ---
        seen_cats: set[str] = set()
        for vt in lanes:
            y = name_to_y[vt.vessel]
            items = sorted(per_row[vt.vessel], key=lambda t: t[0])
            _draw_lane_baseline(y)
            for j, (s, e, b) in enumerate(items):
                _draw_span_hint(s, e, y)
                _draw_bubble(s, e, y, b, j)
                seen_cats.add(b.category or 'Other')

        # --- legend (only categories actually drawn) ---
        handles = []
        legend_cats = [c for c in ACTION_TYPE_COLORS.keys() if c in seen_cats]
        # if you prefer to always show all categories, replace the line above with: legend_cats = list(ACTION_TYPE_COLORS.keys())
        for cat in legend_cats:
            handles.append(Line2D([0], [0], marker='o', linestyle='none', markersize=6,
                                  markerfacecolor=ACTION_TYPE_COLORS[cat], markeredgecolor='none', label=cat))
        if handles:
            # Place the legend below the x-axis label (bottom center)
            fig_ = ax.figure
            fig_.legend(handles=handles,
                        loc='lower center',
                        bbox_to_anchor=(0.5, -0.12),  # move below the axis label
                        ncol=3,
                        title='Action Types',
                        frameon=False)

        # --- axes cosmetics & limits ---
        if x_max <= x_min:
            x_max = x_min + 1.0  # avoid a zero-width axis when nothing was drawn
        pad = 0.02*(x_max - x_min) if (x_max - x_min) > 0 else 0.5
        ax.set_xlim(x_min - pad, x_max + pad)

        # Draw circled vessel names at the same y positions
        x_name = x_min - 3*pad  # small left offset inside the axes

        # After you have vessels_top_to_bottom, name_to_y, x_min/x_max, pad, left_extra, x_name...
        max_len = max(len(vt.vessel) for vt in vessels_top_to_bottom)  # longest label

        # make the circle tighter/looser:
        circle_pad = 0.18

        for vt in vessels_top_to_bottom[::-1]:
            y = name_to_y[vt.vessel]
            fixed_text = vt.vessel.center(max_len)  # pad with spaces to max length
            ax.text(
                x_name, y, fixed_text,
                ha='center', va='center', zorder=6, clip_on=False,
                fontsize=6, color='black', fontfamily='monospace',  # <- key: monospace
                bbox=dict(boxstyle='circle,pad={:.2f}'.format(circle_pad),
                          facecolor='lightgrey', edgecolor='tomato', linewidth=3))

        ax.set_xlabel('Timeline (h)')
        ax.grid(False)
        for spine in ['top', 'right', 'left']:
            ax.spines[spine].set_visible(False)

        ax.set_ylim(min(y_positions) - 0.5, max(y_positions) + 0.5)

        fig = ax.figure
        # Add extra bottom margin to make space for the legend below the x-axis label
        fig.subplots_adjust(left=0.10, right=0.98, top=0.90, bottom=0.15)

        if outpath:
            fig.savefig(outpath, dpi=dpi, bbox_inches='tight')
        else:
            plt.show()
def dependenciesToSequence(dependencies):
    '''
    Receive a dictionary of item dependencies that define a sequence,
    and generate a nested list that follows that sequence.

    Example:
           B  A G D F H
           C        E

    dependencies = {'a': ['c'], 'b': [],
                    'c': [],    'd': ['g'],
                    'e': ['d'], 'f': ['d'],
                    'g': ['a'], 'h': ['e','f']}

    Parameters
    ----------
    dependencies : dict
        Mapping of item name -> list of names of the items it depends on.

    Returns
    -------
    list
        Ordered steps: a step with one item is the bare item name; a step
        with multiple concurrent items is a list of names. Items whose
        dependencies can never be satisfied (e.g. cycles) are left out,
        matching the previous behavior.
    '''
    acts = list(dependencies.keys())    # list of action names
    deps = list(dependencies.values())  # dependencies of each action
    n = len(acts)                       # number of actions in this task
    step_of = {}                        # action name -> assigned step index

    sequence = [[]]                     # create first step slot in the sequence
    for act, dep in zip(acts, deps):
        if len(dep) == 0:               # no dependency: it's in the first (0) step
            step_of[act] = 0
            sequence[0].append(act)

    for j in range(1, n):               # look for step-j actions (a DAG needs at most n steps)
        sequence.append([])             # create next step slot in the sequence
        for act, dep in zip(acts, deps):
            if act in step_of:          # only look at actions that aren't yet sequenced
                continue
            # Bug fix: an action may only enter step j once ALL of its
            # dependencies are placed in earlier steps (previously any single
            # dependency in step j-1 was enough, so an action could be
            # scheduled before another of its dependencies). It goes in step
            # j exactly when its latest dependency finished in step j-1.
            if (all(step_of.get(d, n) < j for d in dep)
                    and any(step_of.get(d) == j - 1 for d in dep)):
                step_of[act] = j
                sequence[j].append(act)

    # Clean up the sequence
    clean_sequence = []
    for step in sequence:
        if len(step) == 1:
            clean_sequence.append(step[0])  # add single entry by itself (not a list)
        elif len(step) == 0:
            break  # if we've hit an empty step, we're at the end
        else:
            clean_sequence.append(step)

    return clean_sequence


def combineCapabilities(assets, display=0):
    '''Combines the capabilities across multiple assets.

    Specs listed in specs_to_max are combined by taking the maximum across
    assets; all other specs are summed.

    Parameters
    ----------
    assets : list
        List of asset dicts, each with a 'capabilities' entry of the form
        {capability_name: {spec_name: value}}.
    display : int, optional
        If > 0, print the combined specs.

    Returns
    -------
    dict
        Combined {capability_name: {spec_name: value}} dictionary.
    '''
    # Specs where the combined value is the best single asset (not a sum).
    # NOTE(review): taking max of accuracy_m keeps the LEAST accurate value
    # if smaller means better — confirm intent.
    specs_to_max = ['hook_height_m', 'depth_rating_m',
                    'max_depth_m', 'accuracy_m',
                    'speed_mpm', 'capacity_t']

    asset_caps = {}
    for asset in assets:
        for cap, specs in asset['capabilities'].items():
            if not cap in asset_caps:  # add the capability entry if absent
                asset_caps[cap] = {}
            for key, val in specs.items():
                if key in asset_caps[cap]:
                    if key in specs_to_max:
                        asset_caps[cap][key] = max(asset_caps[cap][key], val)
                    else:
                        asset_caps[cap][key] += val  # add to the spec
                else:
                    asset_caps[cap][key] = val  # create the spec

    if display > 0:
        print('Combined asset specs are as follows:')
        for cap, specs in asset_caps.items():
            print(f'  Capability {cap}')
            for key, val in specs.items():
                print(f'    Total spec {key} = {val}')

    return asset_caps
def checkCapability(required_capability, assets, capability_name, display=0):
    '''Check if the required capability can be met by the combination
    of the assets specified.

    Parameters
    ----------
    required_capability : dict
        {capability_name: {spec_name: required_value}} describing the
        requirement. A spec value of 0 means "no requirement" and is skipped.
    assets : list
        List of asset dicts whose capabilities are pooled via
        combineCapabilities.
    capability_name : str
        Name of the capability being checked. Expected to match the key(s)
        in required_capability; a mismatch is reported but not fatal.
    display : int, optional
        Verbosity level; > 2 prints the reason a check fails.

    Returns
    -------
    bool
        True if the combined assets satisfy every spec of the requirement.
    '''
    # required_capability is assumed to be a dict of cap_name : specs,
    # meaning capability_name is probably redundant <<<

    asset_caps = combineCapabilities(assets)

    # Check each capability of the requirement against the pooled specs
    for cap, specs in required_capability.items():

        if cap != capability_name:
            # Inconsistent inputs (was a leftover breakpoint() debug trap,
            # which would hang any non-interactive run)
            print(f"Warning: requirement capability '{cap}' does not match "
                  f"capability_name '{capability_name}'.")

        if cap not in asset_caps:  # assets don't have this capability, fail
            if display > 2: print(f"Warning: capability '{cap}' is missing from the assets.")
            return False

        for key, val in specs.items():  # go through each spec for this capability

            if val == 0:  # if zero value, no spec required, move on
                continue
            if key not in asset_caps[cap]:  # if the spec is missing, fail
                if display > 2: print(f"Warning: capability '{cap}' does not have spec '{key}'.")
                return False
            if asset_caps[cap][key] < val:  # if spec is too small, fail
                if display > 2: print(f"Warning: capability '{cap}' does not meet spec '{key}' requirement of {val:.2f} (has {asset_caps[cap][key]:.2f}).")
                return False

    # Every capability and spec was satisfied
    return True
def doCapsMeetRequirements(asset_caps, requirements, display=0):
    '''Checks if asset capabilities collectively can satisfy the listed
    requirements.

    Parameters
    ----------
    asset_caps : dict
        Combined capabilities {capability_name: {spec_name: value}}, e.g.
        from combineCapabilities.
    requirements : dict
        {requirement_name: {capability_name: {spec_name: required_value}}}.
        A required value of 0 means "no requirement" and is skipped.
    display : int, optional
        Verbosity level; > 1 reports unmet requirements, > 2 prints the
        failing spec and the asset capabilities.

    Returns
    -------
    dict
        {requirement_name: bool} flagging whether each requirement is met.
    '''
    requirements_met = {}  # dictionary of requirements being met True/False

    for req, caps in requirements.items():  # go through each requirement

        # Let's check if each capability is sufficiently provided for
        capable = True  # starting with optimism...

        for cap, specs in caps.items():  # go through each capability of the requirement

            if cap not in asset_caps:  # assets don't have this capability, fail
                capable = False
                if display > 2: print(f"Warning: capability '{cap}' is missing from the assets.")
                break

            for key, val in specs.items():  # go through each spec for this capability

                if val == 0:  # if zero value, no spec required, move on
                    continue
                if key not in asset_caps[cap]:  # if the spec is missing, fail
                    capable = False
                    if display > 2: print(f"Warning: capability '{cap}' does not have spec '{key}'.")
                    break
                if asset_caps[cap][key] < val:  # if spec is too small, fail
                    capable = False
                    if display > 2: print(f"Warning: capability '{cap}' does not meet spec '{key}' requirement of {val:.2f} (has {asset_caps[cap][key]:.2f}).")
                    break

            if not capable:  # stop scanning further capabilities once failed
                break

        # Final call on whether the requirement can be met
        requirements_met[req] = capable
        if not capable:
            if display > 1: print(f"Requirement '{req}' is not met by asset(s):")
            if display > 2: print(f"{asset_caps}.")

    return requirements_met
class TaskAssetGroupGenerator:
    """
    Generator for asset group assignments to tasks based on shared capabilities.

    Creates sparse task-asset matrices by matching task capability requirements
    to asset capabilities, with support for flexible batch sizes per task.

    Features:
    - Configurable batch sizes (anchors per task)
    - Capability-based asset matching
    - Smart pre-filtering for computational efficiency
    - Automatic handling of remainder tasks for uneven divisions
    """

    def __init__(self, max_group_size=3):
        """Initialize the generator with constraints.

        Parameters
        ----------
        max_group_size : int, optional
            Maximum number of assets allowed in one combined group.
        """
        self.max_group_size = max_group_size
        self.verbose = False  # set True for detailed console output

    def generate_asset_groups(self, task_definitions, asset_definitions):
        """Generate asset groups and return scheduler-ready inputs.

        Parameters
        ----------
        task_definitions : dict
            {task_name: {'required_capabilities': [...], ...}}.
        asset_definitions : dict
            {asset_name: {'capabilities': [...], ...}}.

        Returns
        -------
        dict
            Keys 'task_asset_matrix' (object ndarray of (cost, duration)
            tuples, (-1, -1) = infeasible), 'tasks', 'assets', 'asset_groups'.
        """
        if self.verbose:
            print("=== Capability-Based Asset Group Generation ===")

        # === VALIDATION PHASE ===
        validated_tasks = self._validate_task_definitions(task_definitions)
        validated_assets = self._validate_asset_definitions(asset_definitions)

        if self.verbose:
            print(f"\n=== VALIDATION RESULTS ===")
            print(f"Validated tasks: {len(validated_tasks)}")
            print(f"Validated assets: {len(validated_assets)}")

        # === ASSET GROUP GENERATION ===
        asset_groups = self._generate_feasible_asset_groups(validated_assets)

        if self.verbose:
            print(f"\n=== ASSET GROUP RESULTS ===")
            print(f"Feasible asset groups: {len(asset_groups)}")

        # === TASK-ASSET MATCHING ===
        task_asset_matches = self._match_tasks_to_asset_groups(validated_tasks, asset_groups)
        task_asset_matrix = self._build_sparse_matrix(validated_tasks, asset_groups, task_asset_matches)

        # === SCHEDULER INPUT PREPARATION ===
        scheduler_inputs = {
            "task_asset_matrix": task_asset_matrix,
            "tasks": [task["name"] for task in validated_tasks],
            "assets": [group["name"] for group in asset_groups],
            "asset_groups": asset_groups
        }

        if self.verbose:
            self._print_efficiency_stats(scheduler_inputs)

        return scheduler_inputs

    def _validate_task_definitions(self, task_definitions):
        """Validate and standardize task capability requirements.

        Raises ValueError if a task lacks 'required_capabilities'.
        """
        validated_tasks = []

        if self.verbose:
            print(f"\n=== TASK VALIDATION ===")

        for task_name, requirements in task_definitions.items():
            if "required_capabilities" not in requirements:
                raise ValueError(f"Task '{task_name}' missing required_capabilities")

            # === TASK CONFIGURATION === (defaults fill any missing fields)
            validated_task = {
                "name": task_name,
                "required_capabilities": set(requirements["required_capabilities"]),
                "min_weather_rating": requirements.get("min_weather_rating", 1),
                "max_duration": requirements.get("max_duration", 24),
                "complexity_factor": requirements.get("complexity_factor", 1.0),
                "batch_size": requirements.get("batch_size", 1)
            }
            validated_tasks.append(validated_task)

            if self.verbose:
                batch_info = f" (batch_size={validated_task['batch_size']})" if validated_task['batch_size'] > 1 else ""
                print(f"  Task: {task_name} requires {validated_task['required_capabilities']}{batch_info}")

        return validated_tasks

    def _validate_asset_definitions(self, asset_definitions):
        """Validate and standardize asset capabilities.

        Raises ValueError if an asset lacks 'capabilities'.
        """
        validated_assets = []

        if self.verbose:
            print(f"\n=== ASSET VALIDATION ===")

        for asset_name, capabilities in asset_definitions.items():
            if "capabilities" not in capabilities:
                raise ValueError(f"Asset '{asset_name}' missing capabilities")

            # === ASSET CONFIGURATION === (defaults fill any missing fields)
            validated_asset = {
                "name": asset_name,
                "capabilities": set(capabilities["capabilities"]),
                "max_weather": capabilities.get("max_weather", 1),
                "base_cost": capabilities.get("base_cost", 10000),
                "daily_rate": capabilities.get("daily_rate", 5000),
                "availability": capabilities.get("availability", 1.0)
            }
            validated_assets.append(validated_asset)

            if self.verbose:
                print(f"  Asset: {asset_name} provides {validated_asset['capabilities']}")

        return validated_assets

    def _generate_feasible_asset_groups(self, validated_assets):
        """Generate all operationally feasible asset group combinations."""
        asset_groups = []

        if self.verbose:
            print(f"\n=== ASSET GROUP GENERATION ===")

        # === INDIVIDUAL ASSET GROUPS ===
        for asset in validated_assets:
            group = {
                "name": asset["name"],
                "assets": [asset["name"]],
                "combined_capabilities": asset["capabilities"],
                "min_weather": asset["max_weather"],
                "total_cost": asset["base_cost"],
                "total_daily_rate": asset["daily_rate"],
                "group_size": 1
            }
            asset_groups.append(group)

        if self.verbose:
            print(f"  Individual asset groups: {len(asset_groups)}")

        # === MULTI-ASSET COMBINATIONS ===
        combination_count = 0
        for size in range(2, min(len(validated_assets) + 1, self.max_group_size + 1)):
            for combo in combinations(validated_assets, size):
                if self._is_operationally_feasible(combo):
                    group_props = self._calculate_group_properties(combo)
                    asset_groups.append(group_props)
                    combination_count += 1

        if self.verbose:
            print(f"  Multi-asset combinations: {combination_count}")
            print(f"  Total asset groups: {len(asset_groups)}")

        return asset_groups

    def _is_operationally_feasible(self, asset_combination):
        """Apply operational feasibility filters to asset combinations."""

        # === FEASIBILITY FILTER 1: Weather compatibility ===
        # All assets must handle similar weather conditions
        weather_ratings = [asset["max_weather"] for asset in asset_combination]
        if max(weather_ratings) - min(weather_ratings) > 2:
            return False

        # === FEASIBILITY FILTER 2: Capability overlap ===
        # Avoid redundant capabilities (inefficient combinations)
        all_capabilities = [asset["capabilities"] for asset in asset_combination]
        total_capabilities = set().union(*all_capabilities)
        individual_count = sum(len(caps) for caps in all_capabilities)

        # Reject if overlap is too high (more than 70% overlap)
        overlap_ratio = (individual_count - len(total_capabilities)) / individual_count
        if overlap_ratio > 0.7:
            return False

        # === FEASIBILITY FILTER 3: Cost efficiency ===
        # Combination shouldn't be extremely expensive
        total_cost = sum(asset["base_cost"] for asset in asset_combination)
        avg_individual_cost = total_cost / len(asset_combination)
        if avg_individual_cost > 100000:
            return False

        # === FEASIBILITY FILTER 4: Group size limits ===
        # Practical limits on group size
        if len(asset_combination) > self.max_group_size:
            return False

        return True

    def _calculate_group_properties(self, asset_combination):
        """Calculate combined properties for an asset group."""
        assets = list(asset_combination)
        asset_names = [asset["name"] for asset in assets]

        # === CAPABILITY COMBINATION ===
        combined_capabilities = set()
        for asset in assets:
            combined_capabilities.update(asset["capabilities"])

        # === GROUP PROPERTY CALCULATION ===
        # min_weather: a group is only as weather-capable as its weakest asset
        min_weather = min(asset["max_weather"] for asset in assets)
        total_cost = sum(asset["base_cost"] for asset in assets)
        total_daily_rate = sum(asset["daily_rate"] for asset in assets)
        group_name = "+".join(asset_names)

        return {
            "name": group_name,
            "assets": asset_names,
            "combined_capabilities": combined_capabilities,
            "min_weather": min_weather,
            "total_cost": total_cost,
            "total_daily_rate": total_daily_rate,
            "group_size": len(assets)
        }

    def _match_tasks_to_asset_groups(self, validated_tasks, asset_groups):
        """Match tasks to feasible asset groups based on capability requirements."""
        task_asset_matches = {}

        for task in validated_tasks:
            task_name = task["name"]
            feasible_groups = []

            for group in asset_groups:
                if self._can_group_handle_task(group, task):
                    # Calculate cost and duration for this task-group combination
                    cost, duration = self._calculate_task_cost_duration(task, group)
                    feasible_groups.append({
                        "group_name": group["name"],
                        "cost": cost,
                        "duration": duration
                    })

            task_asset_matches[task_name] = feasible_groups

            if self.verbose:
                print(f"  Task '{task_name}' can be handled by {len(feasible_groups)} asset groups")

        return task_asset_matches

    def _can_group_handle_task(self, asset_group, task):
        """Check if an asset group can handle a specific task."""
        # Capability check: group must provide every required capability
        required_caps = task["required_capabilities"]
        available_caps = asset_group["combined_capabilities"]

        if not required_caps.issubset(available_caps):
            return False

        # Weather rating check
        if asset_group["min_weather"] < task["min_weather_rating"]:
            return False

        return True

    def _calculate_task_cost_duration(self, task, asset_group):
        """Calculate cost and duration for a task-asset group combination."""

        # === DURATION CALCULATION ===
        base_duration = task["max_duration"]
        complexity_factor = task["complexity_factor"]
        batch_size = task.get("batch_size", 1)

        # Duration scales with complexity and batch size, but with efficiency gains
        batch_efficiency = 1.0 if batch_size == 1 else (batch_size * 0.8)  # 20% efficiency gain for batches
        duration = base_duration * complexity_factor * batch_efficiency

        # === COST CALCULATION ===
        setup_cost = asset_group["total_cost"] * 0.1  # 10% of asset cost as setup
        operational_cost = asset_group["total_daily_rate"] * (duration / 24)  # Daily rate prorated
        batch_cost_factor = batch_size * 0.9  # 10% cost efficiency for larger batches

        total_cost = (setup_cost + operational_cost) * batch_cost_factor

        return round(total_cost, 2), round(duration, 2)

    def _build_sparse_matrix(self, validated_tasks, asset_groups, task_asset_matches):
        """Build sparse task-asset matrix avoiding mostly (-1, -1) entries.

        Returns an object ndarray where each element is a (cost, duration)
        tuple; infeasible combinations hold the sentinel (-1, -1).
        """
        num_tasks = len(validated_tasks)
        num_groups = len(asset_groups)

        # Initialize with the infeasible sentinel. Elements are assigned
        # one-by-one because ndarray.fill()/broadcast assignment may treat a
        # tuple as a sequence rather than a scalar for object dtype.
        matrix = np.empty((num_tasks, num_groups), dtype=object)
        for idx in np.ndindex(matrix.shape):
            matrix[idx] = (-1, -1)

        # O(1) column lookup instead of a linear scan per match (the previous
        # next(...) scan was O(groups) per match and raised StopIteration on
        # an unknown group name)
        group_index = {group["name"]: i for i, group in enumerate(asset_groups)}

        # Fill in feasible combinations
        for task_idx, task in enumerate(validated_tasks):
            for match in task_asset_matches.get(task["name"], []):
                group_idx = group_index[match["group_name"]]
                matrix[task_idx, group_idx] = (match["cost"], match["duration"])

        return matrix

    def _print_efficiency_stats(self, scheduler_inputs):
        """Print efficiency statistics about the generated matrix."""
        matrix = scheduler_inputs["task_asset_matrix"]
        total_entries = matrix.size

        # Count feasible (non-sentinel) entries
        feasible_entries = sum(1 for idx in np.ndindex(matrix.shape)
                               if matrix[idx] != (-1, -1))

        # Guard against an empty matrix (no tasks or no groups)
        feasible_ratio = feasible_entries / total_entries if total_entries else 0.0
        sparsity = 1 - feasible_ratio

        print(f"\n=== Efficiency Statistics ===")
        print(f"Task-Asset Matrix: {matrix.shape}")
        print(f"Total entries: {total_entries}")
        print(f"Feasible entries: {feasible_entries} ({feasible_ratio:.1%})")
        print(f"Sparsity: {sparsity:.1%} (reduced computational load)")
        print(f"Asset groups: {len(scheduler_inputs['assets'])}")
def generate_capability_based_groups(task_definitions, asset_definitions, max_group_size=3, verbose=True):
    """
    Convenience wrapper around TaskAssetGroupGenerator.

    Builds a generator, applies the requested verbosity, and returns the
    scheduler-ready inputs for the given task and asset definitions.

    Args:
        task_definitions (dict): Task capability requirements
        asset_definitions (dict): Asset capabilities
        max_group_size (int): Maximum assets per group
        verbose (bool): Print detailed output

    Returns:
        dict: Scheduler inputs ready for use with the MILP scheduler

    Example:
        task_defs = {
            "install_anchor_task_1": {
                "required_capabilities": ["anchor_handling", "positioning"],
                "min_weather_rating": 1,
                "max_duration": 12,
                "batch_size": 1
            }
        }

        asset_defs = {
            "anchor_vessel": {
                "capabilities": ["anchor_handling", "positioning"],
                "max_weather": 2,
                "base_cost": 30000
            }
        }

        scheduler_inputs = generate_capability_based_groups(task_defs, asset_defs)
    """
    builder = TaskAssetGroupGenerator(max_group_size=max_group_size)
    builder.verbose = verbose
    result = builder.generate_asset_groups(task_definitions, asset_definitions)
    return result
task: {anchors_per_task}") + print(f"Strategy: {strategy}\n") + + task_definitions = {} # initialize the dictionary of tasks + + if strategy == 1: + # Strategy 1: Multiple individual anchor installation tasks + num_tasks = num_anchors // anchors_per_task + print(f"Strategy 1: {num_tasks} tasks, each installing {anchors_per_task} anchor(s)") + + for i in range(1, num_tasks + 1): + task_name = f"install_anchor_{i}" + task_definitions[task_name] = { + "required_capabilities": ["anchor_handling", "positioning"], + "max_duration": 12 * anchors_per_task, # Scale duration with batch size + "batch_size": anchors_per_task + } + + elif strategy == 2: + # Strategy 2: Intermediate batches + num_batches = num_anchors // anchors_per_task + print(f"Strategy 2: {num_batches} batch tasks, each installing {anchors_per_task} anchors") + + for i in range(1, num_batches + 1): + task_name = f"install_batch_{i}" + task_definitions[task_name] = { + "required_capabilities": ["anchor_handling", "positioning"], + "max_duration": 8 * anchors_per_task, # Batch efficiency + "batch_size": anchors_per_task + } + + # Handle remainder anchors + remainder = num_anchors % anchors_per_task + if remainder > 0: + task_name = f"install_batch_{num_batches + 1}" + task_definitions[task_name] = { + "required_capabilities": ["anchor_handling", "positioning"], + "max_duration": remainder * 8, + "batch_size": remainder + } + + elif strategy == 3: + # Single batch for all anchors + print(f"Strategy 3: 1 batch task installing {num_anchors} anchors at once") + task_definitions["install_anchors"] = { + "required_capabilities": ["anchor_handling", "positioning"], + "max_duration": num_anchors * 8, # Batch efficiency: 8 hours per anchor + "batch_size": num_anchors + } + + else: + raise ValueError("Strategy must be 1, 2, or 3") + + # Same asset definitions for both strategies + asset_definitions = { + "anchor_vessel": { + "capabilities": ["anchor_handling", "positioning"], + "max_weather": 2, + "base_cost": 30000, + 
"daily_rate": 15000 + }, + "positioning_vessel": { + "capabilities": ["positioning"], + "max_weather": 3, + "base_cost": 15000, + "daily_rate": 6000 + } + } + + # Generate asset groups for anchor tasks + scheduler_inputs = generate_capability_based_groups( + task_definitions, + asset_definitions, + max_group_size=2 + ) + + print(f"=== Results ===") + print(f"Total anchors configured: {num_anchors}") + print(f"Anchors per task: {anchors_per_task}") + print(f"Generated tasks: {scheduler_inputs['tasks']}") + print(f"Number of tasks: {len(scheduler_inputs['tasks'])}") + print(f"Asset groups: {len(scheduler_inputs['assets'])}") + print(f"Matrix shape: {scheduler_inputs['task_asset_matrix'].shape}") \ No newline at end of file diff --git a/famodel/irma/terminologies.md b/famodel/irma/terminologies.md new file mode 100644 index 00000000..76de0446 --- /dev/null +++ b/famodel/irma/terminologies.md @@ -0,0 +1,48 @@ +## Key Terminologies + +### Actions +- **Definition**: The smallest unit of work that the system simulates. +- **Purpose**: Represents a specific action to be performed, such as transporting an anchor, installing a mooring, or deploying a WEC. +- **Examples**: + - "Anchor installation" + - "Mooring deployment" + +### Tasks +- **Definition**: A logical group of one or more actions that are bounded by a "From-To Port" constraint. +- **Purpose**: Represents a higher-level grouping of actions that are executed together as part of a specific phase of the installation process. +- **Examples**: + - "Anchor Installation Task" + - "Mooring Deployment Task" + +### Dependencies +- **Definition**: Logical constraints that determine the dependencies between actions. +- **Purpose**: Ensures that actions are executed in a given order based on logical and physical requirements. +- **Examples**: + - "Anchor installation depends on anchor transport to the site." + - "Mooring deployment depends on anchor installation." 
+ +### Action Sequencing +- **Definition**: The process of determining the sequence in which actions take place within a task. +- **Purpose**: Ensures that actions are executed in a logical and efficient order, respecting dependencies and resource constraints. + +### Capabilities +- **Definition**: The specific functionality that an asset (e.g., vessel, port) can perform. +- **Purpose**: Determines which assets are suitable for specific actions. +- **Examples**: + - A vessel with "crane" capability can install anchors. + +### Metrics +- **Definition**: Quantifiable measurements of assets based on their capabilities. +- **Purpose**: Used to evaluate and compare assets for suitability and efficiency in performing actions. +- **Examples**: + - **Speed**: The transit speed of a vessel. + - **Capacity**: The cargo capacity of a vessel. + +### Roles +- **Definition**: Functional assignments of assets in the context of actions. +- **Purpose**: Specifies how each asset contributes to the completion of an action. +- **Examples**: + - **Carrier**: Assigned to carry specific equipment or materials. + - **Operator**: Assigned to operate machinery or perform specialized tasks. + +--- \ No newline at end of file diff --git a/famodel/irma/vessels.yaml b/famodel/irma/vessels.yaml new file mode 100644 index 00000000..347ef97f --- /dev/null +++ b/famodel/irma/vessels.yaml @@ -0,0 +1,336 @@ +# This file defines standard vessels aligned to current capabilities & actions + +# --- Anchor Handling Tug / Supply Vessel (AHTS / AHV) --- + +AHTS_alpha: + # Offshore Tug/Anchor Handling Tug Supply (AHTS) – Towing floating structures, handling and laying anchors/mooring lines, tensioning and positioning support. 
+ name: AHTS_alpha + type: AHTS + transport: + transit_speed_mps: 4.7 + Hs_m : 5 + station_keeping: + type: DP2 + capabilities: + deck_space: + area_m2: 800 + max_load_t: 1500 + bollard_pull: + max_force_t: 200 + site_speed_mps: 1.5 # <<< temporary value added + winch: + max_line_pull_t: 150 + brake_load_t: 300 + speed_mpm: 20 + crane: + capacity_t: 50 + hook_height_m: 25 + speed_mpm: 10 # <<< + chain_locker: + volume_m3: 150 + line_reel: + volume_m3: 200 + length_capacity_m: 5000 + pump_subsea: + power_kW: 75 + pressure_bar: 200 + weight_t: 3 + dimensions_m: [2, 1.5, 1.5] + positioning_system: + accuracy_m: 1.0 + methods: [USBL, INS] + monitoring_system: + metrics: [pressure, flow, tilt] + sampling_rate_hz: 10 + actions: + tow: {} + lay_mooring: {} + mooring_hookup: {} + install_anchor: {} + retrieve_anchor: {} + install_semisub: {} + install_spar: {} + install_tlp: {} + day_rate: 107187 # USD/day taken from ORBIT: https://github.com/WISDEM/ORBIT/blob/dev/library/vessels/example_ahts_vessel.yaml + +# --- Multipurpose Support Vessel --- + +MPSV_01: + # Multi-Purpose Support Vessel (MSV) – Flexible vessel used for maintenance, diving, construction, or ROV tasks. Combines features of CSV, DSV and ROVSV. 
+ name: MPSV_01 + type: MSV + transport: + transit_speed_mps: 4.7 + Hs_m : 5 + station_keeping: + type: DP2 + capabilities: + deck_space: + area_m2: 900 + max_load_t: 1500 + crane: + capacity_t: 150 + hook_height_m: 45 + speed_mpm: 10 # <<< + winch: + max_line_pull_t: 60 + brake_load_t: 120 + speed_mpm: 16 + # mooring_work: + # line_types: [chain] + # stern_roller: true + # shark_jaws: true + # towing_pin_rating_t: 300 + stern_roller: + width_m: 3.0 # m <> + shark_jaws: + max_load_t: 200 # t + positioning_system: + accuracy_m: 1.0 + methods: [USBL, INS] + monitoring_system: + metrics: [pressure, tilt] + sampling_rate_hz: 10 + rov: + class: OBSERVATION + depth_rating: 3000 + weight_t: 7 + dimensions_m: [3, 2, 2] + actions: + install_anchor: {} + retrieve_anchor: {} + mooring_hookup: {} + lay_mooring: {} + install_wec: {} + monitor_installation: {} + day_rate: 122699 # USD/day taken from ORBIT (for support vessel: https://github.com/WISDEM/ORBIT/blob/dev/library/vessels/example_support_vessel.yaml) + +# --- Construction Support Vessel --- + +CSV_A: + # Construction Support Vessel (CSV) – General-purpose vessel supporting subsea construction, cable lay and light installation. Equipped with cranes, moonpools and ROVs. 
+ name: CSV_A + type: CSV + transport: + transit_speed_mps: 4.7 + Hs_m : 5 + station_keeping: + type: DP2 + capabilities: + deck_space: + area_m2: 1200 + max_load_t: 2000 + crane: + capacity_t: 250 + hook_height_m: 60 + speed_mpm: 10 # <<< + winch: + max_line_pull_t: 75 + brake_load_t: 150 + speed_mpm: 18 + positioning_system: + accuracy_m: 0.5 + methods: [USBL, LBL, INS] + sonar_survey: + types: [MBES, SSS] + resolution_m: 0.05 + monitoring_system: + metrics: [pressure, flow, tilt, torque] + sampling_rate_hz: 20 + pump_surface: + power_kW: 150 + pressure_bar: 200 + weight_t: 8 + dimensions_m: [6, 2.5, 2.5] + pump_subsea: + power_kW: 75 + pressure_bar: 200 + weight_t: 3 + dimensions_m: [2, 1.5, 1.5] + rov: + class: WORK-CLASS + depth_rating: 3000 + weight_t: 8 + dimensions_m: [3, 2, 2] + actions: + lay_mooring: {} + mooring_hookup: {} + lay_cable: {} + lay_and_bury_cable: {} + monitor_installation: {} + day_rate: 122699 # USD/day taken from ORBIT (for support vessel: https://github.com/WISDEM/ORBIT/blob/dev/library/vessels/example_support_vessel.yaml) +# --- ROV Support Vessel --- + +ROVSV_X: + # ROV Support Vessel (ROVSV) – Dedicated to operating and supporting Remotely Operated Vehicles (ROVs) for inspection, survey or intervention. 
+  name: ROVSV_X
+  type: ROVSV
+  transport:
+    transit_speed_mps: 6.7
+    Hs_m : 5
+  station_keeping:
+    type: DP2
+  capabilities:
+    deck_space:
+      area_m2: 600
+      max_load_t: 1000
+    crane:
+      capacity_t: 100
+      hook_height_m: 35
+      speed_mpm: 10 # <<<
+    rov:
+      class: WORK-CLASS
+      depth_rating: 3000
+      weight_t: 7
+      dimensions_m: [3, 2, 2]
+    positioning_system:
+      accuracy_m: 0.5
+      methods: [USBL, LBL, DVL, INS]
+    sonar_survey:
+      types: [MBES, SSS, SBP]
+      resolution_m: 0.1
+    monitoring_system:
+      metrics: [tilt, video, torque]
+      sampling_rate_hz: 25
+  actions:
+    monitor_installation: {}
+    site_survey: {}
+  day_rate: 52500 # USD/day taken from a Nauticus Robotics post on X: https://x.com/nautrobo/status/1840830080748003551
+# --- Diving Support Vessel ---
+
+DSV_Moon:
+  # Diving Support Vessel (DSV) – Specifically equipped to support saturation diving operations. Includes diving bells, decompression chambers and dynamic positioning.
+  name: DSV_Moon
+  type: DSV
+  transport:
+    transit_speed_mps: 4.7
+    Hs_m : 5
+  station_keeping:
+    type: DP2
+  capabilities:
+    deck_space:
+      area_m2: 800
+      max_load_t: 1200
+    crane:
+      capacity_t: 150
+      hook_height_m: 40
+      speed_mpm: 10 # <<<
+    positioning_system:
+      accuracy_m: 0.5
+      methods: [USBL, LBL, INS]
+    monitoring_system:
+      metrics: [video, depth]
+      sampling_rate_hz: 30
+  actions:
+    monitor_installation: {}
+    site_survey: {}
+  day_rate: null # USD/day (research needed; null placeholder — a bare `x` would parse as a string)
+# --- Heavy Lift Vessel ---
+
+HL_Giant:
+  # Heavy Lift Vessel (HL) – Used for transporting and installing very large components, like jackets, substations, or monopiles. Equipped with high-capacity cranes (>3000 t).
+  name: HL_Giant
+  type: HL
+  transport:
+    transit_speed_mps: 4.7
+    Hs_m : 7
+  station_keeping:
+    type: DP2
+  capabilities:
+    deck_space:
+      area_m2: 4000
+      max_load_t: 8000
+    crane:
+      capacity_t: 5000
+      hook_height_m: 150
+      speed_mpm: 10 # <<<
+    positioning_system:
+      accuracy_m: 1.0
+      methods: [USBL, INS]
+    monitoring_system:
+      metrics: [position, tilt]
+      sampling_rate_hz: 5
+  actions:
+    transport_components: {}
+    install_wec: {}
+    install_wtg: {}
+  day_rate: 624612 # USD/day taken from Orbit: https://github.com/WISDEM/ORBIT/blob/dev/library/vessels/example_heavy_lift_vessel.yaml
+# --- Survey Vessel ---
+
+SURV_Swath:
+  # Survey Vessel (SURV) – Seabed mapping and soil characterization, positioning and embedment verification of anchors. Equipped with sonar, USBL/LBL, and profiling equipment.
+  name: SURV_Swath
+  type: SURV
+  transport:
+    transit_speed_mps: 5.0
+    Hs_m : 6
+  station_keeping:
+    type: DP1
+  capabilities:
+    deck_space:
+      area_m2: 200
+      max_load_t: 200
+    positioning_system:
+      accuracy_m: 0.3
+      methods: [USBL, LBL, INS]
+    sonar_survey:
+      types: [MBES, SSS, SBP]
+      resolution_m: 0.05
+    monitoring_system:
+      metrics: [bathymetry]
+      sampling_rate_hz: 10
+  actions:
+    site_survey: {}
+    monitor_installation: {}
+  day_rate: null # USD/day (research needed; null placeholder — a bare `x` would parse as a string)
+# --- Barge ---
+
+Barge_squid:
+  # Barge – non-propelled flat-top vessel used for transporting heavy equipment, components and materials. Requires towing or positioning support from tugs or AHTS vessels.
+  name: Barge_squid
+  type: BARGE
+  transport:
+    transit_speed_mps: 2 # No self-propulsion
+    Hs_m: 4.0 # Maximum significant wave height for safe transport
+  station_keeping:
+    type: anchor_based # Held in position using anchors and winches
+  capabilities:
+    deck_space:
+      area_m2: 3000
+      max_load_t: 10000
+    container:
+      weight_t: 20
+      dimensions_m: [15, 3, 3] # LxWxH
+    crane:
+      capacity_t: 250
+      hook_height_m: 40
+      speed_mpm: 10 # <<<
+  actions:
+    transport_components: {}
+    install_anchor: {}
+    retrieve_anchor: {}
+    install_wec: {}
+  day_rate: 147239 # USD/day taken from Orbit: https://github.com/WISDEM/ORBIT/blob/dev/library/vessels/floating_barge.yaml
+# --- Rock Installation Vessel ---
+
+ROCK_FallPipe:
+  # Rock Installation Vessel (ROCK) – Placement of rock for scour protection, anchor stabilization, or seabed leveling. Uses fall-pipe or side-dump systems with high precision at depth.
+  name: ROCK_FallPipe
+  type: ROCK
+  transport:
+    transit_speed_mps: 5.5
+    Hs_m : 6
+  station_keeping:
+    type: DP2
+  capabilities:
+    deck_space:
+      area_m2: 1500
+      max_load_t: 4000
+    positioning_system:
+      accuracy_m: 0.3
+      methods: [USBL, LBL, INS]
+    monitoring_system:
+      metrics: [berm_shape]
+      sampling_rate_hz: 5
+  actions:
+    backfill_rockdump: {}
+    site_survey: {}
+  day_rate: null # USD/day (research needed; null placeholder — a bare `x` would parse as a string)
\ No newline at end of file