From 061058eb1a99a518ac3c8a71b23110f1209df938 Mon Sep 17 00:00:00 2001
From: JunyungKim
Date: Sun, 19 Feb 2023 12:48:53 -0700
Subject: [PATCH 01/84] NSGA-II implementation completed; optimal solutions are now printed properly at each iteration.

---
 ravenframework/DataObjects/DataSet.py         |  11 +-
 ravenframework/Optimizers/GeneticAlgorithm.py | 531 ++++++++++++++----
 ravenframework/Optimizers/Optimizer.py        |  11 +-
 ravenframework/Optimizers/RavenSampled.py     | 212 +++++--
 ravenframework/Optimizers/fitness/fitness.py  |  34 ++
 .../Optimizers/mutators/mutators.py           |  51 +-
 .../parentSelectors/parentSelectors.py        |  53 +-
 .../survivorSelectors/survivorSelectors.py    |  98 +++-
 ravenframework/utils/frontUtils.py            |  36 +-
 .../discrete/constrained/myConstraints.py     | 108 ++++
 .../discrete/constrained/myLocalSum_multi.py  |  43 ++
 11 files changed, 943 insertions(+), 245 deletions(-)
 create mode 100644 tests/framework/Optimizers/NSGAII/discrete/constrained/myConstraints.py
 create mode 100644 tests/framework/Optimizers/NSGAII/discrete/constrained/myLocalSum_multi.py

diff --git a/ravenframework/DataObjects/DataSet.py b/ravenframework/DataObjects/DataSet.py
index 6dad7c88ff..a1c21f6f1d 100644
--- a/ravenframework/DataObjects/DataSet.py
+++ b/ravenframework/DataObjects/DataSet.py
@@ -213,7 +213,8 @@ def addRealization(self, rlz):
       indexMap = dict((key, val) for key, val in indexMap[0].items() if key in self.getVars()) # [0] because everything is nested in a list by now, it seems
     # clean out entries that aren't desired
     try:
-      rlz = dict((var, rlz[var]) for var in self.getVars() + self.indexes)
+      getVariables = self.getVars()
+      rlz = dict((var, rlz[var]) for var in getVariables + self.indexes)
     except KeyError as e:
       self.raiseAWarning('Variables provided:',rlz.keys())
       self.raiseAnError(KeyError, f'Provided realization does not have all requisite values for object "{self.name}": "{e.args[0]}"')
@@ -242,7 +243,8 @@ def addRealization(self, rlz):
       # This is because the cNDarray collector expects a LIST of realization, not a single realization.
       # Maybe the "append" method should be renamed to "extend" or changed to append one at a time.
# set realizations as a list of realizations (which are ordered lists) - newData = np.array(list(rlz[var] for var in self._orderedVars)+[0.0], dtype=object) + orderedVariables = self._orderedVars + newData = np.array(list(rlz[var] for var in orderedVariables)+[0.0], dtype=object) newData = newData[:-1] # if data storage isn't set up, set it up if self._collector is None: @@ -1963,9 +1965,10 @@ def _setDataTypes(self, rlz): @ In, rlz, dict, standardized and formatted realization @ Out, None """ + getVariables = self.getVars() if self.types is None: - self.types = [None]*len(self.getVars()) - for v, name in enumerate(self.getVars()): + self.types = [None]*len(getVariables) + for v, name in enumerate(getVariables): val = rlz[name] self.types[v] = self._getCompatibleType(val) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 8f90200ec9..9654c09dc5 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -32,10 +32,11 @@ # External Modules End------------------------------------------------------------------------------ # Internal Modules---------------------------------------------------------------------------------- -from ..utils import mathUtils, InputData, InputTypes +from ..utils import mathUtils, InputData, InputTypes, frontUtils from ..utils.gaUtils import dataArrayToDict, datasetToDataArray from .RavenSampled import RavenSampled from .parentSelectors.parentSelectors import returnInstance as parentSelectionReturnInstance +from .parentSelectors.parentSelectors import countConstViolation from .crossOverOperators.crossovers import returnInstance as crossoversReturnInstance from .mutators.mutators import returnInstance as mutatorsReturnInstance from .survivorSelectors.survivorSelectors import returnInstance as survivorSelectionReturnInstance @@ -66,16 +67,26 @@ def __init__(self): self._acceptRerun = {} # by traj, if True then override accept for point rerun self._convergenceInfo = {} # by traj, the persistence and convergence information for most recent opt self._requiredPersistence = 0 # consecutive persistence required to mark convergence - self.needDenormalized() # the default in all optimizers is to normalize the data which is not the case here + self.needDenormalized() # the default in all optimizers is to normalize the data which is not the case here self.batchId = 0 - self.population = None # panda Dataset container containing the population at the beginning of each generation iteration - self.popAge = None # population age - self.fitness = None # population fitness - self.ahdp = np.NaN # p-Average Hausdorff Distance between populations - self.ahd = np.NaN # Hausdorff Distance between populations + self.population = None # panda Dataset container containing the population at the beginning of each generation iteration + self.popAge = None # population age + self.fitness = None # population fitness + self.rank = None # population rank (for Multi-objective optimization only) + self.constraints = None + self.constraintsV = None + self.crowdingDistance = None # population crowding distance (for Multi-objective optimization only) + self.ahdp = np.NaN # p-Average Hausdorff Distance between populations + self.ahd = np.NaN # Hausdorff Distance between populations self.bestPoint = None self.bestFitness = None self.bestObjective = None + self.multiBestPoint = None + self.multiBestFitness = None + self.multiBestObjective = None + self.multiBestConstraint = None + 
self.multiBestRank = None + self.multiBestCD = None self.objectiveVal = None self._populationSize = None self._parentSelectionType = None @@ -238,18 +249,11 @@ def getInputSpecification(cls): contentType=InputTypes.StringType, printPriority=108, descr=r"""a subnode containing the implemented fitness functions. - This includes: \begin{itemize} - \item invLinear: - \[fitness = -a \times obj - b \times \sum\\_{j=1}^{nConstraint} max(0,-penalty\\_j) \]. - - \item logistic: - \[fitness = \frac{1}{1+e^{a\times(obj-b)}}\]. - - \item - feasibleFirst: \[fitness = - -obj \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \text{for} \ \ g\\_j(x)\geq 0 \; \forall j\] and - \[fitness = -obj\\_{worst} - \Sigma\\_{j=1}^{J} \ \ \ \ \ \ \ \ otherwise \] - \end{itemize}.""") + This includes: a. invLinear: $fitness = -a \times obj - b \times \sum_{j=1}^{nConstraint} max(0,-penalty\_j) $. + + b. logistic: $fitness = \frac{1}{1+e^{a\times(obj-b)}}$. + + c. feasibleFirst: $fitness = \left\{\begin{matrix} -obj & g_j(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right.$""") fitness.addParam("type", InputTypes.StringType, True, descr=r"""[invLin, logistic, feasibleFirst]""") objCoeff = InputData.parameterInputFactory('a', strictMode=True, @@ -296,6 +300,8 @@ def getSolutionExportVariableNames(cls): new = {} # new = {'': 'the size of step taken in the normalized input space to arrive at each optimal point'} new['conv_{CONV}'] = 'status of each given convergence criteria' + new['rank'] = 'rank' + new['CD'] = 'crowding distance' new['fitness'] = 'fitness of the current chromosome' new['age'] = 'age of current chromosome' new['batchId'] = 'Id of the batch to whom the chromosome belongs' @@ -355,7 +361,7 @@ def handleInput(self, paramInput): # Check if the fitness requested is among the constrained optimization fitnesses # Currently, only InvLin and feasibleFirst Fitnesses deal with constrained optimization # TODO: @mandd, please explore the possibility to convert the logistic fitness into a constrained optimization fitness. 
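
Note: the three fitness forms documented in the input specification above can be evaluated directly. The sketch below is illustrative only (plain NumPy, not the fitness.py API), and it assumes the infeasible branch of feasibleFirst subtracts the summed constraint violations <g_j(x)>, in the spirit of the feasible-first penalty rule the formula refers to.

import numpy as np

def invLinearFitness(obj, g, a=1.0, b=10.0):
  # fitness = -a*obj - b*sum_j max(0, -g_j); feasible points (g_j >= 0) carry no penalty
  return -a * obj - b * np.sum(np.maximum(0.0, -g))

def logisticFitness(obj, a=1.0, b=0.0):
  # fitness = 1 / (1 + exp(a*(obj - b)))
  return 1.0 / (1.0 + np.exp(a * (obj - b)))

def feasibleFirstFitness(obj, g, worstFeasibleObj):
  # feasible chromosome: fitness = -obj
  # infeasible chromosome: fitness = -obj_worst - sum_j <g_j(x)>  (assumed violation measure)
  if np.all(g >= 0):
    return -obj
  return -worstFeasibleObj - np.sum(np.maximum(0.0, -g))

# one chromosome with objective 3.2 and two constraints (g_j >= 0 means satisfied)
obj, g = 3.2, np.array([0.5, -0.2])
print(invLinearFitness(obj, g), logisticFitness(obj), feasibleFirstFitness(obj, g, worstFeasibleObj=7.0))
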
- if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','feasibleFirst']: + if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','feasibleFirst','rank_crowding']: self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear and feasibleFirst fitnesses, whereas provided fitness is {self._fitnessType}') self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None @@ -418,9 +424,9 @@ def needDenormalized(self): # overload as needed in inheritors return True - ############### - # Run Methods # - ############### + ########################################################################################################## + # Run Methods # + ########################################################################################################## def _useRealization(self, info, rlz): """ @@ -438,82 +444,202 @@ def _useRealization(self, info, rlz): info['step'] = self.counter # Developer note: each algorithm step is indicated by a number followed by the generation number - # e.g., '5 @ n-1' refers to step 5 for generation n-1 (i.e., previous generation) + # e.g., '0 @ n-1' refers to step 0 for generation n-1 (i.e., previous generation) # for more details refer to GRP-Raven-development/Disceret_opt channel on MS Teams - # 5 @ n-1: Survivor Selection from previous iteration (children+parents merging from previous generation) - - # 5.1 @ n-1: fitnessCalculation(rlz) + # 0 @ n-1: Survivor Selection from previous iteration (children+parents merging from previous generation) + # 0.1 @ n-1: fitnessCalculation(rlz) # perform fitness calculation for newly obtained children (rlz) - offSprings = datasetToDataArray(rlz, list(self.toBeSampled)) - objectiveVal = list(np.atleast_1d(rlz[self._objectiveVar].data)) - - # collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions) - constraintData = {} - if self._constraintFunctions or self._impConstraintFunctions: - params = [] - for y in (self._constraintFunctions + self._impConstraintFunctions): - params += y.parameterNames() - for p in list(set(params) -set([self._objectiveVar]) -set(list(self.toBeSampled.keys()))): - constraintData[p] = list(np.atleast_1d(rlz[p].data)) - # Compute constraint function g_j(x) for all constraints (j = 1 .. J) - # and all x's (individuals) in the population - g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions))) - - g = xr.DataArray(g0, - dims=['chromosome','Constraint'], - coords={'chromosome':np.arange(np.shape(offSprings)[0]), - 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]}) - # FIXME The constraint handling is following the structure of the RavenSampled.py, - # there are many utility functions that can be simplified and/or merged together - # _check, _handle, and _apply, for explicit and implicit constraints. 
- # This can be simplified in the near future in GradientDescent, SimulatedAnnealing, and here in GA - for index,individual in enumerate(offSprings): - newOpt = individual - opt = {self._objectiveVar:objectiveVal[index]} - for p, v in constraintData.items(): - opt[p] = v[index] - - for constIndex, constraint in enumerate(self._constraintFunctions + self._impConstraintFunctions): - if constraint in self._constraintFunctions: - g.data[index, constIndex] = self._handleExplicitConstraints(newOpt, constraint) - else: - g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint) - offSpringFitness = self._fitnessInstance(rlz, - objVar=self._objectiveVar, - a=self._objCoeff, - b=self._penaltyCoeff, - penalty=None, - constraintFunction=g, - type=self._minMax) - - self._collectOptPoint(rlz, offSpringFitness, objectiveVal,g) - self._resolveNewGeneration(traj, rlz, objectiveVal, offSpringFitness, g, info) + if len(self._objectiveVar) == 1: # This is a single-objective Optimization case + offSprings = datasetToDataArray(rlz, list(self.toBeSampled)) + objectiveVal = list(np.atleast_1d(rlz[self._objectiveVar[0]].data)) + + # collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions) + constraintData = {} + if self._constraintFunctions or self._impConstraintFunctions: + params = [] + for y in (self._constraintFunctions + self._impConstraintFunctions): + params += y.parameterNames() + for p in list(set(params) -set([self._objectiveVar[0]]) -set(list(self.toBeSampled.keys()))): + # for p in list(set(params) -set([self._objectiveVar]) -set(list(self.toBeSampled.keys()))): + constraintData[p] = list(np.atleast_1d(rlz[p].data)) + # Compute constraint function g_j(x) for all constraints (j = 1 .. J) + # and all x's (individuals) in the population + g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions))) + + g = xr.DataArray(g0, + dims=['chromosome','Constraint'], + coords={'chromosome':np.arange(np.shape(offSprings)[0]), + 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]}) + # FIXME The constraint handling is following the structure of the RavenSampled.py, + # there are many utility functions that can be simplified and/or merged together + # _check, _handle, and _apply, for explicit and implicit constraints. 
+ # This can be simplified in the near future in GradientDescent, SimulatedAnnealing, and here in GA + for index,individual in enumerate(offSprings): + newOpt = individual + opt = {self._objectiveVar[0]:objectiveVal[index]} + for p, v in constraintData.items(): + opt[p] = v[index] + + for constIndex, constraint in enumerate(self._constraintFunctions + self._impConstraintFunctions): + if constraint in self._constraintFunctions: + g.data[index, constIndex] = self._handleExplicitConstraints(newOpt, constraint) + else: + g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint) + + offSpringFitness = self._fitnessInstance(rlz, + objVar=self._objectiveVar[0], + a=self._objCoeff, + b=self._penaltyCoeff, + penalty=None, + constraintFunction=g, + type=self._minMax) + + self._collectOptPoint(rlz, offSpringFitness, objectiveVal, g) + self._resolveNewGeneration(traj, rlz, objectiveVal, offSpringFitness, g, info) + + else: # This is a multi-objective Optimization case + objectiveVal = [] + offSprings = datasetToDataArray(rlz, list(self.toBeSampled)) + for i in range(len(self._objectiveVar)): + objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + + # collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions) + constraintData = {} + if self._constraintFunctions or self._impConstraintFunctions: + params = [] + for y in (self._constraintFunctions + self._impConstraintFunctions): + params += y.parameterNames() + for p in list(set(params) -set(self._objectiveVar) -set(list(self.toBeSampled.keys()))): + constraintData[p] = list(np.atleast_1d(rlz[p].data)) + # Compute constraint function g_j(x) for all constraints (j = 1 .. J) + # and all x's (individuals) in the population + g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions))) + + g = xr.DataArray(g0, + dims=['chromosome','Constraint'], + coords={'chromosome':np.arange(np.shape(offSprings)[0]), + 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]}) + + for index,individual in enumerate(offSprings): + newOpt = individual + opt = dict(zip(self._objectiveVar, [item[index] for item in objectiveVal])) + for p, v in constraintData.items(): + opt[p] = v[index] + + for constIndex, constraint in enumerate(self._constraintFunctions + self._impConstraintFunctions): + if constraint in self._constraintFunctions: + g.data[index, constIndex] = self._handleExplicitConstraints(newOpt, constraint) + else: + g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint) + + Fitness = np.zeros((len(offSprings), 1)) + for i in range(len(Fitness)): + Fitness[i] = countConstViolation(g.data[i]) + Fitness = [item for sublist in Fitness.tolist() for item in sublist] + + Fitness = xr.DataArray(Fitness, + dims=['NumOfConstraintViolated'], + coords={'NumOfConstraintViolated':np.arange(np.shape(Fitness)[0])}) + + # 0.2@ n-1: Survivor selection(rlz) + # update population container given obtained children if self._activeTraj: - # 5.2@ n-1: Survivor selection(rlz) - # update population container given obtained children - if self.counter > 1: - self.population,self.fitness,age,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge, - variables=list(self.toBeSampled), - population=self.population, - fitness=self.fitness, - newRlz=rlz, - offSpringsFitness=offSpringFitness, - popObjectiveVal=self.objectiveVal) - self.popAge = age - else: - 
self.population = offSprings - self.fitness = offSpringFitness - self.objectiveVal = rlz[self._objectiveVar].data + if len(self._objectiveVar) == 1: # If the number of objectives is just 1: + if self.counter > 1: + self.population, self.fitness,\ + age,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge, + variables=list(self.toBeSampled), + population=self.population, + fitness=self.fitness, + newRlz=rlz, + offSpringsFitness=offSpringFitness, + popObjectiveVal=self.objectiveVal) + self.popAge = age + else: + self.population = offSprings + self.fitness = offSpringFitness + self.objectiveVal = rlz[self._objectiveVar[0]].data + + else: # If the number of objectives is more than 1: + if self.counter > 1: + self.population,self.rank, \ + self.popAge,self.crowdingDistance, \ + self.objectiveVal,self.constraints, \ + self.constraintsV = self._survivorSelectionInstance(age=self.popAge, + variables=list(self.toBeSampled), + population=self.population, + offsprings=rlz, + popObjectiveVal=self.objectiveVal, + offObjectiveVal=objectiveVal, + popConst = self.constraints, + offConst = Fitness, + popConstV = self.constraintsV, + offConstV = g + ) + + + + self._collectOptPointMulti(self.population, + self.rank, + self.crowdingDistance, + self.objectiveVal, + self.constraints, + self.constraintsV) + self._resolveNewGenerationMulti(traj, rlz, info) + + ############################################################################## + objs_vals = [list(ele) for ele in list(zip(*self.objectiveVal))] + import matplotlib.pyplot as plt + # JY: Visualization: all points - This code block needs to be either deleted or revisited. + plt.plot(np.array(objs_vals)[:,0], np.array(objs_vals)[:,1],'*') + # plt.xlim(70,100) + # plt.ylim(5,20) + + # JY: Visualization: optimal points only - This code block needs to be either deleted or revisited. 
+ plt.xlim(75,100) + plt.ylim(5,20) + plt.plot(np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[:,0], + np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[:,1],'*') + # plt.xlim(75,100) + # plt.ylim(5,20) + for i in range(len(np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[:,0])): + plt.text(np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[i,0], + np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[i,1], str(self.batchId-1)) + # plt.pause() + ############################################################################## + + else: + self.population = offSprings + self.constraints = Fitness + self.constraintsV = g + self.rank, self.crowdingDistance = self._fitnessInstance(rlz, + objVals = self._objectiveVar + ) + self.objectiveVal = [] + for i in range(len(self._objectiveVar)): + self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) # 1 @ n: Parent selection from population # pair parents together by indexes - parents = self._parentSelectionInstance(self.population, - variables=list(self.toBeSampled), - fitness=self.fitness, - nParents=self._nParents) + + if len(self._objectiveVar) == 1: # If the number of objectives is just 1: + parents = self._parentSelectionInstance(self.population, + variables=list(self.toBeSampled), + fitness=self.fitness, + nParents=self._nParents) + + else: # This is for a multi-objective Optimization case + + parents = self._parentSelectionInstance(self.population, + variables=list(self.toBeSampled), + nParents=self._nParents, + rank = self.rank, + crowdDistance = self.crowdingDistance, + constraint = self.constraints + ) # 2 @ n: Crossover from set of parents # create childrenCoordinates (x1,...,xM) @@ -550,9 +676,9 @@ def _useRealization(self, info, rlz): children = children[:self._populationSize, :] daChildren = xr.DataArray(children, - dims=['chromosome','Gene'], - coords={'chromosome': np.arange(np.shape(children)[0]), - 'Gene':list(self.toBeSampled)}) + dims=['chromosome','Gene'], + coords={'chromosome': np.arange(np.shape(children)[0]), + 'Gene':list(self.toBeSampled)}) # 5 @ n: Submit children batch # submit children coordinates (x1,...,xm), i.e., self.childrenCoordinates @@ -593,12 +719,20 @@ def flush(self): self.population = None self.popAge = None self.fitness = None + self.rank = None + self.crowdingDistance = None self.ahdp = np.NaN self.ahd = np.NaN self.bestPoint = None self.bestFitness = None self.bestObjective = None self.objectiveVal = None + self.multiBestPoint = None + self.multiBestFitness = None + self.multiBestObjective = None + self.multiBestConstraint = None + self.multiBestRank = None + self.multiBestCD = None # END queuing Runs # * * * * * * * * * * * * * * * * @@ -627,7 +761,7 @@ def _resolveNewGeneration(self, traj, rlz, objectiveVal, fitness, g, info): for i in range(rlz.sizes['RAVEN_sample_ID']): varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys()) rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) - rlzDict[self._objectiveVar] = np.atleast_1d(rlz[self._objectiveVar].data)[i] + rlzDict[self._objectiveVar[0]] = np.atleast_1d(rlz[self._objectiveVar[0]].data)[i] rlzDict['fitness'] = 
np.atleast_1d(fitness.data)[i] for ind, consName in enumerate(g['Constraint'].values): rlzDict['ConstraintEvaluation_'+consName] = g[i,ind] @@ -636,7 +770,7 @@ def _resolveNewGeneration(self, traj, rlz, objectiveVal, fitness, g, info): if acceptable in ['accepted', 'first']: # record history bestRlz = {} - bestRlz[self._objectiveVar] = self.bestObjective + bestRlz[self._objectiveVar[0]] = self.bestObjective bestRlz['fitness'] = self.bestFitness bestRlz.update(self.bestPoint) self._optPointHistory[traj].append((bestRlz, info)) @@ -645,6 +779,65 @@ def _resolveNewGeneration(self, traj, rlz, objectiveVal, fitness, g, info): else: # e.g. rerun pass # nothing to do, just keep moving + def _resolveNewGenerationMulti(self, traj, rlz, info): + """ + Store a new Generation after checking convergence + @ In, traj, int, trajectory for this new point + @ In, rlz, dict, realized realization + @ In, objectiveVal, list, objective values at each chromosome of the realization + @ In, fitness, xr.DataArray, fitness values at each chromosome of the realization + @ In, g, xr.DataArray, the constraint evaluation function + @ In, info, dict, identifying information about the realization + """ + self.raiseADebug('*'*80) + self.raiseADebug(f'Trajectory {traj} iteration {info["step"]} resolving new state ...') + # note the collection of the opt point + self._stepTracker[traj]['opt'] = (rlz, info) + acceptable = 'accepted' if self.counter > 1 else 'first' + old = self.population + converged = self._updateConvergence(traj, rlz, old, acceptable) + if converged: + self._closeTrajectory(traj, 'converge', 'converged', self.bestObjective) + # NOTE: the solution export needs to be updated BEFORE we run rejectOptPoint or extend the opt + # point history. + objVal = [[] for x in range(len(self.objectiveVal[0]))] + for i in range(len(self.objectiveVal[0])): + objVal[i] = [item[i] for item in self.objectiveVal] + + objVal = xr.DataArray(objVal, + dims=['chromosome','obj'], + coords={'chromosome':np.arange(np.shape(objVal)[0]), + 'obj': self._objectiveVar}) + if self._writeSteps == 'every': + for i in range(rlz.sizes['RAVEN_sample_ID']): + rlzDict = dict((var,self.population.data[i][j]) for j, var in enumerate(self.population.Gene.data)) + for j in range(len(self._objectiveVar)): + rlzDict[self._objectiveVar[j]] = objVal.data[i][j] + rlzDict['rank'] = np.atleast_1d(self.rank.data)[i] + rlzDict['CD'] = np.atleast_1d(self.crowdingDistance.data)[i] + rlzDict['fitness'] = np.atleast_1d(self.constraints.data)[i] + for ind, consName in enumerate([y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]): + rlzDict['ConstraintEvaluation_'+consName] = self.constraintsV.data[i,ind] + self._updateSolutionExport(traj, rlzDict, acceptable, None) + + # decide what to do next + if acceptable in ['accepted', 'first']: + # record history + bestRlz = {} + for i in range(len(self._objectiveVar)): + bestRlz[self._objectiveVar[i]] = [item[i] for item in self.multiBestObjective] + bestRlz['fitness'] = self.multiBestFitness + bestRlz['rank'] = self.multiBestRank + bestRlz['CD'] = self.multiBestCD + for ind, consName in enumerate(self.multiBestConstraint.Constraint): + bestRlz['ConstraintEvaluation_'+consName.values.tolist()] = self.multiBestConstraint[ind].values + bestRlz.update(self.multiBestPoint) + self._optPointHistory[traj].append((bestRlz, info)) + elif acceptable == 'rejected': + self._rejectOptPoint(traj, info, old) + else: # e.g. 
rerun + pass # nothing to do, just keep moving + def _collectOptPoint(self, rlz, fitness, objectiveVal, g): """ Collects the point (dict) from a realization @@ -669,6 +862,63 @@ def _collectOptPoint(self, rlz, fitness, objectiveVal, g): return point + def _collectOptPointMulti(self, population, rank, CD, objectiveVal, constraints, constraintsV): + """ + Collects the point (dict) from a realization + @ In, population, Dataset, container containing the population + @ In, objectiveVal, list, objective values at each chromosome of the realization + @ In, rank, xr.DataArray, rank values at each chromosome of the realization + @ In, crowdingDistance, xr.DataArray, crowdingDistance values at each chromosome of the realization + @ Out, point, dict, point used in this realization + """ + objVal = [[] for x in range(len(objectiveVal[0]))] + for i in range(len(objectiveVal[0])): + objVal[i] = [item[i] for item in objectiveVal] + + optPointsConsIDX = [i for i, nFit in enumerate(constraints) if nFit == min(constraints)] # Find index of chromosome which has smallest numeber of violations among population + optPointsRankNConsIDX = [i for i, rankValue in enumerate(rank[optPointsConsIDX]) if rankValue == min(rank[optPointsConsIDX])] # Find index of chromosome which has smallest numeber of violations among population & smallest rank + + optPoints,optObjVal,optConstraints,optConstraintsV,optRank,optCD = population[optPointsRankNConsIDX], np.array(objVal)[optPointsRankNConsIDX], constraints.data[optPointsRankNConsIDX], constraintsV.data[optPointsRankNConsIDX], rank.data[optPointsRankNConsIDX], CD.data[optPointsRankNConsIDX] + + # Previous ################################################## + # points,multiFit,rankSorted,cdSorted,objSorted,constSorted = \ + # zip(*[[a,b,c,d,e,f] for a, b, c, d, e, f in sorted(zip(np.atleast_2d(population.data),np.atleast_1d(constraintsV.data),np.atleast_1d(rank.data),np.atleast_1d(CD.data), objVal, constraints), + # reverse=True,key=lambda x: (-x[1], -x[2], x[3]))]) + # optPoints = [points[i] for i, rank in enumerate(rankSorted) if rank == 1 ] + # optMultiFit = [multiFit[i] for i, rank in enumerate(rankSorted) if rank == 1 ] + # optObj = [objSorted[i] for i, rank in enumerate(rankSorted) if rank == 1 ] + # optConst = [constSorted[i] for i, rank in enumerate(rankSorted) if rank == 1 ] + # optRank = [rankSorted[i] for i, rank in enumerate(rankSorted) if rank == 1 ] + # optCD = [cdSorted[i] for i, rank in enumerate(rankSorted) if rank == 1 ] + # if (len(optMultiFit) != len([x for x in optMultiFit if x != 0]) ) : + # optPoints = [optPoints[i] for i, nFit in enumerate(optMultiFit) if nFit == 0 ] + # optMultiFit = [x for x in optMultiFit if x == 0] + # optObj = [optObj[i] for i, nFit in enumerate(optMultiFit) if nFit == 0 ] + # optConst = [optConst[i] for i, nFit in enumerate(optMultiFit) if nFit == 0 ] + # optRank = [optRank[i] for i, nFit in enumerate(optMultiFit) if nFit == 0 ] + # optCD = [optCD[i] for i, nFit in enumerate(optMultiFit) if nFit == 0 ] + # Previous ################################################## + + optPointsDic = dict((var,np.array(optPoints)[:,i]) for i, var in enumerate(population.Gene.data)) + optConstNew = [] + for i in range(len(optConstraintsV)): + optConstNew.append(optConstraintsV[i]) + optConstNew = list(map(list, zip(*optConstNew))) + optConstNew = xr.DataArray(optConstNew, + dims=['Constraint','Evaluation'], + coords={'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)], + 
'Evaluation':np.arange(np.shape(optConstNew)[1])}) + + self.multiBestPoint = optPointsDic + self.multiBestFitness = optConstraints + self.multiBestObjective = optObjVal + self.multiBestConstraint = optConstNew + self.multiBestRank = optRank + self.multiBestCD = optCD + + return optPointsDic + + def _checkAcceptability(self, traj): """ This is an abstract method for all RavenSampled Optimizer, whereas for GA all children are accepted @@ -685,16 +935,26 @@ def checkConvergence(self, traj, new, old): @ Out, any(convs.values()), bool, True of any of the convergence criteria was reached @ Out, convs, dict, on the form convs[conv] = bool, where conv is in self._convergenceCriteria """ - convs = {} - for conv in self._convergenceCriteria: - fName = conv[:1].upper() + conv[1:] - # get function from lookup - f = getattr(self, f'_checkConv{fName}') - # check convergence function - okay = f(traj, new=new, old=old) - # store and update - convs[conv] = okay - + if len(self._objectiveVar) == 1: + convs = {} + for conv in self._convergenceCriteria: + fName = conv[:1].upper() + conv[1:] + # get function from lookup + f = getattr(self, f'_checkConv{fName}') + # check convergence function + okay = f(traj, new=new, old=old) + # store and update + convs[conv] = okay + else: + convs = {} + for conv in self._convergenceCriteria: + fName = conv[:1].upper() + conv[1:] + # get function from lookup + f = getattr(self, f'_checkConv{fName}') + # check convergence function + okay = f(traj, new=new, old=old) + # store and update + convs[conv] = okay return any(convs.values()), convs def _checkConvObjective(self, traj, **kwargs): @@ -704,15 +964,28 @@ def _checkConvObjective(self, traj, **kwargs): @ In, kwargs, dict, dictionary of parameters for convergence criteria @ Out, converged, bool, convergence state """ - if len(self._optPointHistory[traj]) < 2: - return False - o1, _ = self._optPointHistory[traj][-1] - obj = o1[self._objectiveVar] - converged = (obj == self._convergenceCriteria['objective']) - self.raiseADebug(self.convFormat.format(name='objective', - conv=str(converged), - got=obj, - req=self._convergenceCriteria['objective'])) + if len(self._objectiveVar) == 1: # single objective optimization + if len(self._optPointHistory[traj]) < 2: + return False + o1, _ = self._optPointHistory[traj][-1] + obj = o1[self._objectiveVar[0]] + converged = (obj == self._convergenceCriteria['objective']) + self.raiseADebug(self.convFormat.format(name='objective', + conv=str(converged), + got=obj, + req=self._convergenceCriteria['objective'])) + else: # multi objective optimization + if len(self._optPointHistory[traj]) < 2: + return False + o1, _ = self._optPointHistory[traj][-1] + obj1 = o1[self._objectiveVar[0]] + obj2 = o1[self._objectiveVar[1]] + converged = (obj1 == self._convergenceCriteria['objective'] and obj2 == self._convergenceCriteria['objective']) + # JY: I stopped here. Codeline below needs to be revisited! 
01/16/23 + # self.raiseADebug(self.convFormat.format(name='objective', + # conv=str(converged), + # got=obj1, + # req=self._convergenceCriteria['objective'])) return converged @@ -834,14 +1107,24 @@ def _updateConvergence(self, traj, new, old, acceptable): @ Out, converged, bool, True if converged on ANY criteria """ # NOTE we have multiple "if acceptable" trees here, as we need to update soln export regardless - if acceptable == 'accepted': - self.raiseADebug(f'Convergence Check for Trajectory {traj}:') - # check convergence - converged, convDict = self.checkConvergence(traj, new, old) - else: - converged = False - convDict = dict((var, False) for var in self._convergenceInfo[traj]) - self._convergenceInfo[traj].update(convDict) + if len(self._objectiveVar) == 1: # single-objective optimization + if acceptable == 'accepted': + self.raiseADebug(f'Convergence Check for Trajectory {traj}:') + # check convergence + converged, convDict = self.checkConvergence(traj, new, old) + else: + converged = False + convDict = dict((var, False) for var in self._convergenceInfo[traj]) + self._convergenceInfo[traj].update(convDict) + else: # multi-objective optimization + if acceptable == 'accepted': + self.raiseADebug(f'Convergence Check for Trajectory {traj}:') + # check convergence + converged, convDict = self.checkConvergence(traj, new, old) + else: + converged = False + convDict = dict((var, False) for var in self._convergenceInfo[traj]) + self._convergenceInfo[traj].update(convDict) return converged @@ -957,7 +1240,9 @@ def _addToSolutionExport(self, traj, rlz, acceptable): 'batchId': self.batchId, 'fitness': rlz['fitness'], 'AHDp': self.ahdp, - 'AHD': self.ahd} + 'AHD': self.ahd, + 'rank': 0 if len(self._objectiveVar) == 1 else rlz['rank'], + 'CD': 0 if len(self._objectiveVar) == 1 else rlz['CD']} for var, val in self.constants.items(): toAdd[var] = val diff --git a/ravenframework/Optimizers/Optimizer.py b/ravenframework/Optimizers/Optimizer.py index 517414d1dc..a3f99ed389 100644 --- a/ravenframework/Optimizers/Optimizer.py +++ b/ravenframework/Optimizers/Optimizer.py @@ -166,7 +166,7 @@ def __init__(self): self._cancelledTraj = {} # tracks cancelled trajectories, and reasons self._convergedTraj = {} # tracks converged trajectories, and values obtained self._numRepeatSamples = 1 # number of times to repeat sampling (e.g. denoising) - self._objectiveVar = None # objective variable for optimization + self._objectiveVar = [] # objective variable for optimization self._initialValuesFromInput = None # initial variable values from inputs, list of dicts (used to reset optimizer when re-running workflow) self._initialValues = None # initial variable values (trajectory starting locations), list of dicts self._variableBounds = None # dictionary of upper/lower bounds for each variable (may be inf?) 
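
Note: the single-objective and multi-objective branches of checkConvergence and _updateConvergence above currently execute identical statements, so the branch on len(self._objectiveVar) could be collapsed if both paths are meant to stay the same. A possible consolidated _updateConvergence is sketched below as a drop-in method using only names that appear in this patch; it is a sketch, not part of the diff.

def _updateConvergence(self, traj, new, old, acceptable):
  """
    Updates convergence information for trajectory; identical for single- and multi-objective runs
    @ In, traj, int, trajectory identifier
    @ In, new, dict, new optimal point
    @ In, old, dict, previous optimal point
    @ In, acceptable, str, condition of the new point
    @ Out, converged, bool, True if converged on ANY criteria
  """
  # NOTE we have multiple "if acceptable" trees here, as we need to update soln export regardless
  if acceptable == 'accepted':
    self.raiseADebug(f'Convergence Check for Trajectory {traj}:')
    converged, convDict = self.checkConvergence(traj, new, old)
  else:
    converged = False
    convDict = dict((var, False) for var in self._convergenceInfo[traj])
  self._convergenceInfo[traj].update(convDict)
  return converged
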
@@ -247,8 +247,13 @@ def handleInput(self, paramInput): @ Out, None """ # the reading of variables (dist or func) and constants already happened in _readMoreXMLbase in Sampler - # objective var - self._objectiveVar = paramInput.findFirst('objective').value + + if bool(paramInput.findAll('GAparams')): + rawObjectiveVar = paramInput.findFirst('objective').value + self._objectiveVar = [rawObjectiveVar.split(",")[i] for i in range(0,len(rawObjectiveVar.split(","))) ] + + else: + self._objectiveVar = paramInput.findFirst('objective').value # sampler init # self.readSamplerInit() can't be used because it requires the xml node diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py index d85181657c..39654feb70 100644 --- a/ravenframework/Optimizers/RavenSampled.py +++ b/ravenframework/Optimizers/RavenSampled.py @@ -301,7 +301,8 @@ def localFinalizeActualSampling(self, jobObject, model, myInput): # the sign of the objective function is flipped in case we do maximization # so get the correct-signed value into the realization if self._minMax == 'max': - rlz[self._objectiveVar] *= -1 + for i in range(len(self._objectiveVar)): + rlz[self._objectiveVar[i]] *= -1 # TODO FIXME let normalizeData work on an xr.DataSet (batch) not just a dictionary! rlz = self.normalizeData(rlz) self._useRealization(info, rlz) @@ -312,57 +313,145 @@ def finalizeSampler(self, failedRuns): @ In, failedRuns, list, runs that failed as part of this sampling @ Out, None """ - # get and print the best trajectory obtained - bestValue = None - bestTraj = None - bestPoint = None - s = -1 if self._minMax == 'max' else 1 - # check converged trajectories - self.raiseAMessage('*' * 80) - self.raiseAMessage('Optimizer Final Results:') - self.raiseADebug('') - self.raiseADebug(' - Trajectory Results:') - self.raiseADebug(' TRAJ STATUS VALUE') - statusTemplate = ' {traj:2d} {status:^11s} {val: 1.3e}' - # print cancelled traj - for traj, info in self._cancelledTraj.items(): - val = info['value'] - status = info['reason'] - self.raiseADebug(statusTemplate.format(status=status, traj=traj, val=s * val)) - # check converged traj - for traj, info in self._convergedTraj.items(): + if len(self._objectiveVar) == 1: + # get and print the best trajectory obtained + bestValue = None + bestTraj = None + bestPoint = None + s = -1 if self._minMax == 'max' else 1 + # check converged trajectories + self.raiseAMessage('*' * 80) + self.raiseAMessage('Optimizer Final Results:') + self.raiseADebug('') + self.raiseADebug(' - Trajectory Results:') + self.raiseADebug(' TRAJ STATUS VALUE') + statusTemplate = ' {traj:2d} {status:^11s} {val: 1.3e}' + # print cancelled traj + for traj, info in self._cancelledTraj.items(): + val = info['value'] + status = info['reason'] + self.raiseADebug(statusTemplate.format(status=status, traj=traj, val=s * val)) + # check converged traj + for traj, info in self._convergedTraj.items(): + opt = self._optPointHistory[traj][-1][0] + val = info['value'] + self.raiseADebug(statusTemplate.format(status='converged', traj=traj, val=s * val)) + if bestValue is None or val < bestValue: + bestTraj = traj + bestValue = val + # further check active unfinished trajectories + # FIXME why should there be any active, unfinished trajectories when we're cleaning up sampler? + traj = 0 # FIXME why only 0?? what if it's other trajectories that are active and unfinished? 
+ # sanity check: if there's no history (we never got any answers) then report rather than crash + if len(self._optPointHistory[traj]) == 0: + self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' + + 'Perhaps the Model failed?') opt = self._optPointHistory[traj][-1][0] - val = info['value'] - self.raiseADebug(statusTemplate.format(status='converged', traj=traj, val=s * val)) + val = opt[self._objectiveVar[0]] + self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val)) if bestValue is None or val < bestValue: - bestTraj = traj bestValue = val - # further check active unfinished trajectories - # FIXME why should there be any active, unfinished trajectories when we're cleaning up sampler? - traj = 0 # FIXME why only 0?? what if it's other trajectories that are active and unfinished? - # sanity check: if there's no history (we never got any answers) then report than rather than crash - if len(self._optPointHistory[traj]) == 0: - self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' + - 'Perhaps the Model failed?') - opt = self._optPointHistory[traj][-1][0] - val = opt[self._objectiveVar] - self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val)) - if bestValue is None or val < bestValue: - bestValue = val - bestTraj = traj - bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0]) - bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled) - self.raiseADebug('') - self.raiseAMessage(' - Final Optimal Point:') - finalTemplate = ' {name:^20s} {value: 1.3e}' - finalTemplateInt = ' {name:^20s} {value: 3d}' - self.raiseAMessage(finalTemplate.format(name=self._objectiveVar, value=s * bestValue)) - self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj)) - for var, val in bestPoint.items(): - self.raiseAMessage(finalTemplate.format(name=var, value=val)) - self.raiseAMessage('*' * 80) - # write final best solution to soln export - self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None') + bestTraj = traj + bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0]) + bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled) + self.raiseADebug('') + self.raiseAMessage(' - Final Optimal Point:') + finalTemplate = ' {name:^20s} {value: 1.3e}' + finalTemplateInt = ' {name:^20s} {value: 3d}' + self.raiseAMessage(finalTemplate.format(name=self._objectiveVar[0], value=s * bestValue)) + self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj)) + for var, val in bestPoint.items(): + self.raiseAMessage(finalTemplate.format(name=var, value=val)) + self.raiseAMessage('*' * 80) + # write final best solution to soln export + self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None') + else: + # get and print the best trajectory obtained + bestValue = None + bestTraj = None + bestPoint = None + s = -1 if self._minMax == 'max' else 1 + # check converged trajectories + self.raiseAMessage('*' * 80) + self.raiseAMessage('Optimizer Final Results:') + self.raiseADebug('') + self.raiseADebug(' - Trajectory Results:') + self.raiseADebug(' TRAJ STATUS VALUE') + statusTemplate = ' {traj:2d} {status:^11s} {val: 1.3e}' + statusTemplate_multi = ' {traj:2d} {status:^11s} {val1: ^11s} {val2: ^11s}' + + # print cancelled traj + for traj, info in self._cancelledTraj.items(): + val = info['value'] + status = info['reason'] + self.raiseADebug(statusTemplate.format(status=status, traj=traj, val=s * 
val)) + # check converged traj + for traj, info in self._convergedTraj.items(): + opt = self._optPointHistory[traj][-1][0] + val = info['value'] + self.raiseADebug(statusTemplate.format(status='converged', traj=traj, val=s * val)) + if bestValue is None or val < bestValue: + bestTraj = traj + bestValue = val + # further check active unfinished trajectories + # FIXME why should there be any active, unfinished trajectories when we're cleaning up sampler? + traj = 0 # FIXME why only 0?? what if it's other trajectories that are active and unfinished? + # sanity check: if there's no history (we never got any answers) then report rather than crash + if len(self._optPointHistory[traj]) == 0: + self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' + + 'Perhaps the Model failed?') + + for i in range(len(self._optPointHistory[traj][-1][0]['obj1'])): + opt = self._optPointHistory[traj][-1][0] + key = list(opt.keys()) + val = [item[i] for item in opt.values()] + optElm = {key[a]: val[a] for a in range(len(key))} + optVal = [s*optElm[self._objectiveVar[b]] for b in range(len(self._objectiveVar))] + # self.raiseADebug(statusTemplate_multi.format(status='active', traj=traj, val1=val1, val2=val2)) + # bestValue_1 = val1 + # bestValue_2 = val2 + bestTraj = traj + bestOpt = self.denormalizeData(optElm) + bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled) + # self.raiseADebug('') + # self.raiseAMessage(' - Final Optimal Point:') + # finalTemplate = ' {name_1:^20s} {name_2:^20s} {value_1:^20s} {value_2:^20s}' + # finalTemplateInt = ' {name:^20s} {value: 3d}' + # self.raiseAMessage(finalTemplate.format(name_1=self._objectiveVar[0], name_2=self._objectiveVar[1], value_1=bestValue_1, value_2=bestValue_2)) + # self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj)) + # JY: 23/01/20 two lines below are temperarily commented. If it is not needed, then will be deleted. + # for var, val in bestPoint.items(): + # self.raiseAMessage(finalTemplate.format(name=var, value=val)) + # self.raiseAMessage('*' * 80) + # write final best solution to soln export + self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None') + + # ### Original start ### + # opt = self._optPointHistory[traj][-1][0] + # val = range(len(self._objectiveVar)) + # for i in range(len(val)): + # val[i] = [', '.join(map(str,(s*opt[self._objectiveVar[i]])))] + # # val1 = ', '.join(map(str,(s*opt[self._objectiveVar[0]]).tolist())) + # # val2 = ', '.join(map(str,(s*opt[self._objectiveVar[1]]).tolist())) + # # self.raiseADebug(statusTemplate_multi.format(status='active', traj=traj, val1=val1, val2=val2)) + # # bestValue_1 = val1 + # # bestValue_2 = val2 + # bestTraj = traj + # bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0]) + # bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled) + # self.raiseADebug('') + # self.raiseAMessage(' - Final Optimal Point:') + # # finalTemplate = ' {name_1:^20s} {name_2:^20s} {value_1:^20s} {value_2:^20s}' + # # finalTemplateInt = ' {name:^20s} {value: 3d}' + # # self.raiseAMessage(finalTemplate.format(name_1=self._objectiveVar[0], name_2=self._objectiveVar[1], value_1=bestValue_1, value_2=bestValue_2)) + # # self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj)) + # # JY: 23/01/20 two lines below are temperarily commented. If it is not needed, then will be deleted. 
+ # # for var, val in bestPoint.items(): + # # self.raiseAMessage(finalTemplate.format(name=var, value=val)) + # self.raiseAMessage('*' * 80) + # # write final best solution to soln export + # self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None') + # ### Original end ### def flush(self): """ @@ -498,10 +587,10 @@ def _handleImplicitConstraints(self, previous): @ Out, accept, bool, whether point was satisfied implicit constraints """ normed = copy.deepcopy(previous) - oldVal = normed[self._objectiveVar] - normed.pop(self._objectiveVar, oldVal) + oldVal = normed[self._objectiveVar[0]] + normed.pop(self._objectiveVar[0], oldVal) denormed = self.denormalizeData(normed) - denormed[self._objectiveVar] = oldVal + denormed[self._objectiveVar[0]] = oldVal accept = self._checkImpFunctionalConstraints(denormed) return accept @@ -569,9 +658,9 @@ def _resolveNewOptPoint(self, traj, rlz, optVal, info): # TODO could we ever use old rerun gradients to inform the gradient direction as well? self._rerunsSinceAccept[traj] += 1 N = self._rerunsSinceAccept[traj] + 1 - oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar] + oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar[0]] newAvg = ((N-1)*oldVal + optVal) / N - self._optPointHistory[traj][-1][0][self._objectiveVar] = newAvg + self._optPointHistory[traj][-1][0][self._objectiveVar[0]] = newAvg else: self.raiseAnError(f'Unrecognized acceptability: "{acceptable}"') @@ -635,10 +724,17 @@ def _updateSolutionExport(self, traj, rlz, acceptable, rejectReason): 'rejectReason': rejectReason }) # optimal point input and output spaces - objValue = rlz[self._objectiveVar] - if self._minMax == 'max': - objValue *= -1 - toExport[self._objectiveVar] = objValue + if len(self._objectiveVar) == 1: # Single Objective Optimization + objValue = rlz[self._objectiveVar[0]] + if self._minMax == 'max': + objValue *= -1 + toExport[self._objectiveVar[0]] = objValue + else: # Multi Objective Optimization + for i in range(len(self._objectiveVar)): + objValue = rlz[self._objectiveVar[i]] + if self._minMax == 'max': + objValue *= -1 + toExport[self._objectiveVar[i]] = objValue toExport.update(self.denormalizeData(dict((var, rlz[var]) for var in self.toBeSampled))) # constants and functions toExport.update(self.constants) diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index 53a27ff1c6..6158537cb1 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -18,6 +18,9 @@ Created June,16,2020 @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi """ +# Internal Modules---------------------------------------------------------------------------------- +from ...utils import frontUtils + # External Imports import numpy as np import xarray as xr @@ -75,6 +78,36 @@ def invLinear(rlz,**kwargs): coords={'chromosome': np.arange(len(data))}) return fitness +def rank_crowding(rlz,**kwargs): + r""" + Multiobjective optimization using NSGA-II requires the rank and crowding distance values to the objective function + + @ In, rlz, xr.Dataset, containing the evaluation of a certain + set of individuals (can be the initial population for the very first iteration, + or a population of offsprings) + @ In, kwargs, dict, dictionary of parameters for this rank_crowding method: + objVar, string, the names of the objective variables + @ Out, offSpringRank, xr.DataArray, the rank of the given objective corresponding to a specific chromosome. 
+ offSpringCD, xr.DataArray, the crowding distance of the given objective corresponding to a specific chromosome. + """ + objectiveVal = [] + for i in range(len(kwargs['objVals'])): + objectiveVal.append(list(np.atleast_1d(rlz[kwargs['objVals'][i]].data))) + + offspringObjsVals = [list(ele) for ele in list(zip(*objectiveVal))] + + offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringObjsVals)) + offSpringRank = xr.DataArray(offSpringRank, + dims=['rank'], + coords={'rank': np.arange(np.shape(offSpringRank)[0])}) + + offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, popSize=len(offSpringRank), objectives=np.array(offspringObjsVals)) + offSpringCD = xr.DataArray(offSpringCD, + dims=['CrowdingDistance'], + coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) + + return offSpringRank, offSpringCD + def feasibleFirst(rlz,**kwargs): r""" Efficient Parameter-less Feasible First Penalty Fitness method @@ -166,6 +199,7 @@ def logistic(rlz,**kwargs): __fitness['invLinear'] = invLinear __fitness['logistic'] = logistic __fitness['feasibleFirst'] = feasibleFirst +__fitness['rank_crowding'] = rank_crowding def returnInstance(cls, name): diff --git a/ravenframework/Optimizers/mutators/mutators.py b/ravenframework/Optimizers/mutators/mutators.py index 11d0aec836..d5e58e4661 100644 --- a/ravenframework/Optimizers/mutators/mutators.py +++ b/ravenframework/Optimizers/mutators/mutators.py @@ -33,14 +33,19 @@ def swapMutator(offSprings, distDict, **kwargs): E.g.: child=[a,b,c,d,e] --> b and d are selected --> child = [a,d,c,b,e] @ In, offSprings, xr.DataArray, children resulting from the crossover process - @ In, distDict, dict, dictionary containing distribution associated with each gene @ In, kwargs, dict, dictionary of parameters for this mutation method: locs, list, the 2 locations of the genes to be swapped mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur variables, list, variables names. @ Out, children, xr.DataArray, the mutated chromosome, i.e., the child. """ - loc1,loc2 = locationsGenerator(offSprings, kwargs['locs']) + if kwargs['locs'] == None: + locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False))) + loc1 = np.minimum(locs[0], locs[1]) + loc2 = np.maximum(locs[0], locs[1]) + else: + loc1 = np.minimum(kwargs['locs'][0], kwargs['locs'][1]) + loc2 = np.maximum(kwargs['locs'][0], kwargs['locs'][1]) # initializing children children = xr.DataArray(np.zeros((np.shape(offSprings))), @@ -64,7 +69,6 @@ def scrambleMutator(offSprings, distDict, **kwargs): This method performs the scramble mutator. For each child, a subset of genes is chosen and their values are shuffled randomly. @ In, offSprings, xr.DataArray, offsprings after crossover - @ In, distDict, dict, dictionary containing distribution associated with each gene @ In, kwargs, dict, dictionary of parameters for this mutation method: chromosome, numpy.array, the chromosome that will mutate to the new child locs, list, the locations of the genes to be randomly scrambled @@ -72,7 +76,12 @@ def scrambleMutator(offSprings, distDict, **kwargs): variables, list, variables names. @ Out, child, np.array, the mutated chromosome, i.e., the child. 
""" - loc1,loc2 = locationsGenerator(offSprings, kwargs['locs']) + if kwargs['locs'] == None: + locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False))) + locs.sort() + else: + locs = [kwargs['locs'][0], kwargs['locs'][1]] + locs.sort() # initializing children children = xr.DataArray(np.zeros((np.shape(offSprings))), @@ -85,9 +94,9 @@ def scrambleMutator(offSprings, distDict, **kwargs): children[i,j] = distDict[offSprings[i].coords['Gene'].values[j]].cdf(float(offSprings[i,j].values)) for i in range(np.shape(offSprings)[0]): - for ind,element in enumerate([loc1,loc2]): + for ind,element in enumerate(locs): if randomUtils.random(dim=1,samples=1)< kwargs['mutationProb']: - children[i,loc1:loc2+1] = randomUtils.randomPermutation(list(children.data[i,loc1:loc2+1]),None) + children[i,locs[0]:locs[-1]+1] = randomUtils.randomPermutation(list(children.data[i,locs[0]:locs[-1]+1]),None) for i in range(np.shape(offSprings)[0]): for j in range(np.shape(offSprings)[1]): @@ -102,7 +111,6 @@ def bitFlipMutator(offSprings, distDict, **kwargs): The gene to be flipped is completely random. The new value of the flipped gene is is completely random. @ In, offSprings, xr.DataArray, children resulting from the crossover process - @ In, distDict, dict, dictionary containing distribution associated with each gene @ In, kwargs, dict, dictionary of parameters for this mutation method: mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur @ Out, offSprings, xr.DataArray, children resulting from the crossover process @@ -128,7 +136,6 @@ def randomMutator(offSprings, distDict, **kwargs): """ This method is designed to randomly mutate a single gene in each chromosome with probability = mutationProb. @ In, offSprings, xr.DataArray, children resulting from the crossover process - @ In, distDict, dict, dictionary containing distribution associated with each gene @ In, kwargs, dict, dictionary of parameters for this mutation method: mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur @ Out, offSprings, xr.DataArray, children resulting from the crossover process @@ -155,13 +162,18 @@ def inversionMutator(offSprings, distDict, **kwargs): E.g. 
given chromosome C = [0,1,2,3,4,5,6,7,8,9] and sampled locL=2 locU=6; New chromosome C' = [0,1,6,5,4,3,2,7,8,9] @ In, offSprings, xr.DataArray, children resulting from the crossover process - @ In, distDict, dict, dictionary containing distribution associated with each gene @ In, kwargs, dict, dictionary of parameters for this mutation method: mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur @ Out, offSprings, xr.DataArray, children resulting from the crossover process """ # sample gene locations: i.e., determine locL and locU - locL,locU = locationsGenerator(offSprings, kwargs['locs']) + if kwargs['locs'] == None: + locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False))) + locL = np.minimum(locs[0], locs[1]) + locU = np.maximum(locs[0], locs[1]) + else: + locL = np.minimum(kwargs['locs'][0], kwargs['locs'][1]) + locU = np.maximum(kwargs['locs'][0], kwargs['locs'][1]) for child in offSprings: # the mutation is performed for each child independently @@ -170,7 +182,7 @@ def inversionMutator(offSprings, distDict, **kwargs): seq = np.arange(locL,locU+1) allElems = [] for i,elem in enumerate(seq): - allElems.append(distDict[child.coords['Gene'].values[i]].cdf(float(child[elem].values))) + allElems.append(distDict[child.coords['Gene'].values[i]].cdf(float(child[elem].values))) mirrSeq = allElems[::-1] mirrElems = [] @@ -181,23 +193,6 @@ def inversionMutator(offSprings, distDict, **kwargs): return offSprings -def locationsGenerator(offSprings,locs): - """ - Methods designed to process the locations for the mutators. These locations can be either user specified or - randomly generated. - @ In, offSprings, xr.DataArray, children resulting from the crossover process - @ In, locs, list, the two locations of the genes to be swapped - @ Out, loc1, loc2, int, the two ordered processed locations required by the mutators - """ - if locs == None: - locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False))) - loc1 = np.minimum(locs[0], locs[1]) - loc2 = np.maximum(locs[0], locs[1]) - else: - loc1 = np.minimum(locs[0], locs[1]) - loc2 = np.maximum(locs[0], locs[1]) - return loc1, loc2 - __mutators = {} __mutators['swapMutator'] = swapMutator __mutators['scrambleMutator'] = scrambleMutator diff --git a/ravenframework/Optimizers/parentSelectors/parentSelectors.py b/ravenframework/Optimizers/parentSelectors/parentSelectors.py index fc82522271..66bf37932f 100644 --- a/ravenframework/Optimizers/parentSelectors/parentSelectors.py +++ b/ravenframework/Optimizers/parentSelectors/parentSelectors.py @@ -78,6 +78,9 @@ def rouletteWheel(population,**kwargs): fitness = np.delete(fitness,counter,axis=0) return selectedParent +def countConstViolation(const): + return sum(1 for i in const if i < 0) + def tournamentSelection(population,**kwargs): """ Tournament Selection mechanism for parent selection @@ -88,21 +91,25 @@ def tournamentSelection(population,**kwargs): variables, list, variable names @ Out, newPopulation, xr.DataArray, selected parents, """ - fitness = kwargs['fitness'] + nParents= kwargs['nParents'] pop = population popSize = population.values.shape[0] - if 'rank' in kwargs: + if 'rank' in kwargs.keys(): # the key rank is used in multi-objective optimization where rank identifies which front the point belongs to rank = kwargs['rank'] + crowdDistance = kwargs['crowdDistance'] + constraintInfo = kwargs['constraint'] multiObjectiveRanking 
= True - matrixOperationRaw = np.zeros((popSize,3)) + matrixOperationRaw = np.zeros((popSize, 4)) matrixOperationRaw[:,0] = np.transpose(np.arange(popSize)) - matrixOperationRaw[:,1] = np.transpose(fitness.data) + matrixOperationRaw[:,1] = np.transpose(crowdDistance.data) matrixOperationRaw[:,2] = np.transpose(rank.data) - matrixOperation = np.zeros((popSize,3)) + matrixOperationRaw[:,3] = np.transpose(constraintInfo.data) + matrixOperation = np.zeros((popSize,len(matrixOperationRaw[0]))) else: + fitness = kwargs['fitness'] multiObjectiveRanking = False matrixOperationRaw = np.zeros((popSize,2)) matrixOperationRaw[:,0] = np.transpose(np.arange(popSize)) @@ -118,30 +125,32 @@ def tournamentSelection(population,**kwargs): for idx, val in enumerate(indexesShuffled): matrixOperation[idx,:] = matrixOperationRaw[val,:] - selectedParent = xr.DataArray( - np.zeros((nParents,np.shape(pop)[1])), - dims=['chromosome','Gene'], - coords={'chromosome':np.arange(nParents), - 'Gene': kwargs['variables']}) + selectedParent = xr.DataArray(np.zeros((nParents,np.shape(pop)[1])), + dims=['chromosome','Gene'], + coords={'chromosome':np.arange(nParents), + 'Gene': kwargs['variables']}) - if not multiObjectiveRanking: # single-objective implementation of tournamentSelection + if not multiObjectiveRanking: # single-objective implementation of tournamentSelection for i in range(nParents): if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]: index = int(matrixOperation[2*i,0]) else: index = int(matrixOperation[2*i+1,0]) selectedParent[i,:] = pop.values[index,:] - else: # multi-objective implementation of tournamentSelection - for i in range(nParents-1): - if matrixOperation[2*i,2] > matrixOperation[2*i+1,2]: - index = int(matrixOperation[i,0]) - elif matrixOperation[2*i,2] < matrixOperation[2*i+1,2]: - index = int(matrixOperation[i+1,0]) - else: # same rank case - if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]: - index = int(matrixOperation[i,0]) - else: - index = int(matrixOperation[i+1,0]) + else: # multi-objective implementation of tournamentSelection + for i in range(nParents): + if matrixOperation[2*i,3] > matrixOperation[2*i+1,3]: index = int(matrixOperation[2*i+1,0]) + elif matrixOperation[2*i,3] < matrixOperation[2*i+1,3]: index = int(matrixOperation[2*i,0]) + elif matrixOperation[2*i,3] == matrixOperation[2*i+1,3]: # if same number of constraints violations + if matrixOperation[2*i,2] > matrixOperation[2*i+1,2]: + index = int(matrixOperation[2*i+1,0]) + elif matrixOperation[2*i,2] < matrixOperation[2*i+1,2]: + index = int(matrixOperation[2*i,0]) + else: # same number of constraints and same rank case + if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]: + index = int(matrixOperation[2*i,0]) + else: + index = int(matrixOperation[2*i+1,0]) selectedParent[i,:] = pop.values[index,:] return selectedParent diff --git a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py index 1b754af494..6da9f67071 100644 --- a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py +++ b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py @@ -24,6 +24,7 @@ import numpy as np import xarray as xr +from ravenframework.utils import frontUtils # @profile def ageBased(newRlz,**kwargs): @@ -80,7 +81,7 @@ def fitnessBased(newRlz,**kwargs): It combines the parents and children/offsprings then keeps the fittest individuals to revert to the same population size. 
      @ In, newRlz, xr.DataSet, containing either a single realization, or a batch of realizations.
-     @ In, kwargs, dict, dictionary of parameters for this mutation method:
+     @ In, kwargs, dict, dictionary of parameters for this survivor selection method:
          age, list, ages of each chromosome in the population of the previous generation
          offSpringsFitness, xr.DataArray, fitness of each new child, i.e., np.shape(offSpringsFitness) = nChildren x nGenes
          variables
@@ -116,9 +117,9 @@ def fitnessBased(newRlz,**kwargs):
   newAge = sortedAgeT[:-len(offSprings)]
   newPopulationArray = xr.DataArray(newPopulationSorted,
-                                    dims=['chromosome','Gene'],
-                                    coords={'chromosome':np.arange(np.shape(newPopulationSorted)[0]),
-                                            'Gene': kwargs['variables']})
+                                    dims=['chromosome','Gene'],
+                                    coords={'chromosome':np.arange(np.shape(newPopulationSorted)[0]),
+                                            'Gene': kwargs['variables']})
   newFitness = xr.DataArray(newFitness,
                             dims=['chromosome'],
                             coords={'chromosome':np.arange(np.shape(newFitness)[0])})
@@ -126,9 +127,98 @@ def fitnessBased(newRlz,**kwargs):
   #return newPopulationArray,newFitness,newAge
   return newPopulationArray,newFitness,newAge,kwargs['popObjectiveVal']
+# @profile
+def rankNcrowdingBased(offsprings, **kwargs):
+  """
+    rankNcrowdingBased survivorSelection mechanism for new generation selection.
+    It combines the parents and children/offsprings, then calculates their rank and crowding distance.
+    Once ranks and crowding distances are available, it keeps the lowest-ranked individuals (and, among individuals of the same rank, those with the highest crowding distance).
+    @ In, offsprings, xr.DataSet, containing either a single realization, or a batch of realizations.
+    @ In, kwargs, dict, dictionary of parameters for this survivor selection method:
+          variables
+          population
+    @ Out, newPopulation, xr.DataArray, newPopulation for the new generation, i.e. np.shape(newPopulation) = populationSize x nGenes.
+    @ Out, newRank, xr.DataArray, rank of each chromosome in the new population
+    @ Out, newCD, xr.DataArray, crowding distance of each chromosome in the new population.
+ """ + popSize = np.shape(kwargs['population'])[0] + if ('age' not in kwargs.keys() or kwargs['age'] == None): + popAge = [0]*popSize + else: + popAge = kwargs['age'] + + population = np.atleast_2d(kwargs['population'].data) + offSprings = np.atleast_2d(offsprings[kwargs['variables']].to_array().transpose().data) + popObjectiveVal = kwargs['popObjectiveVal'] + offObjectiveVal = kwargs['offObjectiveVal'] + popConst = kwargs['popConst'].data + offConst = kwargs['offConst'].data + popConstV = kwargs['popConstV'].data + offConstV = kwargs['offConstV'].data + + newConstMerged = np.append(popConst, offConst) + newConstVMerged = np.array(popConstV.tolist() + offConstV.tolist()) + + newObjectivesMerged = np.array([i + j for i, j in zip(popObjectiveVal, offObjectiveVal)]) + newObjectivesMerged_pair = [list(ele) for ele in list(zip(*newObjectivesMerged))] + + newPopRank = frontUtils.rankNonDominatedFrontiers(np.array(newObjectivesMerged_pair)) + newPopRank = xr.DataArray(newPopRank, + dims=['rank'], + coords={'rank': np.arange(np.shape(newPopRank)[0])}) + + newPopCD = frontUtils.crowdingDistance(rank=newPopRank, popSize=len(newPopRank), objectives=np.array(newObjectivesMerged_pair)) + newPopCD = xr.DataArray(newPopCD, + dims=['CrowdingDistance'], + coords={'CrowdingDistance': np.arange(np.shape(newPopCD)[0])}) + + newAge = list(map(lambda x:x+1, popAge)) + newPopulationMerged = np.concatenate([population,offSprings]) + newAge.extend([0]*len(offSprings)) + + sortedConst,sortedRank,sortedCD,sortedAge,sortedPopulation,sortedObjectives,sortedConstV = \ + zip(*[(x,y,z,i,j,k,a) for x,y,z,i,j,k,a in \ + sorted(zip(newConstMerged,newPopRank.data,newPopCD.data,newAge,newPopulationMerged.tolist(),newObjectivesMerged_pair,newConstVMerged),reverse=False,key=lambda x: (x[0], x[1], -x[2]))]) + sortedConstT,sortedRankT,sortedCDT,sortedAgeT,sortedPopulationT,sortedObjectivesT,sortedConstVT = \ + np.atleast_1d(list(sortedConst)),np.atleast_1d(list(sortedRank)),list(sortedCD),list(sortedAge),np.atleast_1d(list(sortedPopulation)),np.atleast_1d(list(sortedObjectives)),np.atleast_1d(list(sortedConstV)) + + newPopulation = sortedPopulationT[:-len(offSprings)] + newObjectives = sortedObjectivesT[:-len(offSprings)] + + newRank = frontUtils.rankNonDominatedFrontiers(newObjectives) + newRank = xr.DataArray(newRank, + dims=['rank'], + coords={'rank': np.arange(np.shape(newRank)[0])}) + + newObjectivesP = [list(ele) for ele in list(zip(*newObjectives))] + newCD = frontUtils.crowdingDistance(rank=newRank, popSize=len(newRank), objectives=newObjectives) + newCD = xr.DataArray(newCD, + dims=['CrowdingDistance'], + coords={'CrowdingDistance': np.arange(np.shape(newCD)[0])}) + + newAge = sortedAgeT[:-len(offSprings)] + newConst = sortedConstT[:-len(offSprings)] + newConstV = sortedConstVT[:-len(offSprings)] + + newPopulationArray = xr.DataArray(newPopulation, + dims=['chromosome','Gene'], + coords={'chromosome':np.arange(np.shape(newPopulation)[0]), + 'Gene': kwargs['variables']}) + newConst = xr.DataArray(newConst, + dims=['NumOfConstViolated'], + coords={'NumOfConstViolated':np.arange(np.shape(newConst)[0])}) + + newConstV = xr.DataArray(newConstV, + dims=['chromosome','ConstEvaluation'], + coords={'chromosome':np.arange(np.shape(newPopulation)[0]), + 'ConstEvaluation':np.arange(np.shape(newConstV)[1])}) + + return newPopulationArray,newRank,newAge,newCD,newObjectivesP,newConst,newConstV + __survivorSelectors = {} __survivorSelectors['ageBased'] = ageBased __survivorSelectors['fitnessBased'] = fitnessBased 
+__survivorSelectors['rankNcrowdingBased'] = rankNcrowdingBased def returnInstance(cls, name): """ diff --git a/ravenframework/utils/frontUtils.py b/ravenframework/utils/frontUtils.py index c63eca57fc..8bb0e11c72 100644 --- a/ravenframework/utils/frontUtils.py +++ b/ravenframework/utils/frontUtils.py @@ -44,6 +44,7 @@ def nonDominatedFrontier(data, returnMask, minMask=None): Reference: the following code has been adapted from https://stackoverflow.com/questions/32791911/fast-calculation-of-pareto-front-in-python """ + if minMask is None: pass elif minMask is not None and minMask.shape[0] != data.shape[1]: @@ -56,8 +57,8 @@ def nonDominatedFrontier(data, returnMask, minMask=None): isEfficient = np.arange(data.shape[0]) nPoints = data.shape[0] nextPointIndex = 0 - while nextPointIndex= 0, + so if: + 1) f(x,y) >= 0 then g = f + 2) f(x,y) >= a then g = f - a + 3) f(x,y) <= b then g = b - f + 4) f(x,y) = c then g = 1e-6 - abs((f(x,y) - c)) (equality constraint) + """ + g = eval(Input.name)(Input) + return g + + +def expConstr1(Input):#You are free to pick this name but it has to be similar to the one in the xml# + """ + Let's assume that the constraint is: + $ x3+x4 < 8 $ + then g the constraint evaluation function (which has to be > 0) is taken to be: + g = 8 - (x3+x4) + in this case if g(\vec(x)) < 0 then this x violates the constraint and vice versa + @ In, Input, object, RAVEN container + @ out, g, float, explicit constraint 1 evaluation function + """ + g = 8 - Input.x3 - Input.x4 + return g + +def expConstr2(Input): + """ + Explicit Equality Constraint: + let's consider the constraint x1**2 + x2**2 = 25 + The way to write g is to use a very small number for instance, epsilon = 1e-12 + and then g = epsilon - abs(constraint) + @ In, Input, object, RAVEN container + @ out, g, float, explicit constraint 2 evaluation function + """ + g = 1e-12 - abs(Input.x1**2 + Input.x2**2 - 25) + return g + +def expConstr3(Input): + """ + @ In, Input, object, RAVEN container + @ out, g, float, explicit constraint 3 evaluation function + """ + g = 10 - Input.x3 - Input.x4 + return g + +def impConstr1(Input): + """ + The implicit constraint involves variables from the output space, for example the objective variable or + a dependent variable that is not in the optimization search space + @ In, Input, object, RAVEN container + @ out, g, float, implicit constraint 1 evaluation function + """ + g = 10 - Input.x1**2 - Input.obj + return g + +def impConstr2(Input): + """ + The implicit constraint involves variables from the output space, for example the objective variable or + a dependent variable that is not in the optimization search space + @ In, Input, object, RAVEN container + @ out, g, float, implicit constraint 2 evaluation function + """ + g = Input.x1**2 + Input.obj - 10 + return g + +def impConstr3(Input): + """ + The implicit constraint involves variables from the output space, for example the objective variable or + a dependent variable that is not in the optimization search space + @ In, Input, object, RAVEN container + @ out, g, float, implicit constraint 3 evaluation function + """ + g = 100 - Input.obj1 + return g \ No newline at end of file diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/myLocalSum_multi.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/myLocalSum_multi.py new file mode 100644 index 0000000000..86ef17bdeb --- /dev/null +++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/myLocalSum_multi.py @@ -0,0 +1,43 @@ +# Copyright 2017 Battelle Energy 
Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# @author: Mohammad Abdo (@Jimmy-INL) + +def evaluate(Inputs): + Sum = 0 + LocalSum1 = 0 + LocalSum2 = 0 + # for ind,var in enumerate(Inputs.keys()): + # # write the objective function here + # Sum += (ind + 1) * Inputs[var] + # if (ind == 1): + # LocalSum1 = Sum + # return Sum[:], LocalSum1[:] + for ind,var in enumerate(Inputs.keys()): + # write the objective function here + Sum += (ind + 1) * Inputs[var] + if (ind == 0) or (ind == 1): + LocalSum1 += (ind + 1) * Inputs[var] + if (ind == 2) or (ind == 3): + LocalSum2 += (ind + 1) * Inputs[var] + return Sum[:], LocalSum1[:], LocalSum2[:] + +def run(self,Inputs): + """ + RAVEN API + @ In, self, object, RAVEN container + @ In, Inputs, dict, additional inputs + @ Out, None + """ + self.obj1,self.obj2,self.obj3 = evaluate(Inputs) # make sure the name of the objective is consistent with obj1, obj2, obj3. From ab4315c884d4358b41f82ee6cead05acb77b203c Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Sun, 19 Feb 2023 13:14:27 -0700 Subject: [PATCH 02/84] Unnecessary changes in DataSet.py have been removed. --- ravenframework/DataObjects/DataSet.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/ravenframework/DataObjects/DataSet.py b/ravenframework/DataObjects/DataSet.py index a1c21f6f1d..aff28737f9 100644 --- a/ravenframework/DataObjects/DataSet.py +++ b/ravenframework/DataObjects/DataSet.py @@ -213,8 +213,9 @@ def addRealization(self, rlz): indexMap = dict((key, val) for key, val in indexMap[0].items() if key in self.getVars()) # [0] because everything is nested in a list by now, it seems # clean out entries that aren't desired try: - getVariables = self.getVars() - rlz = dict((var, rlz[var]) for var in getVariables + self.indexes) + rlz = dict((var, rlz[var]) for var in self.getVars() + self.indexes) + # getVariables = self.getVars() + # rlz = dict((var, rlz[var]) for var in getVariables + self.indexes) except KeyError as e: self.raiseAWarning('Variables provided:',rlz.keys()) self.raiseAnError(KeyError, f'Provided realization does not have all requisite values for object "{self.name}": "{e.args[0]}"') @@ -243,8 +244,9 @@ def addRealization(self, rlz): # This is because the cNDarray collector expects a LIST of realization, not a single realization. # Maybe the "append" method should be renamed to "extend" or changed to append one at a time. 
# set realizations as a list of realizations (which are ordered lists) - orderedVariables = self._orderedVars - newData = np.array(list(rlz[var] for var in orderedVariables)+[0.0], dtype=object) + newData = np.array(list(rlz[var] for var in self._orderedVars)+[0.0], dtype=object) + # orderedVariables = self._orderedVars + # newData = np.array(list(rlz[var] for var in orderedVariables)+[0.0], dtype=object) newData = newData[:-1] # if data storage isn't set up, set it up if self._collector is None: @@ -1967,8 +1969,10 @@ def _setDataTypes(self, rlz): """ getVariables = self.getVars() if self.types is None: - self.types = [None]*len(getVariables) - for v, name in enumerate(getVariables): + self.types = [None]*len(self.getVars()) + for v, name in enumerate(self.getVars()): + # self.types = [None]*len(getVariables) + # for v, name in enumerate(getVariables): val = rlz[name] self.types[v] = self._getCompatibleType(val) From 8b7f5d3b99dac73def7dd7bdfd58b54a26236ee4 Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Sun, 19 Feb 2023 13:18:26 -0700 Subject: [PATCH 03/84] Unnecessary changes in DataSet.py have been removed. --- ravenframework/DataObjects/DataSet.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/ravenframework/DataObjects/DataSet.py b/ravenframework/DataObjects/DataSet.py index aff28737f9..6dad7c88ff 100644 --- a/ravenframework/DataObjects/DataSet.py +++ b/ravenframework/DataObjects/DataSet.py @@ -214,8 +214,6 @@ def addRealization(self, rlz): # clean out entries that aren't desired try: rlz = dict((var, rlz[var]) for var in self.getVars() + self.indexes) - # getVariables = self.getVars() - # rlz = dict((var, rlz[var]) for var in getVariables + self.indexes) except KeyError as e: self.raiseAWarning('Variables provided:',rlz.keys()) self.raiseAnError(KeyError, f'Provided realization does not have all requisite values for object "{self.name}": "{e.args[0]}"') @@ -245,8 +243,6 @@ def addRealization(self, rlz): # Maybe the "append" method should be renamed to "extend" or changed to append one at a time. # set realizations as a list of realizations (which are ordered lists) newData = np.array(list(rlz[var] for var in self._orderedVars)+[0.0], dtype=object) - # orderedVariables = self._orderedVars - # newData = np.array(list(rlz[var] for var in orderedVariables)+[0.0], dtype=object) newData = newData[:-1] # if data storage isn't set up, set it up if self._collector is None: @@ -1967,12 +1963,9 @@ def _setDataTypes(self, rlz): @ In, rlz, dict, standardized and formatted realization @ Out, None """ - getVariables = self.getVars() if self.types is None: self.types = [None]*len(self.getVars()) for v, name in enumerate(self.getVars()): - # self.types = [None]*len(getVariables) - # for v, name in enumerate(getVariables): val = rlz[name] self.types[v] = self._getCompatibleType(val) From 3fcde82ecfe166b53618c93f2adee23da423d464 Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Wed, 22 Feb 2023 12:14:10 -0700 Subject: [PATCH 04/84] ZDT test is added. 
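For reference, the ZDT_model.py added by this commit evaluates a ZDT1-style objective pair: obj1 = x1 and obj2 = g*h, with g = 1 + (9/n)*sum(x2..xn) and h = 1 - sqrt(obj1/g). The following is a minimal standalone sketch of that arithmetic, not part of the patch; the function name zdt1 and the plain-list input are assumptions for illustration only (note the added model divides by n, whereas the textbook ZDT1 divides by n - 1).

import math

def zdt1(x):
  """
    Evaluates the two ZDT1-style objectives for a design vector x (illustrative sketch, not the RAVEN API).
    @ In, x, list, decision variables, each assumed to lie in [0, 1]
    @ Out, (obj1, obj2), tuple, the two objectives to be minimized
  """
  obj1 = x[0]                          # first objective is simply the first variable
  g = 1.0 + 9.0/len(x)*sum(x[1:])      # the added model uses 9/n; textbook ZDT1 uses 9/(n - 1)
  h = 1.0 - math.sqrt(obj1/g)
  obj2 = g*h                           # second objective
  return obj1, obj2

# e.g. zdt1([0.5, 0.2, 0.8]) returns (0.5, approximately 2.59)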
--- ravenframework/Optimizers/GeneticAlgorithm.py | 30 +++++++------- .../NSGAII/discrete/constrained/ZDT_model.py | 41 +++++++++++++++++++ 2 files changed, 57 insertions(+), 14 deletions(-) create mode 100644 tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT_model.py diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 9654c09dc5..838c8c79c4 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -540,8 +540,8 @@ def _useRealization(self, info, rlz): Fitness = [item for sublist in Fitness.tolist() for item in sublist] Fitness = xr.DataArray(Fitness, - dims=['NumOfConstraintViolated'], - coords={'NumOfConstraintViolated':np.arange(np.shape(Fitness)[0])}) + dims=['NumOfConstraintViolated'], + coords={'NumOfConstraintViolated':np.arange(np.shape(Fitness)[0])}) # 0.2@ n-1: Survivor selection(rlz) # update population container given obtained children @@ -595,16 +595,16 @@ def _useRealization(self, info, rlz): import matplotlib.pyplot as plt # JY: Visualization: all points - This code block needs to be either deleted or revisited. plt.plot(np.array(objs_vals)[:,0], np.array(objs_vals)[:,1],'*') - # plt.xlim(70,100) - # plt.ylim(5,20) # JY: Visualization: optimal points only - This code block needs to be either deleted or revisited. - plt.xlim(75,100) - plt.ylim(5,20) - plt.plot(np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[:,0], - np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[:,1],'*') # plt.xlim(75,100) # plt.ylim(5,20) + plt.xlim(0,1) + plt.ylim(0,6) + plt.title(str('Iteration ' + str(self.counter-1))) + + plt.plot(np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[:,0], + np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[:,1],'*') for i in range(len(np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[:,0])): plt.text(np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[i,0], np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[i,1], str(self.batchId-1)) @@ -829,8 +829,9 @@ def _resolveNewGenerationMulti(self, traj, rlz, info): bestRlz['fitness'] = self.multiBestFitness bestRlz['rank'] = self.multiBestRank bestRlz['CD'] = self.multiBestCD - for ind, consName in enumerate(self.multiBestConstraint.Constraint): - bestRlz['ConstraintEvaluation_'+consName.values.tolist()] = self.multiBestConstraint[ind].values + if len(self.multiBestConstraint) != 0: # No constraints + for ind, consName in enumerate(self.multiBestConstraint.Constraint): + bestRlz['ConstraintEvaluation_'+consName.values.tolist()] = self.multiBestConstraint[ind].values bestRlz.update(self.multiBestPoint) self._optPointHistory[traj].append((bestRlz, info)) elif acceptable == 'rejected': @@ -904,10 +905,11 @@ def _collectOptPointMulti(self, population, rank, CD, objectiveVal, constraints, for i in range(len(optConstraintsV)): optConstNew.append(optConstraintsV[i]) optConstNew = list(map(list, zip(*optConstNew))) - optConstNew = xr.DataArray(optConstNew, - dims=['Constraint','Evaluation'], - coords={'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)], - 
'Evaluation':np.arange(np.shape(optConstNew)[1])}) + if (len(optConstNew)) != 0: + optConstNew = xr.DataArray(optConstNew, + dims=['Constraint','Evaluation'], + coords={'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)], + 'Evaluation':np.arange(np.shape(optConstNew)[1])}) self.multiBestPoint = optPointsDic self.multiBestFitness = optConstraints diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT_model.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT_model.py new file mode 100644 index 0000000000..06433ea466 --- /dev/null +++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT_model.py @@ -0,0 +1,41 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# @author: Junyung Kim (@JunyungKim-INL) and Mohammad Abdo (@Jimmy-INL) + +import math + +def evaluate(Inputs): + Sum = 0 + obj1 = 0 + + for ind,var in enumerate(Inputs.keys()): + # write the objective function here + if (ind == 0) : + obj1 += Inputs[var] + if (ind != 0): + Sum += Inputs[var] + g = 1 + (9/len(Inputs.keys())*Sum ) + h = 1 - math.sqrt(obj1/g) + obj2 = g*h + return obj1[:], obj2[:] + +def run(self,Inputs): + """ + RAVEN API + @ In, self, object, RAVEN container + @ In, Inputs, dict, additional inputs + @ Out, None + """ + self.obj1,self.obj2 = evaluate(Inputs) # make sure the name of the objective is consistent with obj1, obj2, obj3. From 15debe4e9480662d149e3539da2cf0cc32b0f9e9 Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Fri, 24 Feb 2023 12:14:31 -0700 Subject: [PATCH 05/84] Optimizer.py and RavenSampled.py are updated after having regression tests. Most of conflicts are resolved. --- ravenframework/Optimizers/Optimizer.py | 3 +- ravenframework/Optimizers/RavenSampled.py | 95 +++++++++---------- .../NSGAII/discrete/constrained/ZDT_model.py | 2 +- 3 files changed, 46 insertions(+), 54 deletions(-) diff --git a/ravenframework/Optimizers/Optimizer.py b/ravenframework/Optimizers/Optimizer.py index a3f99ed389..7621901b26 100644 --- a/ravenframework/Optimizers/Optimizer.py +++ b/ravenframework/Optimizers/Optimizer.py @@ -166,7 +166,8 @@ def __init__(self): self._cancelledTraj = {} # tracks cancelled trajectories, and reasons self._convergedTraj = {} # tracks converged trajectories, and values obtained self._numRepeatSamples = 1 # number of times to repeat sampling (e.g. denoising) - self._objectiveVar = [] # objective variable for optimization + # self._objectiveVar = [] # objective variable for optimization + self._objectiveVar = None # objective variable for optimization self._initialValuesFromInput = None # initial variable values from inputs, list of dicts (used to reset optimizer when re-running workflow) self._initialValues = None # initial variable values (trajectory starting locations), list of dicts self._variableBounds = None # dictionary of upper/lower bounds for each variable (may be inf?) 
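The Optimizer.py change above, together with the RavenSampled.py hunks that follow, makes self._objectiveVar a plain string for single-objective runs and a list of strings for multi-objective runs, which is why the sampled-optimizer code now branches on type(self._objectiveVar) == str. A minimal sketch of the underlying idea is below; objectiveAsList is a hypothetical helper, not part of the patch, shown only to illustrate the normalization that the repeated type checks accomplish.

def objectiveAsList(objectiveVar):
  """
    Normalizes the objective specification to a list of names (illustrative sketch, not RAVEN code).
    @ In, objectiveVar, str or list, objective variable name(s) as parsed from the <objective> node
    @ Out, objList, list, objective variable names
  """
  if isinstance(objectiveVar, str):
    return [objectiveVar]            # single-objective: wrap the lone name
  return list(objectiveVar)          # multi-objective: already a list of names

# single objective:  objectiveAsList('obj')             -> ['obj']
# multi objective:   objectiveAsList(['obj1', 'obj2'])  -> ['obj1', 'obj2']

With such a helper, the sign flip for maximization and the solution-export loop could share one code path for both cases; the hunks below instead branch explicitly on the string versus list type.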
diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py index 39654feb70..39a3d9e15d 100644 --- a/ravenframework/Optimizers/RavenSampled.py +++ b/ravenframework/Optimizers/RavenSampled.py @@ -301,8 +301,11 @@ def localFinalizeActualSampling(self, jobObject, model, myInput): # the sign of the objective function is flipped in case we do maximization # so get the correct-signed value into the realization if self._minMax == 'max': - for i in range(len(self._objectiveVar)): - rlz[self._objectiveVar[i]] *= -1 + if type(self._objectiveVar) == str: + rlz[self._objectiveVar] *= -1 + else: + for i in range(len(self._objectiveVar)): + rlz[self._objectiveVar[i]] *= -1 # TODO FIXME let normalizeData work on an xr.DataSet (batch) not just a dictionary! rlz = self.normalizeData(rlz) self._useRealization(info, rlz) @@ -401,57 +404,39 @@ def finalizeSampler(self, failedRuns): self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' + 'Perhaps the Model failed?') - for i in range(len(self._optPointHistory[traj][-1][0]['obj1'])): + if type(self._objectiveVar) == str: opt = self._optPointHistory[traj][-1][0] - key = list(opt.keys()) - val = [item[i] for item in opt.values()] - optElm = {key[a]: val[a] for a in range(len(key))} - optVal = [s*optElm[self._objectiveVar[b]] for b in range(len(self._objectiveVar))] - # self.raiseADebug(statusTemplate_multi.format(status='active', traj=traj, val1=val1, val2=val2)) - # bestValue_1 = val1 - # bestValue_2 = val2 - bestTraj = traj - bestOpt = self.denormalizeData(optElm) + val = opt[self._objectiveVar] + self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val)) + if bestValue is None or val < bestValue: + bestValue = val + bestTraj = traj + bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0]) bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled) - # self.raiseADebug('') - # self.raiseAMessage(' - Final Optimal Point:') - # finalTemplate = ' {name_1:^20s} {name_2:^20s} {value_1:^20s} {value_2:^20s}' - # finalTemplateInt = ' {name:^20s} {value: 3d}' - # self.raiseAMessage(finalTemplate.format(name_1=self._objectiveVar[0], name_2=self._objectiveVar[1], value_1=bestValue_1, value_2=bestValue_2)) - # self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj)) - # JY: 23/01/20 two lines below are temperarily commented. If it is not needed, then will be deleted. 
- # for var, val in bestPoint.items(): - # self.raiseAMessage(finalTemplate.format(name=var, value=val)) - # self.raiseAMessage('*' * 80) + self.raiseADebug('') + self.raiseAMessage(' - Final Optimal Point:') + finalTemplate = ' {name:^20s} {value: 1.3e}' + finalTemplateInt = ' {name:^20s} {value: 3d}' + self.raiseAMessage(finalTemplate.format(name=self._objectiveVar, value=s * bestValue)) + self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj)) + for var, val in bestPoint.items(): + self.raiseAMessage(finalTemplate.format(name=var, value=val)) + self.raiseAMessage('*' * 80) # write final best solution to soln export self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None') + else: + for i in range(len(self._optPointHistory[traj][-1][0][self._objectiveVar[0]])): + opt = self._optPointHistory[traj][-1][0] + key = list(opt.keys()) + val = [item[i] for item in opt.values()] + optElm = {key[a]: val[a] for a in range(len(key))} + optVal = [s*optElm[self._objectiveVar[b]] for b in range(len(self._objectiveVar))] + + bestTraj = traj + bestOpt = self.denormalizeData(optElm) + bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled) - # ### Original start ### - # opt = self._optPointHistory[traj][-1][0] - # val = range(len(self._objectiveVar)) - # for i in range(len(val)): - # val[i] = [', '.join(map(str,(s*opt[self._objectiveVar[i]])))] - # # val1 = ', '.join(map(str,(s*opt[self._objectiveVar[0]]).tolist())) - # # val2 = ', '.join(map(str,(s*opt[self._objectiveVar[1]]).tolist())) - # # self.raiseADebug(statusTemplate_multi.format(status='active', traj=traj, val1=val1, val2=val2)) - # # bestValue_1 = val1 - # # bestValue_2 = val2 - # bestTraj = traj - # bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0]) - # bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled) - # self.raiseADebug('') - # self.raiseAMessage(' - Final Optimal Point:') - # # finalTemplate = ' {name_1:^20s} {name_2:^20s} {value_1:^20s} {value_2:^20s}' - # # finalTemplateInt = ' {name:^20s} {value: 3d}' - # # self.raiseAMessage(finalTemplate.format(name_1=self._objectiveVar[0], name_2=self._objectiveVar[1], value_1=bestValue_1, value_2=bestValue_2)) - # # self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj)) - # # JY: 23/01/20 two lines below are temperarily commented. If it is not needed, then will be deleted. - # # for var, val in bestPoint.items(): - # # self.raiseAMessage(finalTemplate.format(name=var, value=val)) - # self.raiseAMessage('*' * 80) - # # write final best solution to soln export - # self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None') - # ### Original end ### + self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None') def flush(self): """ @@ -587,7 +572,10 @@ def _handleImplicitConstraints(self, previous): @ Out, accept, bool, whether point was satisfied implicit constraints """ normed = copy.deepcopy(previous) - oldVal = normed[self._objectiveVar[0]] + if type(self._objectiveVar) == str: + oldVal = normed[self._objectiveVar] + else: + oldVal = normed[self._objectiveVar[0]] normed.pop(self._objectiveVar[0], oldVal) denormed = self.denormalizeData(normed) denormed[self._objectiveVar[0]] = oldVal @@ -658,7 +646,10 @@ def _resolveNewOptPoint(self, traj, rlz, optVal, info): # TODO could we ever use old rerun gradients to inform the gradient direction as well? 
self._rerunsSinceAccept[traj] += 1 N = self._rerunsSinceAccept[traj] + 1 - oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar[0]] + if type(self._objectiveVar) == str: + oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar] + else: + oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar[0]] newAvg = ((N-1)*oldVal + optVal) / N self._optPointHistory[traj][-1][0][self._objectiveVar[0]] = newAvg else: @@ -724,8 +715,8 @@ def _updateSolutionExport(self, traj, rlz, acceptable, rejectReason): 'rejectReason': rejectReason }) # optimal point input and output spaces - if len(self._objectiveVar) == 1: # Single Objective Optimization - objValue = rlz[self._objectiveVar[0]] + if type(self._objectiveVar) == str: # Single Objective Optimization + objValue = rlz[self._objectiveVar] if self._minMax == 'max': objValue *= -1 toExport[self._objectiveVar[0]] = objValue diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT_model.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT_model.py index 06433ea466..ca53da447d 100644 --- a/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT_model.py +++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT_model.py @@ -38,4 +38,4 @@ def run(self,Inputs): @ In, Inputs, dict, additional inputs @ Out, None """ - self.obj1,self.obj2 = evaluate(Inputs) # make sure the name of the objective is consistent with obj1, obj2, obj3. + self.obj1,self.obj2 = evaluate(Inputs) From 64510df566b655faf5e04b42c781e019bc4e034d Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Fri, 24 Feb 2023 12:25:13 -0700 Subject: [PATCH 06/84] minor update on Optimizer.py --- ravenframework/Optimizers/Optimizer.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ravenframework/Optimizers/Optimizer.py b/ravenframework/Optimizers/Optimizer.py index 7621901b26..1cf5e2ae5e 100644 --- a/ravenframework/Optimizers/Optimizer.py +++ b/ravenframework/Optimizers/Optimizer.py @@ -166,7 +166,6 @@ def __init__(self): self._cancelledTraj = {} # tracks cancelled trajectories, and reasons self._convergedTraj = {} # tracks converged trajectories, and values obtained self._numRepeatSamples = 1 # number of times to repeat sampling (e.g. 
denoising) - # self._objectiveVar = [] # objective variable for optimization self._objectiveVar = None # objective variable for optimization self._initialValuesFromInput = None # initial variable values from inputs, list of dicts (used to reset optimizer when re-running workflow) self._initialValues = None # initial variable values (trajectory starting locations), list of dicts From b1f0c3f77e044e6ab3c21da4f3de2fa378c2eeac Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Fri, 10 Mar 2023 20:13:27 -0700 Subject: [PATCH 07/84] temporary fix, not the way I want --- ravenframework/Optimizers/GeneticAlgorithm.py | 3 ++- ravenframework/Optimizers/GradientDescent.py | 5 +++++ ravenframework/Optimizers/Optimizer.py | 12 +++--------- ravenframework/Optimizers/RavenSampled.py | 13 ++++++++----- ravenframework/Optimizers/SimulatedAnnealing.py | 5 +++++ 5 files changed, 23 insertions(+), 15 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 838c8c79c4..d358e1934c 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -108,6 +108,7 @@ def __init__(self): self._penaltyCoeff = None self._fitnessInstance = None self._repairInstance = None + self._canHandleMultiObjective = True ########################## # Initialization Methods # @@ -451,7 +452,7 @@ def _useRealization(self, info, rlz): # 0.1 @ n-1: fitnessCalculation(rlz) # perform fitness calculation for newly obtained children (rlz) - if len(self._objectiveVar) == 1: # This is a single-objective Optimization case + if not self._canHandleMultiObjective or len(self._objectiveVar) == 1: # This is a single-objective Optimization case offSprings = datasetToDataArray(rlz, list(self.toBeSampled)) objectiveVal = list(np.atleast_1d(rlz[self._objectiveVar[0]].data)) diff --git a/ravenframework/Optimizers/GradientDescent.py b/ravenframework/Optimizers/GradientDescent.py index 452f579f4e..cd64cddc04 100644 --- a/ravenframework/Optimizers/GradientDescent.py +++ b/ravenframework/Optimizers/GradientDescent.py @@ -212,6 +212,7 @@ def __init__(self): self._followerProximity = 1e-2 # distance at which annihilation can start occurring, in ?normalized? space self._trajectoryFollowers = defaultdict(list) # map of trajectories to the trajectories following them self._functionalConstraintExplorationLimit = 500 # number of input-space explorations allowable for functional constraints + self._canHandleMultiObjective = False # Currently Gradient Descent cannot handle multiobjective optimization # __private # additional methods # register adaptive sample identification criteria @@ -338,6 +339,10 @@ def _useRealization(self, info, rlz): @ Out, None """ traj = info['traj'] + if not self._canHandleMultiObjective and len(self._objectiveVar) == 1: + self._objectiveVar = self._objectiveVar[0] + elif type(self._objectiveVar) == list: + self.raiseAnError(IOError, 'Gradient Descent does not support multiObjective optimization yet! 
objective variable must be a single variable for now!') optVal = rlz[self._objectiveVar] info['optVal'] = optVal purpose = info['purpose'] diff --git a/ravenframework/Optimizers/Optimizer.py b/ravenframework/Optimizers/Optimizer.py index 1cf5e2ae5e..011f6fd9d3 100644 --- a/ravenframework/Optimizers/Optimizer.py +++ b/ravenframework/Optimizers/Optimizer.py @@ -78,9 +78,9 @@ def getInputSpecification(cls): specs.description = 'Optimizers' # objective variable - specs.addSub(InputData.parameterInputFactory('objective', contentType=InputTypes.StringType, strictMode=True, + specs.addSub(InputData.parameterInputFactory('objective', contentType=InputTypes.StringListType, strictMode=True, printPriority=90, # more important than - descr=r"""Name of the response variable (or ``objective function'') that should be optimized + descr=r"""Name of the objective variable (or ``objective function'') that should be optimized (minimized or maximized).""")) # modify Sampler variable nodes @@ -247,13 +247,7 @@ def handleInput(self, paramInput): @ Out, None """ # the reading of variables (dist or func) and constants already happened in _readMoreXMLbase in Sampler - - if bool(paramInput.findAll('GAparams')): - rawObjectiveVar = paramInput.findFirst('objective').value - self._objectiveVar = [rawObjectiveVar.split(",")[i] for i in range(0,len(rawObjectiveVar.split(","))) ] - - else: - self._objectiveVar = paramInput.findFirst('objective').value + self._objectiveVar = paramInput.findFirst('objective').value # sampler init # self.readSamplerInit() can't be used because it requires the xml node diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py index 39a3d9e15d..7d761561f7 100644 --- a/ravenframework/Optimizers/RavenSampled.py +++ b/ravenframework/Optimizers/RavenSampled.py @@ -301,11 +301,14 @@ def localFinalizeActualSampling(self, jobObject, model, myInput): # the sign of the objective function is flipped in case we do maximization # so get the correct-signed value into the realization if self._minMax == 'max': - if type(self._objectiveVar) == str: + if not self._canHandleMultiObjective and len(self._objectiveVar) == 1: + self._objectiveVar = self._objectiveVar[0] rlz[self._objectiveVar] *= -1 - else: + elif type(self._objectiveVar) == list: for i in range(len(self._objectiveVar)): rlz[self._objectiveVar[i]] *= -1 + else: + rlz[self._objectiveVar] *= -1 # TODO FIXME let normalizeData work on an xr.DataSet (batch) not just a dictionary! rlz = self.normalizeData(rlz) self._useRealization(info, rlz) @@ -316,7 +319,7 @@ def finalizeSampler(self, failedRuns): @ In, failedRuns, list, runs that failed as part of this sampling @ Out, None """ - if len(self._objectiveVar) == 1: + if not self._canHandleMultiObjective or len(self._objectiveVar) == 1: # get and print the best trajectory obtained bestValue = None bestTraj = None @@ -350,7 +353,7 @@ def finalizeSampler(self, failedRuns): self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! 
' + 'Perhaps the Model failed?') opt = self._optPointHistory[traj][-1][0] - val = opt[self._objectiveVar[0]] + val = opt[self._objectiveVar] self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val)) if bestValue is None or val < bestValue: bestValue = val @@ -719,7 +722,7 @@ def _updateSolutionExport(self, traj, rlz, acceptable, rejectReason): objValue = rlz[self._objectiveVar] if self._minMax == 'max': objValue *= -1 - toExport[self._objectiveVar[0]] = objValue + toExport[self._objectiveVar] = objValue else: # Multi Objective Optimization for i in range(len(self._objectiveVar)): objValue = rlz[self._objectiveVar[i]] diff --git a/ravenframework/Optimizers/SimulatedAnnealing.py b/ravenframework/Optimizers/SimulatedAnnealing.py index 03f1ba1445..ab47591812 100644 --- a/ravenframework/Optimizers/SimulatedAnnealing.py +++ b/ravenframework/Optimizers/SimulatedAnnealing.py @@ -190,6 +190,7 @@ def __init__(self): self._coolingMethod = None # initializing cooling method self._coolingParameters = {} # initializing the cooling schedule parameters self.info = {} + self._canHandleMultiObjective = False # Currently Simulated Annealing can only handle single objective def handleInput(self, paramInput): """ @@ -300,6 +301,10 @@ def _useRealization(self, info, rlz): @ Out, None """ traj = info['traj'] + if len(self._objectiveVar) == 1: + self._objectiveVar = self._objectiveVar[0] + elif type(self._objectiveVar) == list: + self.raiseAnError(IOError, 'Simulated Annealing does not support multiObjective yet! objective variable must be a single variable for now!') info['optVal'] = rlz[self._objectiveVar] self.incrementIteration(traj) self._resolveNewOptPoint(traj, rlz, rlz[self._objectiveVar], info) From 52389c389fbeedc582a3c7c49b8722dc14f251f1 Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Mon, 13 Mar 2023 10:30:09 -0600 Subject: [PATCH 08/84] NSGA-II testing fiels (multiSum wConstratint and ZDT1) are added. --- ravenframework/Optimizers/RavenSampled.py | 4 +- .../MultiSumwConst/MinwoRepMultiObjective.xml | 160 ++++++++++ .../{ => MultiSumwConst}/myConstraints.py | 0 .../{ => MultiSumwConst}/myLocalSum_multi.py | 0 .../NSGAII/discrete/constrained/ZDT1/ZDT1.xml | 295 ++++++++++++++++++ .../constrained/{ => ZDT1}/ZDT_model.py | 0 6 files changed, 457 insertions(+), 2 deletions(-) create mode 100644 tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml rename tests/framework/Optimizers/NSGAII/discrete/constrained/{ => MultiSumwConst}/myConstraints.py (100%) rename tests/framework/Optimizers/NSGAII/discrete/constrained/{ => MultiSumwConst}/myLocalSum_multi.py (100%) create mode 100644 tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT1.xml rename tests/framework/Optimizers/NSGAII/discrete/constrained/{ => ZDT1}/ZDT_model.py (100%) diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py index 39a3d9e15d..06fa1d628c 100644 --- a/ravenframework/Optimizers/RavenSampled.py +++ b/ravenframework/Optimizers/RavenSampled.py @@ -316,7 +316,7 @@ def finalizeSampler(self, failedRuns): @ In, failedRuns, list, runs that failed as part of this sampling @ Out, None """ - if len(self._objectiveVar) == 1: + if type(self._objectiveVar) == str: # get and print the best trajectory obtained bestValue = None bestTraj = None @@ -350,7 +350,7 @@ def finalizeSampler(self, failedRuns): self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! 
' + 'Perhaps the Model failed?') opt = self._optPointHistory[traj][-1][0] - val = opt[self._objectiveVar[0]] + val = opt[self._objectiveVar] self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val)) if bestValue is None or val < bestValue: bestValue = val diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml new file mode 100644 index 0000000000..a32463eb13 --- /dev/null +++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml @@ -0,0 +1,160 @@ + + + + \raven\tests\framework\Optimizers\NSGAII\discrete\constrained\ + Junyung Kim, Mohammad Abdo + 2022-12-21 + + NSGA-II min-min test + + + + Multi_MinwoReplacement_Figure_720 + optimize,print + 2 + + + + + placeholder + myLocalSum + GAopt + opt_export + optOut + opt_export + + + opt_export + optOut + opt_export + optOut + + + + + + x1,x2,x3,x4,x5,x6,obj1,obj2 + + + + + + x1,x2,x3,x4,x5,x6 + + + x1,x2,x3,x4,x5,x6,obj1 + + + + + + 2 + 7 + withoutReplacement + + + + + + + 2 + 42 + every + min + + + + 10 + tournamentSelection + + + 0.7 + + + 0.7 + + + + + rankNcrowdingBased + + + 0.0 + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + woRep_dist + + obj1, obj2 + optOut + MC_samp + expConstr3 + impConstr3 + + + + + + + 10 + 050877 + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + + + + + + x1,x2,x3,x4,x5,x6 + obj1,obj2 + + + trajID + x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,ConstraintEvaluation_expConstr3, ConstraintEvaluation_impConstr3,fitness,accepted + + + + + + csv + optOut + + + csv + opt_export + trajID + + + diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/myConstraints.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py similarity index 100% rename from tests/framework/Optimizers/NSGAII/discrete/constrained/myConstraints.py rename to tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/myLocalSum_multi.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myLocalSum_multi.py similarity index 100% rename from tests/framework/Optimizers/NSGAII/discrete/constrained/myLocalSum_multi.py rename to tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myLocalSum_multi.py diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT1.xml b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT1.xml new file mode 100644 index 0000000000..80ad0c28a7 --- /dev/null +++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT1.xml @@ -0,0 +1,295 @@ + + + + raven\tests\framework\Optimizers\NSGAII\discrete\constrained\ + Junyung Kim, Mohammad Abdo + 2023-02-21 + + ZDT1 test using NSGA-II + + + + ZDT1_result_300iter_150Popu + optimize,print + 1 + + + + + placeholder + ZDT + GAopt + opt_export + optOut + opt_export + + + opt_export + optOut + opt_export + optOut + + + + + + x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30,obj1,obj2 + + + + + + 0 + 1 + + + + + + + 300 + 42 + every + min + + + + 150 + tournamentSelection + + + 1.0 + + + 1.0 + + + + + rankNcrowdingBased + + + + 0.0 + + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + 
unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + obj1, obj2 + optOut + MC_samp + + + + + + + 150 + 050877 + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + unifDist + + + + + + + + x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30 + obj1,obj2 + + + trajID + x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30,obj1,obj2,age,batchId,rank,CD,fitness,accepted + + + + + + csv + optOut + + + csv + opt_export + trajID + + + diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT_model.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py similarity index 100% rename from tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT_model.py rename to tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py From 391b9c3f168a8cbe046e586220b873079ab8d03b Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 14 Mar 2023 09:28:26 -0600 Subject: [PATCH 09/84] moving models, xmls, and trying to resolve GD after converting objective to a list --- ravenframework/Optimizers/GeneticAlgorithm.py | 4 +- ravenframework/Optimizers/GradientDescent.py | 18 +- ravenframework/Optimizers/RavenSampled.py | 11 +- ravenframework/Optimizers/fitness/fitness.py | 2 + .../optimizing/myConstraints.py | 108 ++++++++++++ .../optimizing/myLocalSum_multi.py | 43 +++++ .../continuous/unconstrained/ZDT1.xml | 132 +++++++++++++++ .../constrained/MinwoRepMultiObjective.xml | 160 ++++++++++++++++++ .../unconstrained/ZDT1/opt_export_0.csv | 150 ++++++++++++++++ .../Multi_MinwoReplacement/opt_export_0.csv | 13 ++ .../Optimizers/GeneticAlgorithms/tests | 20 +++ 11 files changed, 644 insertions(+), 17 deletions(-) create mode 100644 tests/framework/AnalyticModels/optimizing/myConstraints.py create mode 100644 tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py create mode 100644 tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml create mode 100644 tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml create mode 100644 tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv create mode 100644 tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index d358e1934c..e321cc0441 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -1244,8 +1244,8 @@ def _addToSolutionExport(self, traj, rlz, acceptable): 'fitness': rlz['fitness'], 'AHDp': self.ahdp, 'AHD': self.ahd, - 'rank': 0 if len(self._objectiveVar) == 1 else 
rlz['rank'], - 'CD': 0 if len(self._objectiveVar) == 1 else rlz['CD']} + 'rank': 0 if ((type(self._objectiveVar) == list and len(self._objectiveVar) == 1) or type(self._objectiveVar) == str) else rlz['rank'], + 'CD': 0 if ((type(self._objectiveVar) == list and len(self._objectiveVar) == 1) or type(self._objectiveVar) == str) else rlz['CD']} for var, val in self.constants.items(): toAdd[var] = val diff --git a/ravenframework/Optimizers/GradientDescent.py b/ravenframework/Optimizers/GradientDescent.py index cd64cddc04..7a37205c28 100644 --- a/ravenframework/Optimizers/GradientDescent.py +++ b/ravenframework/Optimizers/GradientDescent.py @@ -339,11 +339,11 @@ def _useRealization(self, info, rlz): @ Out, None """ traj = info['traj'] - if not self._canHandleMultiObjective and len(self._objectiveVar) == 1: - self._objectiveVar = self._objectiveVar[0] - elif type(self._objectiveVar) == list: + # if not self._canHandleMultiObjective and len(self._objectiveVar) == 1: + # self._objectiveVar = self._objectiveVar[0] + if len(self._objectiveVar) > 1 and type(self._objectiveVar)==list: self.raiseAnError(IOError, 'Gradient Descent does not support multiObjective optimization yet! objective variable must be a single variable for now!') - optVal = rlz[self._objectiveVar] + optVal = rlz[self._objectiveVar[0]] info['optVal'] = optVal purpose = info['purpose'] if purpose.startswith('opt'): @@ -358,13 +358,13 @@ def _useRealization(self, info, rlz): gradMag, gradVersor, _ = self._gradientInstance.evaluate(opt, grads, gradInfos, - self._objectiveVar) + self._objectiveVar[0]) self.raiseADebug(' ... gradient calculated ...') self._gradHistory[traj].append((gradMag, gradVersor)) # get new step information try: newOpt, stepSize, stepInfo = self._stepInstance.step(opt, - objVar=self._objectiveVar, + objVar=self._objectiveVar[0], optHist=self._optPointHistory[traj], gradientHist=self._gradHistory[traj], prevStepSize=self._stepHistory[traj], @@ -383,7 +383,7 @@ def _useRealization(self, info, rlz): except NoConstraintResolutionFound: # we've tried everything, but we just can't hack it self.raiseAMessage(f'Optimizer "{self.name}" trajectory {traj} was unable to continue due to functional or boundary constraints.') - self._closeTrajectory(traj, 'converge', 'no constraint resolution', opt[self._objectiveVar]) + self._closeTrajectory(traj, 'converge', 'no constraint resolution', opt[self._objectiveVar[0]]) return # update values if modified by constraint handling @@ -603,7 +603,7 @@ def _checkAcceptability(self, traj, opt, optVal, info): # Check acceptability if self._optPointHistory[traj]: old, _ = self._optPointHistory[traj][-1] - oldVal = old[self._objectiveVar] + oldVal = old[self._objectiveVar[0]] # check if following another trajectory if self._terminateFollowers: following = self._stepInstance.trajIsFollowing(traj, self.denormalizeData(opt), info, @@ -820,7 +820,7 @@ def _checkConvObjective(self, traj): return False o1, _ = self._optPointHistory[traj][-1] o2, _ = self._optPointHistory[traj][-2] - delta = mathUtils.relativeDiff(o2[self._objectiveVar], o1[self._objectiveVar]) + delta = mathUtils.relativeDiff(o2[self._objectiveVar[0]], o1[self._objectiveVar[0]]) converged = abs(delta) < self._convergenceCriteria['objective'] self.raiseADebug(self.convFormat.format(name='objective', conv=str(converged), diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py index 7d761561f7..061824a8ee 100644 --- a/ravenframework/Optimizers/RavenSampled.py +++ 
b/ravenframework/Optimizers/RavenSampled.py @@ -302,8 +302,7 @@ def localFinalizeActualSampling(self, jobObject, model, myInput): # so get the correct-signed value into the realization if self._minMax == 'max': if not self._canHandleMultiObjective and len(self._objectiveVar) == 1: - self._objectiveVar = self._objectiveVar[0] - rlz[self._objectiveVar] *= -1 + rlz[self._objectiveVar[0]] *= -1 elif type(self._objectiveVar) == list: for i in range(len(self._objectiveVar)): rlz[self._objectiveVar[i]] *= -1 @@ -353,7 +352,7 @@ def finalizeSampler(self, failedRuns): self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' + 'Perhaps the Model failed?') opt = self._optPointHistory[traj][-1][0] - val = opt[self._objectiveVar] + val = opt[self._objectiveVar[0]] self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val)) if bestValue is None or val < bestValue: bestValue = val @@ -718,11 +717,11 @@ def _updateSolutionExport(self, traj, rlz, acceptable, rejectReason): 'rejectReason': rejectReason }) # optimal point input and output spaces - if type(self._objectiveVar) == str: # Single Objective Optimization - objValue = rlz[self._objectiveVar] + if len(self._objectiveVar) == 1: # Single Objective Optimization + objValue = rlz[self._objectiveVar[0]] if self._minMax == 'max': objValue *= -1 - toExport[self._objectiveVar] = objValue + toExport[self._objectiveVar[0]] = objValue else: # Multi Objective Optimization for i in range(len(self._objectiveVar)): objValue = rlz[self._objectiveVar[i]] diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index 6158537cb1..d2481a34b6 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -24,6 +24,7 @@ # External Imports import numpy as np import xarray as xr +import sys # Internal Imports # [MANDD] Note: the fitness function are bounded by 2 parameters: a and b @@ -102,6 +103,7 @@ def rank_crowding(rlz,**kwargs): coords={'rank': np.arange(np.shape(offSpringRank)[0])}) offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, popSize=len(offSpringRank), objectives=np.array(offspringObjsVals)) + offSpringCD[offSpringCD==np.inf] = sys.float_info.max offSpringCD = xr.DataArray(offSpringCD, dims=['CrowdingDistance'], coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) diff --git a/tests/framework/AnalyticModels/optimizing/myConstraints.py b/tests/framework/AnalyticModels/optimizing/myConstraints.py new file mode 100644 index 0000000000..bc704cfa57 --- /dev/null +++ b/tests/framework/AnalyticModels/optimizing/myConstraints.py @@ -0,0 +1,108 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# @ author: Mohammad Abdo (@Jimmy-INL) + +import numpy as np + +def constrain(Input):#Complete this: give the function the correct name# + """ + This function calls the explicit constraint whose name is passed through Input.name + the evaluation function g is negative if the explicit constraint is violated and positive otherwise. + This suits the constraint handling in the Genetic Algorithms, + but not the Gradient Descent as the latter expects True if the solution passes the constraint and False if it violates it. + @ In, Input, object, RAVEN container + @ Out, g, float, explicit constraint evaluation (negative if violated and positive otherwise) + """ + g = eval(Input.name)(Input) + return g + +def implicitConstraint(Input): + """ + Evaluates the implicit constraint function at a given point/solution ($\vec(x)$) + @ In, Input, object, RAVEN container + @ Out, g(inputs x1,x2,..,output or dependent variable), float, implicit constraint evaluation function + the way the constraint is designed is that + the constraint function has to be >= 0, + so if: + 1) f(x,y) >= 0 then g = f + 2) f(x,y) >= a then g = f - a + 3) f(x,y) <= b then g = b - f + 4) f(x,y) = c then g = 1e-6 - abs((f(x,y) - c)) (equality constraint) + """ + g = eval(Input.name)(Input) + return g + + +def expConstr1(Input):#You are free to pick this name but it has to be similar to the one in the xml# + """ + Let's assume that the constraint is: + $ x3+x4 < 8 $ + then g the constraint evaluation function (which has to be > 0) is taken to be: + g = 8 - (x3+x4) + in this case if g(\vec(x)) < 0 then this x violates the constraint and vice versa + @ In, Input, object, RAVEN container + @ out, g, float, explicit constraint 1 evaluation function + """ + g = 8 - Input.x3 - Input.x4 + return g + +def expConstr2(Input): + """ + Explicit Equality Constraint: + let's consider the constraint x1**2 + x2**2 = 25 + The way to write g is to use a very small number for instance, epsilon = 1e-12 + and then g = epsilon - abs(constraint) + @ In, Input, object, RAVEN container + @ out, g, float, explicit constraint 2 evaluation function + """ + g = 1e-12 - abs(Input.x1**2 + Input.x2**2 - 25) + return g + +def expConstr3(Input): + """ + @ In, Input, object, RAVEN container + @ out, g, float, explicit constraint 3 evaluation function + """ + g = 10 - Input.x3 - Input.x4 + return g + +def impConstr1(Input): + """ + The implicit constraint involves variables from the output space, for example the objective variable or + a dependent variable that is not in the optimization search space + @ In, Input, object, RAVEN container + @ out, g, float, implicit constraint 1 evaluation function + """ + g = 10 - Input.x1**2 - Input.obj + return g + +def impConstr2(Input): + """ + The implicit constraint involves variables from the output space, for example the objective variable or + a dependent variable that is not in the optimization search space + @ In, Input, object, RAVEN container + @ out, g, float, implicit constraint 2 evaluation function + """ + g = Input.x1**2 + Input.obj - 10 + return g + +def impConstr3(Input): + """ + The implicit constraint involves variables from the output space, for example the objective variable or + a dependent variable that is not in the optimization search space + @ In, Input, object, RAVEN container + @ out, g, float, implicit constraint 3 evaluation function + """ + g = 100 - Input.obj1 + return g \ No newline at end of file diff --git a/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py 
b/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py new file mode 100644 index 0000000000..509f852fd1 --- /dev/null +++ b/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py @@ -0,0 +1,43 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# @author: Mohammad Abdo (@Jimmy-INL) + +def evaluate(Inputs): + Sum = 0 + LocalSum1 = 0 + LocalSum2 = 0 + # for ind,var in enumerate(Inputs.keys()): + # # write the objective function here + # Sum += (ind + 1) * Inputs[var] + # if (ind == 1): + # LocalSum1 = Sum + # return Sum[:], LocalSum1[:] + for ind,var in enumerate(Inputs.keys()): + # write the objective function here + Sum += (ind + 1) * Inputs[var] + if (ind == 0) or (ind == 1): + LocalSum1 += (ind + 1) * Inputs[var] + if (ind == 2) or (ind == 3): + LocalSum2 += (ind + 1) * Inputs[var] + return Sum[:], LocalSum1[:], LocalSum2[:] + +def run(self,Inputs): + """ + RAVEN API + @ In, self, object, RAVEN container + @ In, Inputs, dict, additional inputs + @ Out, None + """ + self.obj1,self.obj2,self.obj3 = evaluate(Inputs) # make sure the name of the objective is consistent with obj1, obj2, obj3. diff --git a/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml new file mode 100644 index 0000000000..94d0caf169 --- /dev/null +++ b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml @@ -0,0 +1,132 @@ + + + + raven/tests/framework/Optimizers/GA.MultiObjZDT1 + Junyung Kim + 2023-02-21 + + ZDT1 test using NSGA-II + + + + ZDT1 + optimize,print + 1 + + + + + placeholder + ZDT + GAopt + opt_export + optOut + opt_export + + + opt_export + optOut + opt_export + optOut + + + + + + x1,x2,x3,obj1,obj2 + + + + + + 0 + 1 + + + + + + + 15 + 42 + every + min + + + + 10 + tournamentSelection + + + 1.0 + + + 1.0 + + + + + rankNcrowdingBased + + + + 0.0 + + + + unifDist + + + unifDist + + + unifDist + + obj1, obj2 + optOut + MC_samp + + + + + + + 10 + 050877 + + + unifDist + + + unifDist + + + unifDist + + + + + + + + x1,x2,x3 + obj1,obj2 + + + trajID + x1,x2,x3,obj1,obj2,age,batchId,rank,CD,fitness,accepted + + + + + + csv + optOut + + + csv + opt_export + trajID + + + diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml new file mode 100644 index 0000000000..f3f6ce1208 --- /dev/null +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml @@ -0,0 +1,160 @@ + + + + raven/tests/framework/Optimizers/GA. 
+ Junyung Kim + 2022-12-21 + + NSGA-II min-min test + + + + Multi_MinwoReplacement + optimize,print + 2 + + + + + placeholder + myLocalSum + GAopt + opt_export + optOut + opt_export + + + opt_export + optOut + opt_export + optOut + + + + + + x1,x2,x3,x4,x5,x6,obj1,obj2 + + + + + + x1,x2,x3,x4,x5,x6 + + + x1,x2,x3,x4,x5,x6,obj1 + + + + + + 2 + 7 + withoutReplacement + + + + + + + 2 + 42 + every + min + + + + 10 + tournamentSelection + + + 0.7 + + + 0.7 + + + + + rankNcrowdingBased + + + 0.0 + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + woRep_dist + + obj1, obj2 + optOut + MC_samp + expConstr3 + impConstr3 + + + + + + + 10 + 050877 + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + woRep_dist + + + + + + + + x1,x2,x3,x4,x5,x6 + obj1,obj2 + + + trajID + x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,ConstraintEvaluation_expConstr3, ConstraintEvaluation_impConstr3,fitness,accepted + + + + + + csv + optOut + + + csv + opt_export + trajID + + + diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv new file mode 100644 index 0000000000..a4e2cdcdce --- /dev/null +++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv @@ -0,0 +1,150 @@ +x1,x2,x3,obj1,obj2,age,batchId,rank,CD,fitness,accepted +0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,2.0,1.0,inf,0.0,accepted +0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,2.0,1.0,inf,0.0,accepted +0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,2.0,1.0,1.17548106192,0.0,accepted +0.13264102096,0.641621648018,0.000778764719325,0.13264102096,2.3040905251,0.0,2.0,1.0,1.09190519404,0.0,accepted +0.713407223745,0.192211290866,0.431945021132,0.713407223745,1.44095222672,0.0,2.0,1.0,0.908094805963,0.0,accepted +0.713407223745,0.604534715322,0.183404509952,0.713407223745,1.81469798087,0.0,2.0,2.0,inf,0.0,accepted +0.13264102096,0.192211290866,0.4560699904,0.13264102096,2.31985816876,0.0,2.0,2.0,inf,0.0,accepted +0.713407223745,0.641621648018,0.334449647072,0.713407223745,2.25417202135,0.0,2.0,3.0,inf,0.0,accepted +0.13264102096,0.630711117673,0.405532905694,0.13264102096,3.37050011696,0.0,2.0,3.0,inf,0.0,accepted +0.524774661876,0.641621648018,0.39961784645,0.524774661876,2.65265663127,0.0,2.0,3.0,2.0,0.0,accepted +0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,3.0,1.0,inf,0.0,accepted +0.110044764846,0.567700327273,0.738899003886,0.110044764846,4.18400045796,0.0,3.0,1.0,inf,0.0,accepted +0.13264102096,0.192211290866,0.173364647239,0.13264102096,1.56936445005,0.0,3.0,1.0,1.17365382082,0.0,accepted +0.258779981001,0.00975325447734,0.39961784645,0.258779981001,1.46877733092,0.0,3.0,1.0,0.929582947246,0.0,accepted +0.713407223745,0.192211290866,0.431945021132,0.713407223745,1.44095222672,0.0,3.0,1.0,0.826346179184,0.0,accepted +0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,3.0,2.0,inf,0.0,accepted +0.713407223745,0.604534715322,0.183404509952,0.713407223745,1.81469798087,0.0,3.0,2.0,inf,0.0,accepted +0.13264102096,0.641621648018,0.000778764719325,0.13264102096,2.3040905251,0.0,3.0,2.0,1.2778379808,0.0,accepted +0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,3.0,2.0,1.1605567663,0.0,accepted 
+0.306377726911,0.192211290866,0.449754129036,0.306377726911,1.97909667942,0.0,3.0,3.0,inf,0.0,accepted +0.110044764846,0.567700327273,0.738899003886,0.110044764846,4.18400045796,0.0,4.0,1.0,inf,0.0,accepted +0.772244771889,0.00975325447734,0.39961784645,0.772244771889,0.916378249788,0.0,4.0,1.0,inf,0.0,accepted +0.13264102096,0.192211290866,0.173364647239,0.13264102096,1.56936445005,0.0,4.0,1.0,1.05555538382,0.0,accepted +0.258779981001,0.00975325447734,0.39961784645,0.258779981001,1.46877733092,0.0,4.0,1.0,0.916323733867,0.0,accepted +0.713407223745,0.192211290866,0.431945021132,0.713407223745,1.44095222672,0.0,4.0,1.0,0.813065271377,0.0,accepted +0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,4.0,1.0,0.249388538585,0.0,accepted +0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,4.0,2.0,inf,0.0,accepted +0.13264102096,0.00975325447734,0.37081825509,0.13264102096,1.60872372044,0.0,4.0,2.0,inf,0.0,accepted +0.13264102096,0.641621648018,0.000778764719325,0.13264102096,2.3040905251,0.0,4.0,3.0,inf,0.0,accepted +0.258779981001,0.192211290866,0.293488176375,0.258779981001,1.65969770087,0.0,4.0,3.0,inf,0.0,accepted +0.107891428309,0.192211290866,0.173364647239,0.107891428309,1.6211030056,4.0,5.0,1.0,inf,0.0,accepted +0.713407223745,0.00975325447734,0.228798159219,0.713407223745,0.609326925579,4.0,5.0,1.0,inf,0.0,accepted +0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,4.0,5.0,1.0,1.6002571283,0.0,accepted +0.258779981001,0.00975325447734,0.39961784645,0.258779981001,1.46877733092,4.0,5.0,1.0,1.00964323224,0.0,accepted +0.13264102096,0.192211290866,0.173364647239,0.13264102096,1.56936445005,4.0,5.0,1.0,0.399742871701,0.0,accepted +0.772244771889,0.00975325447734,0.39961784645,0.772244771889,0.916378249788,4.0,5.0,2.0,inf,0.0,accepted +0.110044764846,0.192211290866,0.624354044354,0.110044764846,2.83356210322,4.0,5.0,2.0,inf,0.0,accepted +0.13264102096,0.00975325447734,0.37081825509,0.13264102096,1.60872372044,4.0,5.0,2.0,1.67384162643,0.0,accepted +0.713407223745,0.161221285621,0.431945021132,0.713407223745,1.37133891227,4.0,5.0,2.0,1.10926623497,0.0,accepted +0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,4.0,5.0,2.0,0.326158373566,0.0,accepted +0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,1.0,6.0,1.0,inf,0.0,accepted +0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,1.0,6.0,1.0,inf,0.0,accepted +0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,1.0,6.0,1.0,1.10932398766,0.0,accepted +0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,1.0,6.0,1.0,0.890676012342,0.0,accepted +0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,1.0,6.0,1.0,0.712923687869,0.0,accepted +0.713407223745,0.00975325447734,0.228798159219,0.713407223745,0.609326925579,1.0,6.0,2.0,inf,0.0,accepted +0.0648922466358,0.00975325447734,0.37081825509,0.0648922466358,1.76891341897,1.0,6.0,2.0,inf,0.0,accepted +0.222107806295,0.00975325447734,0.228798159219,0.222107806295,1.09835350413,1.0,6.0,2.0,1.72344612754,0.0,accepted +0.13264102096,0.192211290866,0.173364647239,0.13264102096,1.56936445005,1.0,6.0,2.0,0.626926698592,0.0,accepted +0.107891428309,0.192211290866,0.173364647239,0.107891428309,1.6211030056,1.0,6.0,2.0,0.27655387246,0.0,accepted +0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,1.0,7.0,1.0,inf,0.0,accepted 
+0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,1.0,7.0,1.0,inf,0.0,accepted +0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,1.0,7.0,1.0,0.865064939691,0.0,accepted +0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,1.0,7.0,1.0,0.712923687869,0.0,accepted +0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,1.0,7.0,1.0,0.651756198699,0.0,accepted +0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,1.0,7.0,1.0,0.634832572855,0.0,accepted +0.0648922466358,0.00975325447734,0.242159936633,0.0648922466358,1.41819863445,1.0,7.0,1.0,0.422011372441,0.0,accepted +0.713407223745,0.00975325447734,0.228798159219,0.713407223745,0.609326925579,1.0,7.0,2.0,inf,0.0,accepted +0.00695213070301,0.00975325447734,0.367783134656,0.00695213070301,2.01084637477,1.0,7.0,2.0,inf,0.0,accepted +0.222107806295,0.00975325447734,0.228798159219,0.222107806295,1.09835350413,1.0,7.0,2.0,2.0,0.0,accepted +0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,2.0,8.0,1.0,inf,0.0,accepted +0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,2.0,8.0,1.0,inf,0.0,accepted +0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,2.0,8.0,1.0,0.865064939691,0.0,accepted +0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,2.0,8.0,1.0,0.712923687869,0.0,accepted +0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,2.0,8.0,1.0,0.651756198699,0.0,accepted +0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,2.0,8.0,1.0,0.634832572855,0.0,accepted +0.0648922466358,0.00975325447734,0.242159936633,0.0648922466358,1.41819863445,2.0,8.0,1.0,0.422011372441,0.0,accepted +0.713407223745,0.00975325447734,0.228798159219,0.713407223745,0.609326925579,2.0,8.0,2.0,inf,0.0,accepted +0.00695213070301,0.00975325447734,0.367783134656,0.00695213070301,2.01084637477,2.0,8.0,2.0,inf,0.0,accepted +0.222107806295,0.00975325447734,0.228798159219,0.222107806295,1.09835350413,2.0,8.0,2.0,2.0,0.0,accepted +0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,0.0,9.0,1.0,inf,0.0,accepted +0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,0.0,9.0,1.0,inf,0.0,accepted +0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,0.0,9.0,1.0,0.651756198699,0.0,accepted +0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,0.0,9.0,1.0,0.513796266194,0.0,accepted +0.178822707194,0.00975325447734,0.173364647239,0.178822707194,1.02298916991,0.0,9.0,1.0,0.468664639901,0.0,accepted +0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,0.0,9.0,1.0,0.454750192604,0.0,accepted +0.0648922466358,0.00975325447734,0.237249083872,0.0648922466358,1.40488522668,0.0,9.0,1.0,0.422011372441,0.0,accepted +0.649632900872,0.00975325447734,0.242159936633,0.649632900872,0.68775727376,0.0,9.0,1.0,0.410314747086,0.0,accepted +0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,0.0,9.0,1.0,0.353145324752,0.0,accepted +0.568308599426,0.00975325447734,0.242159936633,0.568308599426,0.756839229015,0.0,9.0,1.0,0.228270418055,0.0,accepted +0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,1.0,10.0,1.0,inf,0.0,accepted +0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,1.0,10.0,1.0,inf,0.0,accepted 
+0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,1.0,10.0,1.0,0.651756198699,0.0,accepted +0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,1.0,10.0,1.0,0.513796266194,0.0,accepted +0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,1.0,10.0,1.0,0.454750192604,0.0,accepted +0.0648922466358,0.00975325447734,0.237249083872,0.0648922466358,1.40488522668,1.0,10.0,1.0,0.422011372441,0.0,accepted +0.649632900872,0.00975325447734,0.242159936633,0.649632900872,0.68775727376,1.0,10.0,1.0,0.410314747086,0.0,accepted +0.178822707194,0.00975325447734,0.173364647239,0.178822707194,1.02298916991,1.0,10.0,1.0,0.468664639901,0.0,accepted +0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,1.0,10.0,1.0,0.353145324752,0.0,accepted +0.568308599426,0.00975325447734,0.242159936633,0.568308599426,0.756839229015,1.0,10.0,1.0,0.228270418055,0.0,accepted +0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,2.0,11.0,1.0,inf,0.0,accepted +0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,2.0,11.0,1.0,inf,0.0,accepted +0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,2.0,11.0,1.0,0.650175533333,0.0,accepted +0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,2.0,11.0,1.0,0.634536882707,0.0,accepted +0.0648922466358,0.00975325447734,0.237249083872,0.0648922466358,1.40488522668,2.0,11.0,1.0,0.420037013968,0.0,accepted +0.649632900872,0.00975325447734,0.242159936633,0.649632900872,0.68775727376,2.0,11.0,1.0,0.463954261861,0.0,accepted +0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,2.0,11.0,1.0,0.357366573279,0.0,accepted +0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,2.0,11.0,1.0,0.351855878369,0.0,accepted +0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,2.0,11.0,1.0,0.31630181039,0.0,accepted +0.178822707194,0.00975325447734,0.173364647239,0.178822707194,1.02298916991,2.0,11.0,1.0,0.303090139359,0.0,accepted +0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,6.0,12.0,1.0,inf,0.0,accepted +0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,6.0,12.0,1.0,inf,0.0,accepted +0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,6.0,12.0,1.0,0.650175533333,0.0,accepted +0.649632900872,0.00518486043559,0.184333673023,0.649632900872,0.559107570583,6.0,12.0,1.0,0.444060435829,0.0,accepted +0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,6.0,12.0,1.0,0.426680098555,0.0,accepted +0.0648922466358,0.00975325447734,0.237249083872,0.0648922466358,1.40488522668,6.0,12.0,1.0,0.420037013968,0.0,accepted +0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,6.0,12.0,1.0,0.465833190838,0.0,accepted +0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,6.0,12.0,1.0,0.546479400168,0.0,accepted +0.540635119784,0.00518486043559,0.228798159219,0.540635119784,0.742713199876,6.0,12.0,1.0,0.344191262312,0.0,accepted +0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,6.0,12.0,1.0,0.31630181039,0.0,accepted +0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,7.0,13.0,1.0,inf,0.0,accepted +0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,7.0,13.0,1.0,inf,0.0,accepted 
+0.314355981377,0.00518486043559,0.0460026422623,0.314355981377,0.551375432252,7.0,13.0,1.0,1.11412979519,0.0,accepted +0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,7.0,13.0,1.0,0.818090041033,0.0,accepted +0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,7.0,13.0,1.0,0.527591261357,0.0,accepted +0.0480589254405,0.00975325447734,0.173364647239,0.0480589254405,1.27647960561,7.0,13.0,1.0,0.368715073082,0.0,accepted +0.0171611045993,0.00975325447734,0.173364647239,0.0171611045993,1.38629352915,7.0,13.0,1.0,0.358278943448,0.0,accepted +0.649632900872,0.00518486043559,0.184333673023,0.649632900872,0.559107570583,7.0,13.0,2.0,inf,0.0,accepted +0.00695213070301,0.00975325447734,0.34187967245,0.00695213070301,1.93537503877,7.0,13.0,2.0,inf,0.0,accepted +0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,7.0,13.0,2.0,2.0,0.0,accepted +0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,1.0,14.0,1.0,inf,0.0,accepted +0.649632900872,0.00518486043559,0.0944429603625,0.649632900872,0.380298990737,1.0,14.0,1.0,inf,0.0,accepted +0.314355981377,0.00518486043559,0.0460026422623,0.314355981377,0.551375432252,1.0,14.0,1.0,1.149052421,0.0,accepted +0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,1.0,14.0,1.0,0.776808582325,0.0,accepted +0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,1.0,14.0,1.0,0.46393693418,0.0,accepted +0.0171611045993,0.00975325447734,0.173364647239,0.0171611045993,1.38629352915,1.0,14.0,1.0,0.333067034374,0.0,accepted +0.0693613020865,0.00975325447734,0.184333673023,0.0693613020865,1.2509789265,1.0,14.0,1.0,0.228915255524,0.0,accepted +0.0480589254405,0.00975325447734,0.173364647239,0.0480589254405,1.27647960561,1.0,14.0,1.0,0.191580577061,0.0,accepted +0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,1.0,14.0,2.0,inf,0.0,accepted +0.00695213070301,0.00975325447734,0.34187967245,0.00695213070301,1.93537503877,1.0,14.0,2.0,inf,0.0,accepted +0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,4.0,15.0,1.0,inf,0.0,accepted +0.902552906634,0.00518486043559,0.0460026422623,0.902552906634,0.13319434186,4.0,15.0,1.0,inf,0.0,accepted +0.649632900872,0.00518486043559,0.0944429603625,0.649632900872,0.380298990737,4.0,15.0,1.0,0.94061235925,0.0,accepted +0.314355981377,0.00518486043559,0.0460026422623,0.314355981377,0.551375432252,4.0,15.0,1.0,0.879443568797,0.0,accepted +0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,4.0,15.0,1.0,0.618089346206,0.0,accepted +0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,4.0,15.0,1.0,0.35946352349,0.0,accepted +0.0171611045993,0.00975325447734,0.173364647239,0.0171611045993,1.38629352915,4.0,15.0,1.0,0.269867635023,0.0,accepted +0.0693613020865,0.00975325447734,0.184333673023,0.0693613020865,1.2509789265,4.0,15.0,1.0,0.171430659521,0.0,accepted +0.0480589254405,0.00975325447734,0.173364647239,0.0480589254405,1.27647960561,4.0,15.0,1.0,0.150132989794,0.0,accepted +0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,4.0,15.0,2.0,inf,0.0,accepted +0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,4.0,15.0,1.0,inf,0.0,final +0.902552906634,0.00518486043559,0.0460026422623,0.902552906634,0.13319434186,4.0,15.0,1.0,inf,0.0,final 
+0.649632900872,0.00518486043559,0.0944429603625,0.649632900872,0.380298990737,4.0,15.0,1.0,0.94061235925,0.0,final +0.314355981377,0.00518486043559,0.0460026422623,0.314355981377,0.551375432252,4.0,15.0,1.0,0.879443568797,0.0,final +0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,4.0,15.0,1.0,0.618089346206,0.0,final +0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,4.0,15.0,1.0,0.35946352349,0.0,final +0.0171611045993,0.00975325447734,0.173364647239,0.0171611045993,1.38629352915,4.0,15.0,1.0,0.269867635023,0.0,final +0.0693613020865,0.00975325447734,0.184333673023,0.0693613020865,1.2509789265,4.0,15.0,1.0,0.171430659521,0.0,final +0.0480589254405,0.00975325447734,0.173364647239,0.0480589254405,1.27647960561,4.0,15.0,1.0,0.150132989794,0.0,final diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv new file mode 100644 index 0000000000..aa89cef81f --- /dev/null +++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv @@ -0,0 +1,13 @@ +x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,ConstraintEvaluation_expConstr3,ConstraintEvaluation_impConstr3,fitness,accepted +7.0,4.0,5.0,3.0,6.0,2.0,84.0,15.0,1.0,2.0,1.0,inf,2.0,16.0,0.0,accepted +7.0,2.0,5.0,3.0,6.0,4.0,92.0,11.0,1.0,2.0,1.0,inf,2.0,8.0,0.0,accepted +4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,1.0,2.0,2.0,2.0,1.0,6.0,0.0,accepted +7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,1.0,2.0,2.0,inf,2.0,14.0,0.0,accepted +6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,1.0,2.0,2.0,inf,3.0,3.0,0.0,accepted +7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,1.0,2.0,3.0,inf,3.0,11.0,0.0,accepted +4.0,5.0,3.0,6.0,2.0,7.0,99.0,14.0,1.0,2.0,3.0,inf,1.0,1.0,0.0,accepted +7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,1.0,2.0,5.0,inf,3.0,11.0,0.0,accepted +7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,1.0,2.0,4.0,inf,3.0,11.0,0.0,accepted +4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,1.0,2.0,6.0,inf,1.0,10.0,0.0,accepted +7.0,4.0,5.0,3.0,6.0,2.0,84.0,15.0,1.0,2.0,1.0,inf,2.0,16.0,0.0,final +7.0,2.0,5.0,3.0,6.0,4.0,92.0,11.0,1.0,2.0,1.0,inf,2.0,8.0,0.0,final diff --git a/tests/framework/Optimizers/GeneticAlgorithms/tests b/tests/framework/Optimizers/GeneticAlgorithms/tests index 50dfdf499d..65fb37ace3 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/tests +++ b/tests/framework/Optimizers/GeneticAlgorithms/tests @@ -320,4 +320,24 @@ rel_err = 0.001 [../] [../] + + [./NSGA-II_MinwoRepMultiObjective] + type = 'RavenFramework' + input = 'discrete/constrained/MinwoRepMultiObjective.xml' + [./csv] + type = OrderedCSV + output = 'discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv' + rel_err = 0.001 + [../] + [../] + + [./NSGA-II_ZDT1] + type = 'RavenFramework' + input = 'continuous/unconstrained/ZDT1.xml' + [./csv] + type = OrderedCSV + output = 'continuous/unconstrained/ZDT1/opt_export_0.csv' + rel_err = 0.001 + [../] + [../] [] From da9e0dde401f88215714cda5c9e79fa4d21aca34 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 21 Mar 2023 14:38:43 -0600 Subject: [PATCH 10/84] fixing simulated annealing to accept a list of objectives --- .../Optimizers/SimulatedAnnealing.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/ravenframework/Optimizers/SimulatedAnnealing.py b/ravenframework/Optimizers/SimulatedAnnealing.py index ab47591812..18364ac655 100644 --- 
a/ravenframework/Optimizers/SimulatedAnnealing.py +++ b/ravenframework/Optimizers/SimulatedAnnealing.py @@ -301,13 +301,11 @@ def _useRealization(self, info, rlz): @ Out, None """ traj = info['traj'] - if len(self._objectiveVar) == 1: - self._objectiveVar = self._objectiveVar[0] - elif type(self._objectiveVar) == list: + if len(self._objectiveVar) > 1 and type(self._objectiveVar)==list: self.raiseAnError(IOError, 'Simulated Annealing does not support multiObjective yet! objective variable must be a single variable for now!') - info['optVal'] = rlz[self._objectiveVar] + info['optVal'] = rlz[self._objectiveVar[0]] self.incrementIteration(traj) - self._resolveNewOptPoint(traj, rlz, rlz[self._objectiveVar], info) + self._resolveNewOptPoint(traj, rlz, rlz[self._objectiveVar[0]], info) if self._stepTracker[traj]['opt'] is None: # revert to the last accepted point rlz = self._optPointHistory[traj][-1][0] @@ -326,7 +324,7 @@ def _useRealization(self, info, rlz): except NoConstraintResolutionFound: # we've tried everything, but we just can't hack it self.raiseAMessage(f'Optimizer "{self.name}" trajectory {traj} was unable to continue due to functional or boundary constraints.') - self._closeTrajectory(traj, 'converge', 'no constraint resolution', newPoint[self._objectiveVar]) + self._closeTrajectory(traj, 'converge', 'no constraint resolution', newPoint[self._objectiveVar[0]]) return self._submitRun(suggested, traj, self.getIteration(traj)) @@ -398,7 +396,7 @@ def _checkConvObjective(self, traj): return False o1, _ = self._optPointHistory[traj][-1] o2, _ = self._optPointHistory[traj][-2] - delta = o2[self._objectiveVar]-o1[self._objectiveVar] + delta = o2[self._objectiveVar[0]]-o1[self._objectiveVar[0]] converged = abs(delta) < self._convergenceCriteria['objective'] self.raiseADebug(self.convFormat.format(name='objective', conv=str(converged), @@ -447,9 +445,9 @@ def _checkAcceptability(self, traj, opt, optVal, info): # NOTE: if self._optPointHistory[traj]: -> faster to use "try" for all but the first time try: old, _ = self._optPointHistory[traj][-1] - oldVal = old[self._objectiveVar] + oldVal = old[self._objectiveVar[0]] # check if same point - self.raiseADebug(f' ... change: {opt[self._objectiveVar]-oldVal:1.3e} new objective: {opt[self._objectiveVar]:1.6e} old objective: {oldVal:1.6e}') + self.raiseADebug(f' ... change: {opt[self._objectiveVar[0]]-oldVal:1.3e} new objective: {opt[self._objectiveVar[0]]:1.6e} old objective: {oldVal:1.6e}') # if this is an opt point rerun, accept it without checking.
if self._acceptRerun[traj]: acceptable = 'rerun' @@ -458,7 +456,7 @@ def _checkAcceptability(self, traj, opt, optVal, info): # this is the classic "same point" trap; we accept the same point, and check convergence later acceptable = 'accepted' else: - if self._acceptabilityCriterion(oldVal,opt[self._objectiveVar])>randomUtils.random(dim=1, samples=1): # TODO replace it back + if self._acceptabilityCriterion(oldVal,opt[self._objectiveVar[0]])>randomUtils.random(dim=1, samples=1): # TODO replace it back acceptable = 'accepted' else: acceptable = 'rejected' From 1fd2175ac0e2d49e454a9ada92087189cb3e089a Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 21 Mar 2023 22:52:18 -0600 Subject: [PATCH 11/84] fixing rook to compare infs --- rook/OrderedCSVDiffer.py | 5 ++++ .../constrained/MinwoRepMultiObjective.xml | 2 +- .../Multi_MinwoReplacement/opt_export_0.csv | 25 ++++++++++--------- 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/rook/OrderedCSVDiffer.py b/rook/OrderedCSVDiffer.py index 65d002d7ba..9561832f42 100644 --- a/rook/OrderedCSVDiffer.py +++ b/rook/OrderedCSVDiffer.py @@ -17,6 +17,7 @@ from __future__ import division, print_function, unicode_literals, absolute_import import sys import csv +import numpy as np from Tester import Differ @@ -92,6 +93,10 @@ def matches(self, a_obj, b_obj, is_number, tol): """ if not is_number: return a_obj == b_obj, 0 + if np.isnan(a_obj) and np.isnan(b_obj): + return True, 0 + if np.isinf(a_obj) and np.isinf(b_obj) and a_obj==b_obj: + return True, 0 if self.__ignore_sign: a_obj = abs(a_obj) b_obj = abs(b_obj) diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml index f3f6ce1208..b72dd0532a 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml @@ -1,7 +1,7 @@ - raven/tests/framework/Optimizers/GA. 
+ raven/tests/framework/Optimizers/GeneticAlgorithms.NSGAII Junyung Kim 2022-12-21 diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv index aa89cef81f..ecd64246ae 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv +++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv @@ -1,13 +1,14 @@ x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,ConstraintEvaluation_expConstr3,ConstraintEvaluation_impConstr3,fitness,accepted -7.0,4.0,5.0,3.0,6.0,2.0,84.0,15.0,1.0,2.0,1.0,inf,2.0,16.0,0.0,accepted -7.0,2.0,5.0,3.0,6.0,4.0,92.0,11.0,1.0,2.0,1.0,inf,2.0,8.0,0.0,accepted -4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,1.0,2.0,2.0,2.0,1.0,6.0,0.0,accepted -7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,1.0,2.0,2.0,inf,2.0,14.0,0.0,accepted -6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,1.0,2.0,2.0,inf,3.0,3.0,0.0,accepted -7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,1.0,2.0,3.0,inf,3.0,11.0,0.0,accepted -4.0,5.0,3.0,6.0,2.0,7.0,99.0,14.0,1.0,2.0,3.0,inf,1.0,1.0,0.0,accepted -7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,1.0,2.0,5.0,inf,3.0,11.0,0.0,accepted -7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,1.0,2.0,4.0,inf,3.0,11.0,0.0,accepted -4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,1.0,2.0,6.0,inf,1.0,10.0,0.0,accepted -7.0,4.0,5.0,3.0,6.0,2.0,84.0,15.0,1.0,2.0,1.0,inf,2.0,16.0,0.0,final -7.0,2.0,5.0,3.0,6.0,4.0,92.0,11.0,1.0,2.0,1.0,inf,2.0,8.0,0.0,final +4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,0.0,2.0,1.0,2.0,1.0,6.0,0.0,accepted +7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,0.0,2.0,1.0,inf,2.0,14.0,0.0,accepted +4.0,5.0,7.0,2.0,3.0,6.0,94.0,14.0,0.0,2.0,2.0,inf,1.0,6.0,0.0,accepted +6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,0.0,2.0,1.0,inf,3.0,3.0,0.0,accepted +4.0,6.0,7.0,3.0,5.0,2.0,86.0,16.0,0.0,2.0,2.0,inf,0.0,14.0,0.0,accepted +7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,0.0,2.0,2.0,2.0,3.0,11.0,0.0,accepted +7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,0.0,2.0,3.0,inf,3.0,11.0,0.0,accepted +7.0,5.0,3.0,2.0,6.0,4.0,88.0,17.0,0.0,2.0,3.0,inf,5.0,12.0,0.0,accepted +4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,0.0,2.0,4.0,inf,1.0,10.0,0.0,accepted +7.0,4.0,3.0,2.0,5.0,6.0,93.0,15.0,0.0,2.0,4.0,inf,5.0,7.0,0.0,accepted +4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,0.0,2.0,1.0,2.0,1.0,6.0,0.0,final +7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,0.0,2.0,1.0,inf,2.0,14.0,0.0,final +6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,0.0,2.0,1.0,inf,3.0,3.0,0.0,final From 305c2aced9988ab30a229f92d2d6e4414ab136bb Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Sat, 1 Apr 2023 00:00:22 -0600 Subject: [PATCH 12/84] making one mod in RAVENSAmpled --- plugins/HERON | 1 - plugins/TEAL | 2 +- ravenframework/Optimizers/RavenSampled.py | 4 ++-- 3 files changed, 3 insertions(+), 4 deletions(-) delete mode 160000 plugins/HERON diff --git a/plugins/HERON b/plugins/HERON deleted file mode 160000 index a5853fa4c6..0000000000 --- a/plugins/HERON +++ /dev/null @@ -1 +0,0 @@ -Subproject commit a5853fa4c6fdfe6a5cde6023608a974122135e5e diff --git a/plugins/TEAL b/plugins/TEAL index cf79281b57..8572ef2e6f 160000 --- a/plugins/TEAL +++ b/plugins/TEAL @@ -1 +1 @@ -Subproject commit cf79281b57afbeaed4bce0aa26be2376799ac336 +Subproject commit 8572ef2e6fa6b89b6651db40c9973b47d8b76a45 diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py index 6755c27ec4..c79f3a7cd8 100644 --- a/ravenframework/Optimizers/RavenSampled.py +++ 
b/ravenframework/Optimizers/RavenSampled.py @@ -352,7 +352,7 @@ def finalizeSampler(self, failedRuns): self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' + 'Perhaps the Model failed?') opt = self._optPointHistory[traj][-1][0] - val = opt[self._objectiveVar] + val = opt[self._objectiveVar[0]] self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val)) if bestValue is None or val < bestValue: bestValue = val @@ -419,7 +419,7 @@ def finalizeSampler(self, failedRuns): self.raiseAMessage(' - Final Optimal Point:') finalTemplate = ' {name:^20s} {value: 1.3e}' finalTemplateInt = ' {name:^20s} {value: 3d}' - self.raiseAMessage(finalTemplate.format(name=self._objectiveVar, value=s * bestValue)) + # self.raiseAMessage(finalTemplate.format(name=self._objectiveVar, value=s * bestValue)) self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj)) for var, val in bestPoint.items(): self.raiseAMessage(finalTemplate.format(name=var, value=val)) From c820eeab83f88dc3294d442bc2484d3110dd6a23 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 3 Apr 2023 10:53:46 -0600 Subject: [PATCH 13/84] making self._minMax a list --- ravenframework/Optimizers/Optimizer.py | 7 +++++-- ravenframework/Optimizers/RavenSampled.py | 14 ++++++++------ .../continuous/unconstrained/ZDT1.xml | 2 +- .../constrained/MinwoRepMultiObjective.xml | 2 +- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/ravenframework/Optimizers/Optimizer.py b/ravenframework/Optimizers/Optimizer.py index 011f6fd9d3..5c3a2af2f1 100644 --- a/ravenframework/Optimizers/Optimizer.py +++ b/ravenframework/Optimizers/Optimizer.py @@ -103,7 +103,8 @@ def getInputSpecification(cls): descr=r"""seed for random number generation. Note that by default RAVEN uses an internal seed, so this seed must be changed to observe changed behavior. \default{RAVEN-determined}""") minMaxEnum = InputTypes.makeEnumType('MinMax', 'MinMaxType', ['min', 'max']) - minMax = InputData.parameterInputFactory('type', contentType=minMaxEnum, + minMaxList = InputTypes.StringListType() + minMax = InputData.parameterInputFactory('type', contentType=minMaxList, descr=r"""the type of optimization to perform. \xmlString{min} will search for the lowest \xmlNode{objective} value, while \xmlString{max} will search for the highest value.""") init.addSub(seed) @@ -161,7 +162,7 @@ def __init__(self): # public # _protected self._seed = None # random seed to apply - self._minMax = 'min' # maximization or minimization? + self._minMax = ['min'] # maximization or minimization? 
self._activeTraj = [] # tracks live trajectories self._cancelledTraj = {} # tracks cancelled trajectories, and reasons self._convergedTraj = {} # tracks converged trajectories, and values obtained @@ -261,6 +262,8 @@ def handleInput(self, paramInput): minMax = init.findFirst('type') if minMax is not None: self._minMax = minMax.value + if len(self._minMax) != len(self._objectiveVar): + self.raiseAnError(IOError, 'minMax and objectiveVar must be of the same length!') # variables additional reading for varNode in paramInput.findAll('variable'): diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py index c79f3a7cd8..70b0518c3d 100644 --- a/ravenframework/Optimizers/RavenSampled.py +++ b/ravenframework/Optimizers/RavenSampled.py @@ -300,12 +300,14 @@ def localFinalizeActualSampling(self, jobObject, model, myInput): # # testing suggests no big deal on smaller problem # the sign of the objective function is flipped in case we do maximization # so get the correct-signed value into the realization - if self._minMax == 'max': + + if 'max' in self._minMax: if not self._canHandleMultiObjective and len(self._objectiveVar) == 1: rlz[self._objectiveVar[0]] *= -1 elif type(self._objectiveVar) == list: for i in range(len(self._objectiveVar)): - rlz[self._objectiveVar[i]] *= -1 + if self._minMax[i] == 'max': + rlz[self._objectiveVar[i]] *= -1 else: rlz[self._objectiveVar] *= -1 # TODO FIXME let normalizeData work on an xr.DataSet (batch) not just a dictionary! @@ -323,7 +325,7 @@ def finalizeSampler(self, failedRuns): bestValue = None bestTraj = None bestPoint = None - s = -1 if self._minMax == 'max' else 1 + s = -1 if 'max' in self._minMax else 1 # check converged trajectories self.raiseAMessage('*' * 80) self.raiseAMessage('Optimizer Final Results:') @@ -432,7 +434,7 @@ def finalizeSampler(self, failedRuns): key = list(opt.keys()) val = [item[i] for item in opt.values()] optElm = {key[a]: val[a] for a in range(len(key))} - optVal = [s*optElm[self._objectiveVar[b]] for b in range(len(self._objectiveVar))] + optVal = [(-1*(self._minMax[b]=='max')+(self._minMax[b]=='min'))*optElm[self._objectiveVar[b]] for b in range(len(self._objectiveVar))] bestTraj = traj bestOpt = self.denormalizeData(optElm) @@ -719,13 +721,13 @@ def _updateSolutionExport(self, traj, rlz, acceptable, rejectReason): # optimal point input and output spaces if len(self._objectiveVar) == 1: # Single Objective Optimization objValue = rlz[self._objectiveVar[0]] - if self._minMax == 'max': + if 'max' in self._minMax: objValue *= -1 toExport[self._objectiveVar[0]] = objValue else: # Multi Objective Optimization for i in range(len(self._objectiveVar)): objValue = rlz[self._objectiveVar[i]] - if self._minMax == 'max': + if self._minMax[i] == 'max': objValue *= -1 toExport[self._objectiveVar[i]] = objValue toExport.update(self.denormalizeData(dict((var, rlz[var]) for var in self.toBeSampled))) diff --git a/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml index 94d0caf169..04cd9bce62 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml @@ -50,7 +50,7 @@ 15 42 every - min + min,min diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml 
b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml index b72dd0532a..d6695c1571 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml @@ -60,7 +60,7 @@ 2 42 every - min + min,min From 21bf42d9ba9f95c18752d76e8b6eab4f6e12f1ab Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 3 Apr 2023 12:32:52 -0600 Subject: [PATCH 14/84] erroring out if type is not in ['min', 'max'] --- ravenframework/Optimizers/Optimizer.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ravenframework/Optimizers/Optimizer.py b/ravenframework/Optimizers/Optimizer.py index 5c3a2af2f1..511a094797 100644 --- a/ravenframework/Optimizers/Optimizer.py +++ b/ravenframework/Optimizers/Optimizer.py @@ -263,7 +263,9 @@ def handleInput(self, paramInput): if minMax is not None: self._minMax = minMax.value if len(self._minMax) != len(self._objectiveVar): - self.raiseAnError(IOError, 'minMax and objectiveVar must be of the same length!') + self.raiseAnError(IOError, 'type and objective must be of the same length!') + if list(set(self._minMax)-set(['min','max'])) != []: + self.raiseAnError(IOError, "type must be a list of 'min' and/or 'max'") # variables additional reading for varNode in paramInput.findAll('variable'): From e639803f3f24fc158b4c308cd97370d4993a1938 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 3 Apr 2023 16:59:36 -0600 Subject: [PATCH 15/84] updating HERON to b316024 --- plugins/HERON | 1 + 1 file changed, 1 insertion(+) create mode 160000 plugins/HERON diff --git a/plugins/HERON b/plugins/HERON new file mode 160000 index 0000000000..b31602460c --- /dev/null +++ b/plugins/HERON @@ -0,0 +1 @@ +Subproject commit b31602460c83b7ff9cddfd2a6d1fd206b2f9d7bc From be64a4db6826a7691016cb5384a52439fbdf4984 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 3 Apr 2023 21:03:41 -0600 Subject: [PATCH 16/84] updating dependencies --- dependencies.xml | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/dependencies.xml b/dependencies.xml index b29ebc7270..e7989d9cd3 100644 --- a/dependencies.xml +++ b/dependencies.xml @@ -36,25 +36,19 @@ Note all install methods after "main" take
- 1.21 + 1.22 1.7 1.0 1.3 - - 0.19 + + 2023 1.5 - 3.3 + 3.5 0.13 2.2 - 2.9 + 2.10 - 3.7 + 3.8 3 @@ -64,11 +58,11 @@ Note all install methods after "main" take - + - 2.1 + 2.2 1.13 @@ -84,7 +78,6 @@ Note all install methods after "main" take - 3
@@ -103,4 +96,4 @@ Note all install methods after "main" take remove remove -
+ \ No newline at end of file From 95682a16c3bbe248cddea39d7cdd8cfc0f3430dd Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 4 Apr 2023 13:51:03 -0600 Subject: [PATCH 17/84] removing a trailing space --- tests/framework/AnalyticModels/optimizing/myLocalSum.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/framework/AnalyticModels/optimizing/myLocalSum.py b/tests/framework/AnalyticModels/optimizing/myLocalSum.py index faec353eb6..d4b6fec246 100644 --- a/tests/framework/AnalyticModels/optimizing/myLocalSum.py +++ b/tests/framework/AnalyticModels/optimizing/myLocalSum.py @@ -81,5 +81,4 @@ def constrain(self): and negative if violated. """ explicitConstrain = constraint(self) - return explicitConstrain - + return explicitConstrain \ No newline at end of file From c3688e2e82cc87fdcc977b1e0dc120f4bd9af565 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 4 Apr 2023 17:01:41 -0600 Subject: [PATCH 18/84] removing windows line endings --- .../MultiObjective/myLocalSum_multi.py | 43 +++++++++++++++++++ .../optimizing/myConstraints.py | 2 +- .../optimizing/myLocalSum_multi.py | 8 +--- 3 files changed, 45 insertions(+), 8 deletions(-) create mode 100644 tests/framework/AnalyticModels/optimizing/MultiObjective/myLocalSum_multi.py diff --git a/tests/framework/AnalyticModels/optimizing/MultiObjective/myLocalSum_multi.py b/tests/framework/AnalyticModels/optimizing/MultiObjective/myLocalSum_multi.py new file mode 100644 index 0000000000..86ef17bdeb --- /dev/null +++ b/tests/framework/AnalyticModels/optimizing/MultiObjective/myLocalSum_multi.py @@ -0,0 +1,43 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# @author: Mohammad Abdo (@Jimmy-INL) + +def evaluate(Inputs): + Sum = 0 + LocalSum1 = 0 + LocalSum2 = 0 + # for ind,var in enumerate(Inputs.keys()): + # # write the objective function here + # Sum += (ind + 1) * Inputs[var] + # if (ind == 1): + # LocalSum1 = Sum + # return Sum[:], LocalSum1[:] + for ind,var in enumerate(Inputs.keys()): + # write the objective function here + Sum += (ind + 1) * Inputs[var] + if (ind == 0) or (ind == 1): + LocalSum1 += (ind + 1) * Inputs[var] + if (ind == 2) or (ind == 3): + LocalSum2 += (ind + 1) * Inputs[var] + return Sum[:], LocalSum1[:], LocalSum2[:] + +def run(self,Inputs): + """ + RAVEN API + @ In, self, object, RAVEN container + @ In, Inputs, dict, additional inputs + @ Out, None + """ + self.obj1,self.obj2,self.obj3 = evaluate(Inputs) # make sure the name of the objective is consistent with obj1, obj2, obj3. 
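For reference, a minimal standalone sketch (not part of the patch) of what the MultiObjective/myLocalSum_multi.py model above computes: obj1 is the position-weighted sum over all inputs, obj2 and obj3 are the contributions of the first two and of the third and fourth variables. The x1..x6 names and the one-element numpy arrays are illustrative assumptions, mirroring the array-valued inputs implied by the trailing [:] slices in the model.

import numpy as np

def local_sums(inputs):
  # obj1: weighted sum over all variables, weight = 1-based position of the key
  # obj2: contribution of the first two variables; obj3: contribution of the third and fourth
  total, local1, local2 = 0, 0, 0
  for ind, var in enumerate(inputs):
    total += (ind + 1) * inputs[var]
    if ind in (0, 1):
      local1 += (ind + 1) * inputs[var]
    if ind in (2, 3):
      local2 += (ind + 1) * inputs[var]
  return total, local1, local2

inputs = {f'x{i}': np.array([float(i)]) for i in range(1, 7)}  # x1=1.0, ..., x6=6.0
print(local_sums(inputs))  # (array([91.]), array([5.]), array([25.]))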
diff --git a/tests/framework/AnalyticModels/optimizing/myConstraints.py b/tests/framework/AnalyticModels/optimizing/myConstraints.py index bc704cfa57..b57cda4ce6 100644 --- a/tests/framework/AnalyticModels/optimizing/myConstraints.py +++ b/tests/framework/AnalyticModels/optimizing/myConstraints.py @@ -102,7 +102,7 @@ def impConstr3(Input): The implicit constraint involves variables from the output space, for example the objective variable or a dependent variable that is not in the optimization search space @ In, Input, object, RAVEN container - @ out, g, float, implicit constraint 3 evaluation function + @ out, g, float, implicit constraint #3 evaluation function """ g = 100 - Input.obj1 return g \ No newline at end of file diff --git a/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py b/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py index 509f852fd1..e012b851e9 100644 --- a/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py +++ b/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py @@ -18,12 +18,6 @@ def evaluate(Inputs): Sum = 0 LocalSum1 = 0 LocalSum2 = 0 - # for ind,var in enumerate(Inputs.keys()): - # # write the objective function here - # Sum += (ind + 1) * Inputs[var] - # if (ind == 1): - # LocalSum1 = Sum - # return Sum[:], LocalSum1[:] for ind,var in enumerate(Inputs.keys()): # write the objective function here Sum += (ind + 1) * Inputs[var] @@ -40,4 +34,4 @@ def run(self,Inputs): @ In, Inputs, dict, additional inputs @ Out, None """ - self.obj1,self.obj2,self.obj3 = evaluate(Inputs) # make sure the name of the objective is consistent with obj1, obj2, obj3. + self.obj1,self.obj2,self.obj3 = evaluate(Inputs) From e25cc37eaf22505e5df947980c5735e0e5025073 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Wed, 5 Apr 2023 08:51:06 -0600 Subject: [PATCH 19/84] change to unix ending --- .../optimizing/myConstraints.py | 216 +++++++++--------- .../optimizing/myLocalSum_multi.py | 74 +++--- 2 files changed, 145 insertions(+), 145 deletions(-) diff --git a/tests/framework/AnalyticModels/optimizing/myConstraints.py b/tests/framework/AnalyticModels/optimizing/myConstraints.py index b57cda4ce6..d63407ef6f 100644 --- a/tests/framework/AnalyticModels/optimizing/myConstraints.py +++ b/tests/framework/AnalyticModels/optimizing/myConstraints.py @@ -1,108 +1,108 @@ -# Copyright 2017 Battelle Energy Alliance, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# @ author: Mohammad Abdo (@Jimmy-INL) - -import numpy as np - -def constrain(Input):#Complete this: give the function the correct name# - """ - This function calls the explicit constraint whose name is passed through Input.name - the evaluation function g is negative if the explicit constraint is violated and positive otherwise. - This suits the constraint handling in the Genetic Algorithms, - but not the Gradient Descent as the latter expects True if the solution passes the constraint and False if it violates it. 
- @ In, Input, object, RAVEN container - @ Out, g, float, explicit constraint evaluation (negative if violated and positive otherwise) - """ - g = eval(Input.name)(Input) - return g - -def implicitConstraint(Input): - """ - Evaluates the implicit constraint function at a given point/solution ($\vec(x)$) - @ In, Input, object, RAVEN container - @ Out, g(inputs x1,x2,..,output or dependent variable), float, implicit constraint evaluation function - the way the constraint is designed is that - the constraint function has to be >= 0, - so if: - 1) f(x,y) >= 0 then g = f - 2) f(x,y) >= a then g = f - a - 3) f(x,y) <= b then g = b - f - 4) f(x,y) = c then g = 1e-6 - abs((f(x,y) - c)) (equality constraint) - """ - g = eval(Input.name)(Input) - return g - - -def expConstr1(Input):#You are free to pick this name but it has to be similar to the one in the xml# - """ - Let's assume that the constraint is: - $ x3+x4 < 8 $ - then g the constraint evaluation function (which has to be > 0) is taken to be: - g = 8 - (x3+x4) - in this case if g(\vec(x)) < 0 then this x violates the constraint and vice versa - @ In, Input, object, RAVEN container - @ out, g, float, explicit constraint 1 evaluation function - """ - g = 8 - Input.x3 - Input.x4 - return g - -def expConstr2(Input): - """ - Explicit Equality Constraint: - let's consider the constraint x1**2 + x2**2 = 25 - The way to write g is to use a very small number for instance, epsilon = 1e-12 - and then g = epsilon - abs(constraint) - @ In, Input, object, RAVEN container - @ out, g, float, explicit constraint 2 evaluation function - """ - g = 1e-12 - abs(Input.x1**2 + Input.x2**2 - 25) - return g - -def expConstr3(Input): - """ - @ In, Input, object, RAVEN container - @ out, g, float, explicit constraint 3 evaluation function - """ - g = 10 - Input.x3 - Input.x4 - return g - -def impConstr1(Input): - """ - The implicit constraint involves variables from the output space, for example the objective variable or - a dependent variable that is not in the optimization search space - @ In, Input, object, RAVEN container - @ out, g, float, implicit constraint 1 evaluation function - """ - g = 10 - Input.x1**2 - Input.obj - return g - -def impConstr2(Input): - """ - The implicit constraint involves variables from the output space, for example the objective variable or - a dependent variable that is not in the optimization search space - @ In, Input, object, RAVEN container - @ out, g, float, implicit constraint 2 evaluation function - """ - g = Input.x1**2 + Input.obj - 10 - return g - -def impConstr3(Input): - """ - The implicit constraint involves variables from the output space, for example the objective variable or - a dependent variable that is not in the optimization search space - @ In, Input, object, RAVEN container - @ out, g, float, implicit constraint #3 evaluation function - """ - g = 100 - Input.obj1 - return g \ No newline at end of file +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# @ author: Mohammad Abdo (@Jimmy-INL) + +import numpy as np + +def constrain(Input):#Complete this: give the function the correct name# + """ + This function calls the explicit constraint whose name is passed through Input.name + the evaluation function g is negative if the explicit constraint is violated and positive otherwise. + This suits the constraint handling in the Genetic Algorithms, + but not the Gradient Descent as the latter expects True if the solution passes the constraint and False if it violates it. + @ In, Input, object, RAVEN container + @ Out, g, float, explicit constraint evaluation (negative if violated and positive otherwise) + """ + g = eval(Input.name)(Input) + return g + +def implicitConstraint(Input): + """ + Evaluates the implicit constraint function at a given point/solution ($\vec(x)$) + @ In, Input, object, RAVEN container + @ Out, g(inputs x1,x2,..,output or dependent variable), float, implicit constraint evaluation function + the way the constraint is designed is that + the constraint function has to be >= 0, + so if: + 1) f(x,y) >= 0 then g = f + 2) f(x,y) >= a then g = f - a + 3) f(x,y) <= b then g = b - f + 4) f(x,y) = c then g = 1e-6 - abs((f(x,y) - c)) (equality constraint) + """ + g = eval(Input.name)(Input) + return g + + +def expConstr1(Input):#You are free to pick this name but it has to be similar to the one in the xml# + """ + Let's assume that the constraint is: + $ x3+x4 < 8 $ + then g the constraint evaluation function (which has to be > 0) is taken to be: + g = 8 - (x3+x4) + in this case if g(\vec(x)) < 0 then this x violates the constraint and vice versa + @ In, Input, object, RAVEN container + @ out, g, float, explicit constraint 1 evaluation function + """ + g = 8 - Input.x3 - Input.x4 + return g + +def expConstr2(Input): + """ + Explicit Equality Constraint: + let's consider the constraint x1**2 + x2**2 = 25 + The way to write g is to use a very small number for instance, epsilon = 1e-12 + and then g = epsilon - abs(constraint) + @ In, Input, object, RAVEN container + @ out, g, float, explicit constraint 2 evaluation function + """ + g = 1e-12 - abs(Input.x1**2 + Input.x2**2 - 25) + return g + +def expConstr3(Input): + """ + @ In, Input, object, RAVEN container + @ out, g, float, explicit constraint 3 evaluation function + """ + g = 10 - Input.x3 - Input.x4 + return g + +def impConstr1(Input): + """ + The implicit constraint involves variables from the output space, for example the objective variable or + a dependent variable that is not in the optimization search space + @ In, Input, object, RAVEN container + @ out, g, float, implicit constraint 1 evaluation function + """ + g = 10 - Input.x1**2 - Input.obj + return g + +def impConstr2(Input): + """ + The implicit constraint involves variables from the output space, for example the objective variable or + a dependent variable that is not in the optimization search space + @ In, Input, object, RAVEN container + @ out, g, float, implicit constraint 2 evaluation function + """ + g = Input.x1**2 + Input.obj - 10 + return g + +def impConstr3(Input): + """ + The implicit constraint involves variables from the output space, for example the objective variable or + a dependent variable that is not in the optimization search space + @ In, Input, object, RAVEN container + @ out, g, float, implicit constraint #3 evaluation function + """ + g = 100 - Input.obj1 + return g diff --git a/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py 
b/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py index e012b851e9..2bad9f6b44 100644 --- a/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py +++ b/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py @@ -1,37 +1,37 @@ -# Copyright 2017 Battelle Energy Alliance, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# @author: Mohammad Abdo (@Jimmy-INL) - -def evaluate(Inputs): - Sum = 0 - LocalSum1 = 0 - LocalSum2 = 0 - for ind,var in enumerate(Inputs.keys()): - # write the objective function here - Sum += (ind + 1) * Inputs[var] - if (ind == 0) or (ind == 1): - LocalSum1 += (ind + 1) * Inputs[var] - if (ind == 2) or (ind == 3): - LocalSum2 += (ind + 1) * Inputs[var] - return Sum[:], LocalSum1[:], LocalSum2[:] - -def run(self,Inputs): - """ - RAVEN API - @ In, self, object, RAVEN container - @ In, Inputs, dict, additional inputs - @ Out, None - """ - self.obj1,self.obj2,self.obj3 = evaluate(Inputs) +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# @author: Mohammad Abdo (@Jimmy-INL) + +def evaluate(Inputs): + Sum = 0 + LocalSum1 = 0 + LocalSum2 = 0 + for ind,var in enumerate(Inputs.keys()): + # write the objective function here + Sum += (ind + 1) * Inputs[var] + if (ind == 0) or (ind == 1): + LocalSum1 += (ind + 1) * Inputs[var] + if (ind == 2) or (ind == 3): + LocalSum2 += (ind + 1) * Inputs[var] + return Sum[:], LocalSum1[:], LocalSum2[:] + +def run(self,Inputs): + """ + RAVEN API + @ In, self, object, RAVEN container + @ In, Inputs, dict, additional inputs + @ Out, None + """ + self.obj1,self.obj2,self.obj3 = evaluate(Inputs) From f0d141293d7b65111d1fcf7cc3edbdf4b53d4df7 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Wed, 5 Apr 2023 09:31:29 -0600 Subject: [PATCH 20/84] adding the zdt_model.py --- .../AnalyticModels/optimizing/ZDT_model.py | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 tests/framework/AnalyticModels/optimizing/ZDT_model.py diff --git a/tests/framework/AnalyticModels/optimizing/ZDT_model.py b/tests/framework/AnalyticModels/optimizing/ZDT_model.py new file mode 100644 index 0000000000..851773cee6 --- /dev/null +++ b/tests/framework/AnalyticModels/optimizing/ZDT_model.py @@ -0,0 +1,41 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# @author: Junyung Kim (@JunyungKim-INL) and Mohammad Abdo (@Jimmy-INL) + +import math + +def evaluate(Inputs): + Sum = 0 + obj1 = 0 + + for ind,var in enumerate(Inputs.keys()): + # write the objective function here + if (ind == 0) : + obj1 += Inputs[var] + if (ind != 0): + Sum += Inputs[var] + g = 1 + (9/len(Inputs.keys())*Sum ) + h = 1 - math.sqrt(obj1/g) + obj2 = g*h + return obj1[:], obj2[:] + +def run(self,Inputs): + """ + RAVEN API + @ In, self, object, RAVEN container + @ In, Inputs, dict, additional inputs + @ Out, None + """ + self.obj1,self.obj2 = evaluate(Inputs) From c2ca46e50906b13206efbba4d41dd3c9db6e44fd Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Wed, 5 Apr 2023 09:41:57 -0600 Subject: [PATCH 21/84] converting zdt to unix line endings --- .../AnalyticModels/optimizing/ZDT_model.py | 82 +++++++++---------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/tests/framework/AnalyticModels/optimizing/ZDT_model.py b/tests/framework/AnalyticModels/optimizing/ZDT_model.py index 851773cee6..ca53da447d 100644 --- a/tests/framework/AnalyticModels/optimizing/ZDT_model.py +++ b/tests/framework/AnalyticModels/optimizing/ZDT_model.py @@ -1,41 +1,41 @@ -# Copyright 2017 Battelle Energy Alliance, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# @author: Junyung Kim (@JunyungKim-INL) and Mohammad Abdo (@Jimmy-INL) - -import math - -def evaluate(Inputs): - Sum = 0 - obj1 = 0 - - for ind,var in enumerate(Inputs.keys()): - # write the objective function here - if (ind == 0) : - obj1 += Inputs[var] - if (ind != 0): - Sum += Inputs[var] - g = 1 + (9/len(Inputs.keys())*Sum ) - h = 1 - math.sqrt(obj1/g) - obj2 = g*h - return obj1[:], obj2[:] - -def run(self,Inputs): - """ - RAVEN API - @ In, self, object, RAVEN container - @ In, Inputs, dict, additional inputs - @ Out, None - """ - self.obj1,self.obj2 = evaluate(Inputs) +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# @author: Junyung Kim (@JunyungKim-INL) and Mohammad Abdo (@Jimmy-INL) + +import math + +def evaluate(Inputs): + Sum = 0 + obj1 = 0 + + for ind,var in enumerate(Inputs.keys()): + # write the objective function here + if (ind == 0) : + obj1 += Inputs[var] + if (ind != 0): + Sum += Inputs[var] + g = 1 + (9/len(Inputs.keys())*Sum ) + h = 1 - math.sqrt(obj1/g) + obj2 = g*h + return obj1[:], obj2[:] + +def run(self,Inputs): + """ + RAVEN API + @ In, self, object, RAVEN container + @ In, Inputs, dict, additional inputs + @ Out, None + """ + self.obj1,self.obj2 = evaluate(Inputs) From 1f1b969ab6287869ac81cf7c29e05d59d80f8840 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Thu, 6 Apr 2023 09:43:58 -0600 Subject: [PATCH 22/84] Juan's change to simulateData for the interface --- .../SIMULATE3/SimulateData.py | 40 ++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py index 817a7d5381..7b9fc67d09 100644 --- a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py +++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py @@ -43,6 +43,8 @@ def __init__(self,filen): self.data["PinPowerPeaking"] = self.pinPeaking() self.data["exposure"] = self.burnupEOC() self.data["assembly_power"] = self.assemblyPeakingFactors() + self.data["fuel_type"] = self.fa_type() +# self.data["pin_peaking"] = self.pinPeaking() # this is a dummy variable for demonstration with MOF # check if something has been found if all(v is None for v in self.data.values()): @@ -211,7 +213,7 @@ def EOCEFPD(self): if not list_: return ValueError("No values returned. Check Simulate File executed correctly") else: - outputDict = {'info_ids':['MaxEFPD'], 'values': [list_[-1]] } + outputDict = {'info_ids':['MaxEFPD'], 'values': [float(1/list_[-1])]} return outputDict @@ -486,6 +488,42 @@ def burnupEOC(self): return outputDict + def fa_type(self): + ''' + Extracts the fuel type and calculates the fuel cost based on the amount and enrichment of each fuel type. + ''' + #fuel_type = [] + FAlist = [] + for line in self.lines: + if "'FUE.TYP'" in line: + p1 = line.index(",") + p2 = line.index("/") + search_space = line[p1:p2] + search_space = search_space.replace(",","") + tmp= search_space.split() + for ii in tmp: + FAlist.append(float(ii)) + FAtype = list(set(FAlist)) + FAlist_A = FAlist[0] + FAlist_B = FAlist[1:9] + FAlist[9:73:9] + FAlist_C = FAlist[10:18] + FAlist[19:27] + FAlist[28:36] + FAlist[37:45] + FAlist[46:54] + FAlist[55:63] + FAlist[64:72] + FAlist[73:81] + FAcount_A = [float(fa == FAlist_A) for fa in FAtype] + FAcount_B = [float(FAlist_B.count(fa)*2) for fa in FAtype] + FAcount_C = [float(FAlist_C.count(fa)*4) for fa in FAtype] + FAcount = [FAcount_A[j] + FAcount_B[j] + FAcount_C[j] for j in range(len(FAtype))] + print(FAcount) + #stop + #Considering that: FA type 0 is empty, type 1 reflector, type 2 2% enrichment, types 3 and 4 2.5% enrichment, and types 5 and 6 3.2% enrichment. The cost of burnable is not being considered + fuel_cost = (FAcount[0] + FAcount[1])*0 + FAcount[2]*4.94262343 + (FAcount[3] + FAcount[4])*5.67862599 + (FAcount[5] + FAcount[6])*6.7274349 + print(fuel_cost) + #fuel_type.append(float(search_space)) + #stop + if not fuel_cost: + return ValueError("No values returned. 
Check Simulate File executed correctly") + else: + outputDict = {'info_ids':['fuel_cost'], 'values': [fuel_cost]} + return outputDict + def writeCSV(self, fileout): """ Print Data into CSV format From c7aebf3e4b4220b4055a702b36d6ea23e3efe29f Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Thu, 6 Apr 2023 11:58:36 -0600 Subject: [PATCH 23/84] resolving diff based on different batch Size, thanks @wangcj05 --- ravenframework/Optimizers/GeneticAlgorithm.py | 14 +++++++---- .../constrained/MinwoRepMultiObjective.xml | 2 +- .../Multi_MinwoReplacement/opt_export_0.csv | 25 +++++++++---------- 3 files changed, 22 insertions(+), 19 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index e321cc0441..bcde391287 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -593,6 +593,8 @@ def _useRealization(self, info, rlz): ############################################################################## objs_vals = [list(ele) for ele in list(zip(*self.objectiveVal))] + ##TODO: remove all the plots and maybe design new plots in outstreams if our current cannot be used + ## These are currently for debugging purposes import matplotlib.pyplot as plt # JY: Visualization: all points - This code block needs to be either deleted or revisited. plt.plot(np.array(objs_vals)[:,0], np.array(objs_vals)[:,1],'*') @@ -604,11 +606,11 @@ def _useRealization(self, info, rlz): plt.ylim(0,6) plt.title(str('Iteration ' + str(self.counter-1))) - plt.plot(np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[:,0], - np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[:,1],'*') - for i in range(len(np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[:,0])): - plt.text(np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[i,0], - np.array(list(zip(self._optPointHistory[traj][-1][0]['obj1'], self._optPointHistory[traj][-1][0]['obj2'])))[i,1], str(self.batchId-1)) + plt.plot(np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[:,0], + np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[:,1],'*') + for i in range(len(np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[:,0])): + plt.text(np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[i,0], + np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[i,1], str(self.batchId-1)) # plt.pause() ############################################################################## @@ -810,6 +812,8 @@ def _resolveNewGenerationMulti(self, traj, rlz, info): coords={'chromosome':np.arange(np.shape(objVal)[0]), 'obj': self._objectiveVar}) if self._writeSteps == 'every': + print("### rlz.sizes['RAVEN_sample_ID'] = {}".format(rlz.sizes['RAVEN_sample_ID'])) + print("### self.population.shape is {}".format(self.population.shape)) for i in range(rlz.sizes['RAVEN_sample_ID']): rlzDict = dict((var,self.population.data[i][j]) for j, var in 
enumerate(self.population.Gene.data)) for j in range(len(self._objectiveVar)): diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml index d6695c1571..a1d7cd7452 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml @@ -11,7 +11,7 @@ Multi_MinwoReplacement optimize,print - 2 + 1 diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv index ecd64246ae..abca0e7d1a 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv +++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv @@ -1,14 +1,13 @@ x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,ConstraintEvaluation_expConstr3,ConstraintEvaluation_impConstr3,fitness,accepted -4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,0.0,2.0,1.0,2.0,1.0,6.0,0.0,accepted -7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,0.0,2.0,1.0,inf,2.0,14.0,0.0,accepted -4.0,5.0,7.0,2.0,3.0,6.0,94.0,14.0,0.0,2.0,2.0,inf,1.0,6.0,0.0,accepted -6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,0.0,2.0,1.0,inf,3.0,3.0,0.0,accepted -4.0,6.0,7.0,3.0,5.0,2.0,86.0,16.0,0.0,2.0,2.0,inf,0.0,14.0,0.0,accepted -7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,0.0,2.0,2.0,2.0,3.0,11.0,0.0,accepted -7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,0.0,2.0,3.0,inf,3.0,11.0,0.0,accepted -7.0,5.0,3.0,2.0,6.0,4.0,88.0,17.0,0.0,2.0,3.0,inf,5.0,12.0,0.0,accepted -4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,0.0,2.0,4.0,inf,1.0,10.0,0.0,accepted -7.0,4.0,3.0,2.0,5.0,6.0,93.0,15.0,0.0,2.0,4.0,inf,5.0,7.0,0.0,accepted -4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,0.0,2.0,1.0,2.0,1.0,6.0,0.0,final -7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,0.0,2.0,1.0,inf,2.0,14.0,0.0,final -6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,0.0,2.0,1.0,inf,3.0,3.0,0.0,final +7.0,3.0,5.0,4.0,6.0,2.0,86.0,13.0,1.0,2.0,1.0,inf,1.0,14.0,0.0,accepted +7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,1.0,2.0,2.0,inf,2.0,14.0,0.0,accepted +6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,1.0,2.0,1.0,inf,3.0,3.0,0.0,accepted +6.0,4.0,7.0,2.0,3.0,5.0,88.0,14.0,1.0,2.0,2.0,inf,1.0,12.0,0.0,accepted +6.0,4.0,3.0,7.0,5.0,2.0,88.0,14.0,1.0,2.0,3.0,inf,0.0,12.0,0.0,accepted +7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,1.0,2.0,4.0,inf,3.0,11.0,0.0,accepted +4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,1.0,2.0,4.0,inf,1.0,6.0,0.0,accepted +4.0,5.0,7.0,2.0,3.0,6.0,94.0,14.0,1.0,2.0,5.0,inf,1.0,6.0,0.0,accepted +7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,1.0,2.0,5.0,inf,3.0,11.0,0.0,accepted +4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,1.0,2.0,6.0,inf,1.0,10.0,0.0,accepted +7.0,3.0,5.0,4.0,6.0,2.0,86.0,13.0,1.0,2.0,1.0,inf,1.0,14.0,0.0,final +6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,1.0,2.0,1.0,inf,3.0,3.0,0.0,final From 64e97a9d5a497e2f7e882372e940c1da4e1bca3c Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Fri, 7 Apr 2023 18:25:01 -0600 Subject: [PATCH 24/84] converting SimukateData.py to unix line endings --- ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py index 7b9fc67d09..fd3c3db140 100644 --- 
a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py +++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py @@ -543,4 +543,3 @@ def writeCSV(self, fileout): index=index+1 numpy.savetxt(fileObject, outputMatrix.T, delimiter=',', header=','.join(headers), comments='') fileObject.close() - From b29661b4efbc4dc1b8dd6cc91f73b50932ce2d6c Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 11 Apr 2023 16:44:45 -0600 Subject: [PATCH 25/84] regolding to print all batches in MOO --- ravenframework/Optimizers/GeneticAlgorithm.py | 23 ++++++++++++------- .../unconstrained/ZDT1/opt_export_0.csv | 10 ++++++++ .../Multi_MinwoReplacement/opt_export_0.csv | 10 ++++++++ 3 files changed, 35 insertions(+), 8 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index bcde391287..ff3e020576 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -535,14 +535,14 @@ def _useRealization(self, info, rlz): else: g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint) - Fitness = np.zeros((len(offSprings), 1)) - for i in range(len(Fitness)): - Fitness[i] = countConstViolation(g.data[i]) - Fitness = [item for sublist in Fitness.tolist() for item in sublist] + fitness = np.zeros((len(offSprings), 1)) + for i in range(len(fitness)): + fitness[i] = countConstViolation(g.data[i]) + fitness = [item for sublist in fitness.tolist() for item in sublist] - Fitness = xr.DataArray(Fitness, + fitness = xr.DataArray(fitness, dims=['NumOfConstraintViolated'], - coords={'NumOfConstraintViolated':np.arange(np.shape(Fitness)[0])}) + coords={'NumOfConstraintViolated':np.arange(np.shape(fitness)[0])}) # 0.2@ n-1: Survivor selection(rlz) # update population container given obtained children @@ -576,7 +576,7 @@ def _useRealization(self, info, rlz): popObjectiveVal=self.objectiveVal, offObjectiveVal=objectiveVal, popConst = self.constraints, - offConst = Fitness, + offConst = fitness, popConstV = self.constraintsV, offConstV = g ) @@ -616,7 +616,7 @@ def _useRealization(self, info, rlz): else: self.population = offSprings - self.constraints = Fitness + self.constraints = fitness self.constraintsV = g self.rank, self.crowdingDistance = self._fitnessInstance(rlz, objVals = self._objectiveVar @@ -625,6 +625,13 @@ def _useRealization(self, info, rlz): for i in range(len(self._objectiveVar)): self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + self._collectOptPointMulti(self.population, + self.rank, + self.crowdingDistance, + self.objectiveVal, + self.constraints, + self.constraintsV) + self._resolveNewGenerationMulti(traj, rlz, info) # 1 @ n: Parent selection from population # pair parents together by indexes diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv index a4e2cdcdce..39e05c235f 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv +++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv @@ -1,4 +1,14 @@ x1,x2,x3,obj1,obj2,age,batchId,rank,CD,fitness,accepted +0.902940987587,0.947612243227,0.840374259707,0.902940987587,3.96681957049,0.0,1.0,3.0,1.79769313486e+308,0.0,first 
+0.227236453264,0.847510234417,0.362760231915,0.227236453264,3.60499993579,0.0,1.0,2.0,1.79769313486e+308,0.0,first +0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,1.0,1.0,1.79769313486e+308,0.0,first +0.633202111729,0.793545654927,0.564774226762,0.633202111729,3.28234279694,0.0,1.0,2.0,2.0,0.0,first +0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,1.0,1.0,1.32735741676,0.0,first +0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,1.0,1.0,1.79769313486e+308,0.0,first +0.331692186261,0.571854743308,0.965348788995,0.331692186261,4.24730587019,0.0,1.0,3.0,1.79769313486e+308,0.0,first +0.267873673297,0.166777967281,0.847808119107,0.267873673297,3.00298144409,0.0,1.0,1.0,0.749061564967,0.0,first +0.713407223745,0.641621648018,0.334449647072,0.713407223745,2.25417202135,0.0,1.0,2.0,1.79769313486e+308,0.0,first +0.13264102096,0.630711117673,0.405532905694,0.13264102096,3.37050011696,0.0,1.0,1.0,0.672642583243,0.0,first 0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,2.0,1.0,inf,0.0,accepted 0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,2.0,1.0,inf,0.0,accepted 0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,2.0,1.0,1.17548106192,0.0,accepted diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv index abca0e7d1a..2ec6bec9a9 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv +++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv @@ -1,4 +1,14 @@ x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,ConstraintEvaluation_expConstr3,ConstraintEvaluation_impConstr3,fitness,accepted +4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,0.0,1.0,2.0,1.79769313486e+308,-2.0,1.0,1.0,first +7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,0.0,1.0,2.0,1.79769313486e+308,3.0,11.0,0.0,first +4.0,3.0,6.0,7.0,2.0,5.0,96.0,10.0,0.0,1.0,1.0,1.79769313486e+308,-3.0,4.0,1.0,first +7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,0.0,1.0,1.0,1.79769313486e+308,2.0,14.0,0.0,first +4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,0.0,1.0,3.0,1.79769313486e+308,1.0,10.0,0.0,first +4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,0.0,1.0,4.0,1.79769313486e+308,3.0,5.0,0.0,first +4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,0.0,1.0,5.0,1.79769313486e+308,3.0,5.0,0.0,first +6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,0.0,1.0,2.0,2.0,3.0,3.0,0.0,first +4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,0.0,1.0,1.0,2.0,1.0,6.0,0.0,first +2.0,4.0,6.0,5.0,3.0,7.0,105.0,10.0,0.0,1.0,3.0,1.79769313486e+308,-1.0,-5.0,2.0,first 7.0,3.0,5.0,4.0,6.0,2.0,86.0,13.0,1.0,2.0,1.0,inf,1.0,14.0,0.0,accepted 7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,1.0,2.0,2.0,inf,2.0,14.0,0.0,accepted 6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,1.0,2.0,1.0,inf,3.0,3.0,0.0,accepted From 96269569d7576db900291fc952384771d895ddfa Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 11 Apr 2023 22:28:24 -0600 Subject: [PATCH 26/84] slight mods --- .../CodeInterfaceClasses/SIMULATE3/SimulateData.py | 7 +++++-- ravenframework/Optimizers/GeneticAlgorithm.py | 9 ++++++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py index fd3c3db140..4c7134e652 100644 --- 
a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py +++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py @@ -514,10 +514,13 @@ def fa_type(self): print(FAcount) #stop #Considering that: FA type 0 is empty, type 1 reflector, type 2 2% enrichment, types 3 and 4 2.5% enrichment, and types 5 and 6 3.2% enrichment. The cost of burnable is not being considered - fuel_cost = (FAcount[0] + FAcount[1])*0 + FAcount[2]*4.94262343 + (FAcount[3] + FAcount[4])*5.67862599 + (FAcount[5] + FAcount[6])*6.7274349 + if len(FAcount) == 7: + fuel_cost = (FAcount[0] + FAcount[1])*0 + FAcount[2]*4.94262343 + (FAcount[3] + FAcount[4])*5.67862599 + (FAcount[5] + FAcount[6])*6.7274349 + else: + fuel_cost = (FAcount[0] + FAcount[1])*0 + FAcount[2]*4.94262343 + (FAcount[3] + FAcount[4])*5.67862599 + (FAcount[5])*6.7274349 print(fuel_cost) #fuel_type.append(float(search_space)) - #stop + #stop if not fuel_cost: return ValueError("No values returned. Check Simulate File executed correctly") else: diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index ff3e020576..b36b2a4607 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -602,8 +602,8 @@ def _useRealization(self, info, rlz): # JY: Visualization: optimal points only - This code block needs to be either deleted or revisited. # plt.xlim(75,100) # plt.ylim(5,20) - plt.xlim(0,1) - plt.ylim(0,6) + # plt.xlim(0,1) + # plt.ylim(0,6) plt.title(str('Iteration ' + str(self.counter-1))) plt.plot(np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[:,0], @@ -611,7 +611,7 @@ def _useRealization(self, info, rlz): for i in range(len(np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[:,0])): plt.text(np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[i,0], np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[i,1], str(self.batchId-1)) - # plt.pause() + plt.savefig('PF.png') ############################################################################## else: @@ -830,6 +830,9 @@ def _resolveNewGenerationMulti(self, traj, rlz, info): rlzDict['fitness'] = np.atleast_1d(self.constraints.data)[i] for ind, consName in enumerate([y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]): rlzDict['ConstraintEvaluation_'+consName] = self.constraintsV.data[i,ind] + for var in set(rlz.keys())-set(rlzDict.keys()): + if var not in ['batchId','prefix'] and not var.__contains__('Prob'): + rlzDict[var] = rlz[var] self._updateSolutionExport(traj, rlzDict, acceptable, None) # decide what to do next From 34d5cb236542881375e3ebceee7e9f8b1156b814 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Wed, 12 Apr 2023 15:31:04 -0600 Subject: [PATCH 27/84] regolding and reverting inf in fitness --- ravenframework/Optimizers/fitness/fitness.py | 1 - .../unconstrained/ZDT1/opt_export_0.csv | 12 ++++++------ .../Multi_MinwoReplacement/opt_export_0.csv | 16 ++++++++-------- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index d2481a34b6..1770ef02cc 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ 
b/ravenframework/Optimizers/fitness/fitness.py @@ -103,7 +103,6 @@ def rank_crowding(rlz,**kwargs): coords={'rank': np.arange(np.shape(offSpringRank)[0])}) offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, popSize=len(offSpringRank), objectives=np.array(offspringObjsVals)) - offSpringCD[offSpringCD==np.inf] = sys.float_info.max offSpringCD = xr.DataArray(offSpringCD, dims=['CrowdingDistance'], coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv index 39e05c235f..25ec63951b 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv +++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv @@ -1,13 +1,13 @@ x1,x2,x3,obj1,obj2,age,batchId,rank,CD,fitness,accepted -0.902940987587,0.947612243227,0.840374259707,0.902940987587,3.96681957049,0.0,1.0,3.0,1.79769313486e+308,0.0,first -0.227236453264,0.847510234417,0.362760231915,0.227236453264,3.60499993579,0.0,1.0,2.0,1.79769313486e+308,0.0,first -0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,1.0,1.0,1.79769313486e+308,0.0,first +0.902940987587,0.947612243227,0.840374259707,0.902940987587,3.96681957049,0.0,1.0,3.0,inf,0.0,first +0.227236453264,0.847510234417,0.362760231915,0.227236453264,3.60499993579,0.0,1.0,2.0,inf,0.0,first +0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,1.0,1.0,inf,0.0,first 0.633202111729,0.793545654927,0.564774226762,0.633202111729,3.28234279694,0.0,1.0,2.0,2.0,0.0,first 0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,1.0,1.0,1.32735741676,0.0,first -0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,1.0,1.0,1.79769313486e+308,0.0,first -0.331692186261,0.571854743308,0.965348788995,0.331692186261,4.24730587019,0.0,1.0,3.0,1.79769313486e+308,0.0,first +0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,1.0,1.0,inf,0.0,first +0.331692186261,0.571854743308,0.965348788995,0.331692186261,4.24730587019,0.0,1.0,3.0,inf,0.0,first 0.267873673297,0.166777967281,0.847808119107,0.267873673297,3.00298144409,0.0,1.0,1.0,0.749061564967,0.0,first -0.713407223745,0.641621648018,0.334449647072,0.713407223745,2.25417202135,0.0,1.0,2.0,1.79769313486e+308,0.0,first +0.713407223745,0.641621648018,0.334449647072,0.713407223745,2.25417202135,0.0,1.0,2.0,inf,0.0,first 0.13264102096,0.630711117673,0.405532905694,0.13264102096,3.37050011696,0.0,1.0,1.0,0.672642583243,0.0,first 0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,2.0,1.0,inf,0.0,accepted 0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,2.0,1.0,inf,0.0,accepted diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv index 2ec6bec9a9..dc39c524eb 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv +++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv @@ -1,14 +1,14 @@ 
x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,ConstraintEvaluation_expConstr3,ConstraintEvaluation_impConstr3,fitness,accepted -4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,0.0,1.0,2.0,1.79769313486e+308,-2.0,1.0,1.0,first -7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,0.0,1.0,2.0,1.79769313486e+308,3.0,11.0,0.0,first -4.0,3.0,6.0,7.0,2.0,5.0,96.0,10.0,0.0,1.0,1.0,1.79769313486e+308,-3.0,4.0,1.0,first -7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,0.0,1.0,1.0,1.79769313486e+308,2.0,14.0,0.0,first -4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,0.0,1.0,3.0,1.79769313486e+308,1.0,10.0,0.0,first -4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,0.0,1.0,4.0,1.79769313486e+308,3.0,5.0,0.0,first -4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,0.0,1.0,5.0,1.79769313486e+308,3.0,5.0,0.0,first +4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,0.0,1.0,2.0,inf,-2.0,1.0,1.0,first +7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,0.0,1.0,2.0,inf,3.0,11.0,0.0,first +4.0,3.0,6.0,7.0,2.0,5.0,96.0,10.0,0.0,1.0,1.0,inf,-3.0,4.0,1.0,first +7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,0.0,1.0,1.0,inf,2.0,14.0,0.0,first +4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,0.0,1.0,3.0,inf,1.0,10.0,0.0,first +4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,0.0,1.0,4.0,inf,3.0,5.0,0.0,first +4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,0.0,1.0,5.0,inf,3.0,5.0,0.0,first 6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,0.0,1.0,2.0,2.0,3.0,3.0,0.0,first 4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,0.0,1.0,1.0,2.0,1.0,6.0,0.0,first -2.0,4.0,6.0,5.0,3.0,7.0,105.0,10.0,0.0,1.0,3.0,1.79769313486e+308,-1.0,-5.0,2.0,first +2.0,4.0,6.0,5.0,3.0,7.0,105.0,10.0,0.0,1.0,3.0,inf,-1.0,-5.0,2.0,first 7.0,3.0,5.0,4.0,6.0,2.0,86.0,13.0,1.0,2.0,1.0,inf,1.0,14.0,0.0,accepted 7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,1.0,2.0,2.0,inf,2.0,14.0,0.0,accepted 6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,1.0,2.0,1.0,inf,3.0,3.0,0.0,accepted From e0df314aadfee6d6280560e359bad987ba1e40cd Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Wed, 12 Apr 2023 15:35:02 -0600 Subject: [PATCH 28/84] trying to add all outputs to the rlz --- ravenframework/Optimizers/GeneticAlgorithm.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index b36b2a4607..a2b2c9b134 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -822,7 +822,10 @@ def _resolveNewGenerationMulti(self, traj, rlz, info): print("### rlz.sizes['RAVEN_sample_ID'] = {}".format(rlz.sizes['RAVEN_sample_ID'])) print("### self.population.shape is {}".format(self.population.shape)) for i in range(rlz.sizes['RAVEN_sample_ID']): - rlzDict = dict((var,self.population.data[i][j]) for j, var in enumerate(self.population.Gene.data)) + varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys()) + rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) + + # rlzDict = dict((var,self.population.data[i][j]) for j, var in enumerate(self.population.Gene.data)) for j in range(len(self._objectiveVar)): rlzDict[self._objectiveVar[j]] = objVal.data[i][j] rlzDict['rank'] = np.atleast_1d(self.rank.data)[i] From c0476f703b832ea1c582b806a13953df60e44648 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Wed, 12 Apr 2023 19:25:53 -0600 Subject: [PATCH 29/84] adding everything to bestPoint --- ravenframework/Optimizers/GeneticAlgorithm.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index a2b2c9b134..031550a20f 100644 --- 
a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -611,7 +611,7 @@ def _useRealization(self, info, rlz): for i in range(len(np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[:,0])): plt.text(np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[i,0], np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[i,1], str(self.batchId-1)) - plt.savefig('PF.png') + plt.savefig('PF'+str(i)+'.png') ############################################################################## else: @@ -833,15 +833,14 @@ def _resolveNewGenerationMulti(self, traj, rlz, info): rlzDict['fitness'] = np.atleast_1d(self.constraints.data)[i] for ind, consName in enumerate([y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]): rlzDict['ConstraintEvaluation_'+consName] = self.constraintsV.data[i,ind] - for var in set(rlz.keys())-set(rlzDict.keys()): - if var not in ['batchId','prefix'] and not var.__contains__('Prob'): - rlzDict[var] = rlz[var] self._updateSolutionExport(traj, rlzDict, acceptable, None) # decide what to do next if acceptable in ['accepted', 'first']: # record history bestRlz = {} + varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys()) + bestRlz = dict((var,np.atleast_1d(rlz[var].data)) for var in set(varList) if var in rlz.data_vars) for i in range(len(self._objectiveVar)): bestRlz[self._objectiveVar[i]] = [item[i] for item in self.multiBestObjective] bestRlz['fitness'] = self.multiBestFitness From 81dc58024df90ccb4a1ff999f53816425bd20e90 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Thu, 13 Apr 2023 14:39:01 -0600 Subject: [PATCH 30/84] chenging type==str to len(self._objectVar) == 1 --- ravenframework/Optimizers/GeneticAlgorithm.py | 2 +- ravenframework/Optimizers/RavenSampled.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 031550a20f..a77557e0fc 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -611,7 +611,7 @@ def _useRealization(self, info, rlz): for i in range(len(np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[:,0])): plt.text(np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[i,0], np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[i,1], str(self.batchId-1)) - plt.savefig('PF'+str(i)+'.png') + plt.savefig('PF'+str(i)+'_'+str(self.counter-1)+'.png') ############################################################################## else: diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py index 70b0518c3d..3727f0d41c 100644 --- a/ravenframework/Optimizers/RavenSampled.py +++ b/ravenframework/Optimizers/RavenSampled.py @@ -408,7 +408,7 @@ def finalizeSampler(self, failedRuns): self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! 
' + 'Perhaps the Model failed?') - if type(self._objectiveVar) == str: + if len(self._objectiveVar) == 1: opt = self._optPointHistory[traj][-1][0] val = opt[self._objectiveVar] self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val)) @@ -576,7 +576,7 @@ def _handleImplicitConstraints(self, previous): @ Out, accept, bool, whether point was satisfied implicit constraints """ normed = copy.deepcopy(previous) - if type(self._objectiveVar) == str: + if len(self._objectiveVar) == 1: oldVal = normed[self._objectiveVar] else: oldVal = normed[self._objectiveVar[0]] @@ -650,7 +650,7 @@ def _resolveNewOptPoint(self, traj, rlz, optVal, info): # TODO could we ever use old rerun gradients to inform the gradient direction as well? self._rerunsSinceAccept[traj] += 1 N = self._rerunsSinceAccept[traj] + 1 - if type(self._objectiveVar) == str: + if len(self._objectiveVar) == 1: oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar] else: oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar[0]] From 3f27965b456f28dc644d439bf35c63b9970dec3b Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 18 Apr 2023 13:11:26 -0600 Subject: [PATCH 31/84] removing unnecessary if statement, this needs revisiting --- ravenframework/Optimizers/RavenSampled.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py index 3727f0d41c..82505cf3e2 100644 --- a/ravenframework/Optimizers/RavenSampled.py +++ b/ravenframework/Optimizers/RavenSampled.py @@ -576,10 +576,7 @@ def _handleImplicitConstraints(self, previous): @ Out, accept, bool, whether point was satisfied implicit constraints """ normed = copy.deepcopy(previous) - if len(self._objectiveVar) == 1: - oldVal = normed[self._objectiveVar] - else: - oldVal = normed[self._objectiveVar[0]] + oldVal = normed[self._objectiveVar[0]] normed.pop(self._objectiveVar[0], oldVal) denormed = self.denormalizeData(normed) denormed[self._objectiveVar[0]] = oldVal @@ -651,7 +648,7 @@ def _resolveNewOptPoint(self, traj, rlz, optVal, info): self._rerunsSinceAccept[traj] += 1 N = self._rerunsSinceAccept[traj] + 1 if len(self._objectiveVar) == 1: - oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar] + oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar[0]] else: oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar[0]] newAvg = ((N-1)*oldVal + optVal) / N From facf74e9c5e87b3e3e7a37c05308129b35e5b9cd Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Thu, 20 Apr 2023 13:39:38 -0600 Subject: [PATCH 32/84] modifying reverting cycle length to its value not the inverse --- ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py index 4c7134e652..f66f479581 100644 --- a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py +++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py @@ -213,7 +213,7 @@ def EOCEFPD(self): if not list_: return ValueError("No values returned. 
Check Simulate File executed correctly") else: - outputDict = {'info_ids':['MaxEFPD'], 'values': [float(1/list_[-1])]} + outputDict = {'info_ids':['MaxEFPD'], 'values': [float(list_[-1])]} return outputDict From a92049c0b723a500f31f64b46a9a24b082762f00 Mon Sep 17 00:00:00 2001 From: Kim Date: Mon, 12 Jun 2023 10:48:20 -0600 Subject: [PATCH 33/84] simulateData updating cost model. --- .../CodeInterfaceClasses/SIMULATE3/SimulateData.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py index f66f479581..18044a736b 100644 --- a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py +++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py @@ -213,7 +213,7 @@ def EOCEFPD(self): if not list_: return ValueError("No values returned. Check Simulate File executed correctly") else: - outputDict = {'info_ids':['MaxEFPD'], 'values': [float(list_[-1])]} + outputDict = {'info_ids':['MaxEFPD'], 'values': [list_[-1]]} return outputDict @@ -515,9 +515,9 @@ def fa_type(self): #stop #Considering that: FA type 0 is empty, type 1 reflector, type 2 2% enrichment, types 3 and 4 2.5% enrichment, and types 5 and 6 3.2% enrichment. The cost of burnable is not being considered if len(FAcount) == 7: - fuel_cost = (FAcount[0] + FAcount[1])*0 + FAcount[2]*4.94262343 + (FAcount[3] + FAcount[4])*5.67862599 + (FAcount[5] + FAcount[6])*6.7274349 + fuel_cost = (FAcount[0] + FAcount[1])*0 + FAcount[2]*2.69520839 + (FAcount[3] + FAcount[4])*3.24678409 + (FAcount[5] + FAcount[6])*4.03739539 else: - fuel_cost = (FAcount[0] + FAcount[1])*0 + FAcount[2]*4.94262343 + (FAcount[3] + FAcount[4])*5.67862599 + (FAcount[5])*6.7274349 + fuel_cost = (FAcount[0] + FAcount[1])*0 + FAcount[2]*2.69520839 + (FAcount[3] + FAcount[4])*3.24678409 + (FAcount[5])*4.03739539 print(fuel_cost) #fuel_type.append(float(search_space)) #stop From 0faeb9c9157eb90f40fa95585fbc5eb05ab8c916 Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Sat, 15 Jul 2023 11:13:57 -0600 Subject: [PATCH 34/84] minor change is made in ZDT1 test. --- .../Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py index ca53da447d..829307f73e 100644 --- a/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py +++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py @@ -23,7 +23,7 @@ def evaluate(Inputs): for ind,var in enumerate(Inputs.keys()): # write the objective function here if (ind == 0) : - obj1 += Inputs[var] + obj1 = Inputs[var] if (ind != 0): Sum += Inputs[var] g = 1 + (9/len(Inputs.keys())*Sum ) @@ -39,3 +39,5 @@ def run(self,Inputs): @ Out, None """ self.obj1,self.obj2 = evaluate(Inputs) + + From dbad22c1a00890fce2ab59cbf83b49e41a2d4768 Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Thu, 27 Jul 2023 08:52:36 -0600 Subject: [PATCH 35/84] myConstraints for MultiSum is updated. 
--- .../MultiSumwConst/myConstraints.py | 34 ++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py index f71254ec67..ec32efb940 100644 --- a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py +++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py @@ -105,4 +105,36 @@ def impConstr3(Input): @ out, g, float, implicit constraint 3 evaluation function """ g = 100 - Input.obj1 - return g \ No newline at end of file + return g + +def impConstr4(Input): + """ + The implicit constraint involves variables from the output space, for example the objective variable or + a dependent variable that is not in the optimization search space + @ In, Input, object, RAVEN container + @ out, g, float, implicit constraint 3 evaluation function + """ + g = Input.obj2 - 20 + return g + + """ + Evaluates the implicit constraint function at a given point/solution ($\vec(x)$) + @ In, Input, object, RAVEN container + @ Out, g(inputs x1,x2,..,output or dependent variable), float, implicit constraint evaluation function + the way the constraint is designed is that + the constraint function has to be >= 0, + so if: + 1) f(x,y) >= 0 then g = f + 2) f(x,y) >= a then g = f - a + 3) f(x,y) <= b then g = b - f + 4) f(x,y) = c then g = 1e-6 - abs((f(x,y) - c)) (equality constraint) + """ + """ + Let's assume that the constraint is: + $ x3+x4 < 8 $ + then g the constraint evaluation function (which has to be > 0) is taken to be: + g = 8 - (x3+x4) + in this case if g(\vec(x)) < 0 then this x violates the constraint and vice versa + @ In, Input, object, RAVEN container + @ out, g, float, explicit constraint 1 evaluation function + """ \ No newline at end of file From 699b3deede03040f78012d92f4bc50a1e1a40838 Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Mon, 7 Aug 2023 18:45:20 -0600 Subject: [PATCH 36/84] Two issues are resolved: population and objective value mismatch, min/max gives effects to constraints. --- ravenframework/Optimizers/GeneticAlgorithm.py | 11 +++++++---- ravenframework/Optimizers/RavenSampled.py | 3 ++- .../MultiSumwConst/MinwoRepMultiObjective.xml | 8 ++++---- .../constrained/MultiSumwConst/myConstraints.py | 12 +++++++++++- 4 files changed, 24 insertions(+), 10 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index a77557e0fc..566bea0ba1 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -363,7 +363,7 @@ def handleInput(self, paramInput): # Currently, only InvLin and feasibleFirst Fitnesses deal with constrained optimization # TODO: @mandd, please explore the possibility to convert the logistic fitness into a constrained optimization fitness. 
if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','feasibleFirst','rank_crowding']: - self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear and feasibleFirst fitnesses, whereas provided fitness is {self._fitnessType}') + self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear, feasibleFirst and rank_crowding as a fitness, whereas provided fitness is {self._fitnessType}') self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None self._fitnessInstance = fitnessReturnInstance(self,name = self._fitnessType) @@ -525,7 +525,10 @@ def _useRealization(self, info, rlz): for index,individual in enumerate(offSprings): newOpt = individual + objOpt = dict(zip(self._objectiveVar, + list(map(lambda x:-1 if x=="max" else 1 , self._minMax)))) opt = dict(zip(self._objectiveVar, [item[index] for item in objectiveVal])) + opt = {k: objOpt[k]*opt[k] for k in opt} for p, v in constraintData.items(): opt[p] = v[index] @@ -823,9 +826,9 @@ def _resolveNewGenerationMulti(self, traj, rlz, info): print("### self.population.shape is {}".format(self.population.shape)) for i in range(rlz.sizes['RAVEN_sample_ID']): varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys()) - rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) - - # rlzDict = dict((var,self.population.data[i][j]) for j, var in enumerate(self.population.Gene.data)) + # rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) + rlzDict = dict((var,self.population.data[i][j]) for j, var in enumerate(self.population.Gene.data)) + rlzDict['batchId'] = rlz['batchId'][i] for j in range(len(self._objectiveVar)): rlzDict[self._objectiveVar[j]] = objVal.data[i][j] rlzDict['rank'] = np.atleast_1d(self.rank.data)[i] diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py index 82505cf3e2..1f12715d9d 100644 --- a/ravenframework/Optimizers/RavenSampled.py +++ b/ravenframework/Optimizers/RavenSampled.py @@ -721,6 +721,7 @@ def _updateSolutionExport(self, traj, rlz, acceptable, rejectReason): if 'max' in self._minMax: objValue *= -1 toExport[self._objectiveVar[0]] = objValue + else: # Multi Objective Optimization for i in range(len(self._objectiveVar)): objValue = rlz[self._objectiveVar[i]] @@ -739,7 +740,7 @@ def _updateSolutionExport(self, traj, rlz, acceptable, rejectReason): toExport[var] = rlz[var] # formatting toExport = dict((var, np.atleast_1d(val)) for var, val in toExport.items()) - self._solutionExport.addRealization(toExport) + self._solutionExport.addRealization(toExport) # It will stop when it is final def _addToSolutionExport(self, traj, rlz, acceptable): """ diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml index a32463eb13..700d248103 100644 --- a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml +++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml @@ -57,14 +57,14 @@ - 2 + 3 42 every - min + min, max - 10 + 15 tournamentSelection @@ -110,7 +110,7 @@ - 
10 + 15 050877 diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py index ec32efb940..4d3b5f51c9 100644 --- a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py +++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py @@ -114,7 +114,17 @@ def impConstr4(Input): @ In, Input, object, RAVEN container @ out, g, float, implicit constraint 3 evaluation function """ - g = Input.obj2 - 20 + g = Input.obj2 - 16 + return g + +def impConstr5(Input): + """ + The implicit constraint involves variables from the output space, for example the objective variable or + a dependent variable that is not in the optimization search space + @ In, Input, object, RAVEN container + @ out, g, float, implicit constraint 3 evaluation function + """ + g = 200 - Input.obj1 return g """ From 8cffedbeaff82f0f04efa79a1043adc7aaa27426 Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Tue, 8 Aug 2023 15:32:46 -0600 Subject: [PATCH 37/84] minor things are corrected. Nothing important. --- ravenframework/Optimizers/RavenSampled.py | 3 +-- ravenframework/utils/frontUtils.py | 10 ++-------- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py index 1f12715d9d..82505cf3e2 100644 --- a/ravenframework/Optimizers/RavenSampled.py +++ b/ravenframework/Optimizers/RavenSampled.py @@ -721,7 +721,6 @@ def _updateSolutionExport(self, traj, rlz, acceptable, rejectReason): if 'max' in self._minMax: objValue *= -1 toExport[self._objectiveVar[0]] = objValue - else: # Multi Objective Optimization for i in range(len(self._objectiveVar)): objValue = rlz[self._objectiveVar[i]] @@ -740,7 +739,7 @@ def _updateSolutionExport(self, traj, rlz, acceptable, rejectReason): toExport[var] = rlz[var] # formatting toExport = dict((var, np.atleast_1d(val)) for var, val in toExport.items()) - self._solutionExport.addRealization(toExport) # It will stop when it is final + self._solutionExport.addRealization(toExport) def _addToSolutionExport(self, traj, rlz, acceptable): """ diff --git a/ravenframework/utils/frontUtils.py b/ravenframework/utils/frontUtils.py index 8bb0e11c72..94b4b3efd6 100644 --- a/ravenframework/utils/frontUtils.py +++ b/ravenframework/utils/frontUtils.py @@ -80,7 +80,7 @@ def rankNonDominatedFrontiers(data): @ out, nonDominatedRank, list, a list of length nPoints that has the ranking of the front passing through each point """ - ## tentative code block start + # tentative code block start # import matplotlib.pyplot as plt # from mpl_toolkits.mplot3d import Axes3D # xdata = [item[0] for item in data] @@ -109,6 +109,7 @@ def rankNonDominatedFrontiers(data): # ax3d.text(x, y, z, label) # plt.title("Data") # plt.show() + return nonDominatedRank def crowdingDistance(rank, popSize, objectives): @@ -119,13 +120,6 @@ def crowdingDistance(rank, popSize, objectives): @ In, objectives, np.array, matrix contains objective values for each element of the population @ Out, crowdDist, np.array, array of crowding distances """ - # # tentative code block start - # import matplotlib.pyplot as plt - # from mpl_toolkits.mplot3d import Axes3D - # xdata = [item[0] for item in objectives] - # ydata = [item[1] for item in objectives] - # zdata = [item[2] for item in objectives] - crowdDist = np.zeros(popSize) fronts = np.unique(rank) fronts = fronts[fronts!=np.inf] From 
9f4eecdc018fd24aaa8b21a92ecf350642a9a742 Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Tue, 8 Aug 2023 15:36:44 -0600 Subject: [PATCH 38/84] Additional minor changes are made. Nothing important. --- ravenframework/utils/frontUtils.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/ravenframework/utils/frontUtils.py b/ravenframework/utils/frontUtils.py index 94b4b3efd6..8302ed247a 100644 --- a/ravenframework/utils/frontUtils.py +++ b/ravenframework/utils/frontUtils.py @@ -80,7 +80,7 @@ def rankNonDominatedFrontiers(data): @ out, nonDominatedRank, list, a list of length nPoints that has the ranking of the front passing through each point """ - # tentative code block start + ## tentative code block start # import matplotlib.pyplot as plt # from mpl_toolkits.mplot3d import Axes3D # xdata = [item[0] for item in data] @@ -109,7 +109,6 @@ def rankNonDominatedFrontiers(data): # ax3d.text(x, y, z, label) # plt.title("Data") # plt.show() - return nonDominatedRank def crowdingDistance(rank, popSize, objectives): @@ -120,6 +119,13 @@ def crowdingDistance(rank, popSize, objectives): @ In, objectives, np.array, matrix contains objective values for each element of the population @ Out, crowdDist, np.array, array of crowding distances """ + # # tentative code block start + # import matplotlib.pyplot as plt + # from mpl_toolkits.mplot3d import Axes3D + # xdata = [item[0] for item in data] + # ydata = [item[1] for item in data] + # zdata = [item[2] for item in data] + crowdDist = np.zeros(popSize) fronts = np.unique(rank) fronts = fronts[fronts!=np.inf] From 2487621433fd08755a5cb371118ea33141d4c5cb Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Tue, 8 Aug 2023 15:38:46 -0600 Subject: [PATCH 39/84] Additional minor change is made. Nothing important. --- ravenframework/utils/frontUtils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ravenframework/utils/frontUtils.py b/ravenframework/utils/frontUtils.py index 8302ed247a..8bb0e11c72 100644 --- a/ravenframework/utils/frontUtils.py +++ b/ravenframework/utils/frontUtils.py @@ -122,9 +122,9 @@ def crowdingDistance(rank, popSize, objectives): # # tentative code block start # import matplotlib.pyplot as plt # from mpl_toolkits.mplot3d import Axes3D - # xdata = [item[0] for item in data] - # ydata = [item[1] for item in data] - # zdata = [item[2] for item in data] + # xdata = [item[0] for item in objectives] + # ydata = [item[1] for item in objectives] + # zdata = [item[2] for item in objectives] crowdDist = np.zeros(popSize) fronts = np.unique(rank) From 3657634f61ad1d93a5c95b615bd40ca55f3e19d0 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Wed, 30 Aug 2023 10:08:19 -0600 Subject: [PATCH 40/84] fitness data structure is changed from data xarray to dataSet. It works well with sinlge objective optimization. 
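With this change the offspring's non-dominated rank and crowding distance are computed directly through frontUtils inside GeneticAlgorithm.py (see the hunks below) rather than being returned by the fitness instance. A small standalone sketch of those two calls, using the same signatures that appear in the diff; the absolute import path and the toy objective values are assumptions made only for illustration:

import numpy as np
from ravenframework.utils import frontUtils  # assumed absolute path for a standalone script

# toy population: 4 candidates, 2 objectives, both minimized
objs = np.array([[1.0, 4.0],
                 [2.0, 3.0],
                 [3.0, 3.5],
                 [4.0, 1.0]])
# rank 1 denotes the first (best) Pareto front
rank = np.asarray(frontUtils.rankNonDominatedFrontiers(objs))   # e.g. [1, 1, 2, 1]
# crowding distance per candidate; boundary points of each front get inf
cd = frontUtils.crowdingDistance(rank=rank, popSize=len(rank), objectives=objs)
print(rank, cd)

The infinite crowding distance on front boundaries is what appears as inf in the exported opt_export CSVs.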
--- ravenframework/Optimizers/GeneticAlgorithm.py | 83 +++++++++++------ ravenframework/Optimizers/fitness/fitness.py | 93 +++++++++++++++---- .../parentSelectors/parentSelectors.py | 19 ++-- .../survivorSelectors/survivorSelectors.py | 15 ++- .../constrained/MinwoRepMultiObjective.xml | 14 +-- .../constrained/testGAMinwRepConstrained.xml | 10 +- 6 files changed, 164 insertions(+), 70 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 566bea0ba1..e6276f9020 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -142,7 +142,7 @@ def getInputSpecification(cls): \item tournamentSelection. \item rankSelection. \end{itemize} - \item Reproduction: + \item Reproduction: \begin{itemize} \item crossover: \begin{itemize} @@ -158,11 +158,16 @@ def getInputSpecification(cls): \item bitFlipMutator. \end{itemize} \end{itemize} - \item survivorSelectors: + \item survivorSelectors: \begin{itemize} \item ageBased. \item fitnessBased. \end{itemize} + \item constraintHandling: + \begin{itemize} + \item hard. + \item soft. + \end{itemize} \end{itemize}""") # Population Size populationSize = InputData.parameterInputFactory('populationSize', strictMode=True, @@ -170,6 +175,14 @@ def getInputSpecification(cls): printPriority=108, descr=r"""The number of chromosomes in each population.""") GAparams.addSub(populationSize) + + # Constraint Handling + constraintHandling = InputData.parameterInputFactory('constraintHandling', strictMode=True, + contentType=InputTypes.StringType, + printPriority=108, + descr=r"""a node indicating whether GA will handle constraints hardly or softly.""") + GAparams.addSub(constraintHandling) + # Parent Selection parentSelection = InputData.parameterInputFactory('parentSelection', strictMode=True, contentType=InputTypes.StringType, @@ -358,11 +371,9 @@ def handleInput(self, paramInput): # Fitness fitnessNode = gaParamsNode.findFirst('fitness') self._fitnessType = fitnessNode.parameterValues['type'] - # Check if the fitness requested is among the constrained optimization fitnesses - # Currently, only InvLin and feasibleFirst Fitnesses deal with constrained optimization # TODO: @mandd, please explore the possibility to convert the logistic fitness into a constrained optimization fitness. - if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','feasibleFirst','rank_crowding']: + if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','feasibleFirst','hardConstraint']: self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear, feasibleFirst and rank_crowding as a fitness, whereas provided fitness is {self._fitnessType}') self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None @@ -463,16 +474,15 @@ def _useRealization(self, info, rlz): for y in (self._constraintFunctions + self._impConstraintFunctions): params += y.parameterNames() for p in list(set(params) -set([self._objectiveVar[0]]) -set(list(self.toBeSampled.keys()))): - # for p in list(set(params) -set([self._objectiveVar]) -set(list(self.toBeSampled.keys()))): constraintData[p] = list(np.atleast_1d(rlz[p].data)) # Compute constraint function g_j(x) for all constraints (j = 1 .. 
J) # and all x's (individuals) in the population g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions))) g = xr.DataArray(g0, - dims=['chromosome','Constraint'], - coords={'chromosome':np.arange(np.shape(offSprings)[0]), - 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]}) + dims=['chromosome','Constraint'], + coords={'chromosome':np.arange(np.shape(offSprings)[0]), + 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]}) # FIXME The constraint handling is following the structure of the RavenSampled.py, # there are many utility functions that can be simplified and/or merged together # _check, _handle, and _apply, for explicit and implicit constraints. @@ -519,9 +529,9 @@ def _useRealization(self, info, rlz): g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions))) g = xr.DataArray(g0, - dims=['chromosome','Constraint'], - coords={'chromosome':np.arange(np.shape(offSprings)[0]), - 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]}) + dims=['chromosome','Constraint'], + coords={'chromosome':np.arange(np.shape(offSprings)[0]), + 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]}) for index,individual in enumerate(offSprings): newOpt = individual @@ -538,14 +548,13 @@ def _useRealization(self, info, rlz): else: g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint) - fitness = np.zeros((len(offSprings), 1)) - for i in range(len(fitness)): - fitness[i] = countConstViolation(g.data[i]) - fitness = [item for sublist in fitness.tolist() for item in sublist] - - fitness = xr.DataArray(fitness, - dims=['NumOfConstraintViolated'], - coords={'NumOfConstraintViolated':np.arange(np.shape(fitness)[0])}) + offSpringFitness = self._fitnessInstance(rlz, + objVar=self._objectiveVar, + a=self._objCoeff, + b=self._penaltyCoeff, + penalty=None, + constraintFunction=g, + type=self._minMax) # 0.2@ n-1: Survivor selection(rlz) # update population container given obtained children @@ -579,7 +588,7 @@ def _useRealization(self, info, rlz): popObjectiveVal=self.objectiveVal, offObjectiveVal=objectiveVal, popConst = self.constraints, - offConst = fitness, + offConst = offSpringFitness, popConstV = self.constraintsV, offConstV = g ) @@ -619,11 +628,27 @@ def _useRealization(self, info, rlz): else: self.population = offSprings - self.constraints = fitness + self.constraints = offSpringFitness self.constraintsV = g - self.rank, self.crowdingDistance = self._fitnessInstance(rlz, - objVals = self._objectiveVar - ) + + offObjVal = [] + for i in range(len(self._objectiveVar)): + offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + + offspringObjsVals = [list(ele) for ele in list(zip(*offObjVal))] + + offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringObjsVals)) + self.rank = xr.DataArray(offSpringRank, + dims=['rank'], + coords={'rank': np.arange(np.shape(offSpringRank)[0])}) + + offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, + popSize=len(offSpringRank), + objectives=np.array(offspringObjsVals)) + self.crowdingDistance = xr.DataArray(offSpringCD, + dims=['CrowdingDistance'], + coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) + self.objectiveVal = [] for i in range(len(self._objectiveVar)): self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) @@ 
-775,7 +800,7 @@ def _resolveNewGeneration(self, traj, rlz, objectiveVal, fitness, g, info): varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys()) rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) rlzDict[self._objectiveVar[0]] = np.atleast_1d(rlz[self._objectiveVar[0]].data)[i] - rlzDict['fitness'] = np.atleast_1d(fitness.data)[i] + rlzDict['fitness'] = np.atleast_1d(fitness.to_array()[:,i]) for ind, consName in enumerate(g['Constraint'].values): rlzDict['ConstraintEvaluation_'+consName] = g[i,ind] self._updateSolutionExport(traj, rlzDict, acceptable, None) @@ -867,12 +892,14 @@ def _collectOptPoint(self, rlz, fitness, objectiveVal, g): @ In, fitness, xr.DataArray, fitness values at each chromosome of the realization @ Out, point, dict, point used in this realization """ - varList = list(self.toBeSampled.keys()) + self._solutionExport.getVars('input') + self._solutionExport.getVars('output') varList = set(varList) selVars = [var for var in varList if var in rlz.data_vars] population = datasetToDataArray(rlz, selVars) - optPoints,fit,obj,gOfBest = zip(*[[x,y,z,w] for x, y, z,w in sorted(zip(np.atleast_2d(population.data),np.atleast_1d(fitness.data),objectiveVal,np.atleast_2d(g.data)),reverse=True,key=lambda x: (x[1]))]) + if self._fitnessType == 'hardConstraint': + optPoints,fit,obj,gOfBest = zip(*[[x,y,z,w] for x, y, z,w in sorted(zip(np.atleast_2d(population.data),datasetToDataArray(fitness, self._objectiveVar).data,objectiveVal,np.atleast_2d(g.data)),reverse=True,key=lambda x: (x[1],-x[2]))]) + else: + optPoints,fit,obj,gOfBest = zip(*[[x,y,z,w] for x, y, z,w in sorted(zip(np.atleast_2d(population.data),datasetToDataArray(fitness, self._objectiveVar).data,objectiveVal,np.atleast_2d(g.data)),reverse=True,key=lambda x: (x[1]))]) point = dict((var,float(optPoints[0][i])) for i, var in enumerate(selVars) if var in rlz.data_vars) gOfBest = dict(('ConstraintEvaluation_'+name,float(gOfBest[0][i])) for i, name in enumerate(g.coords['Constraint'].values)) if (self.counter > 1 and obj[0] <= self.bestObjective and fit[0] >= self.bestFitness) or self.counter == 1: diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index 1770ef02cc..6dc500e641 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -20,6 +20,7 @@ """ # Internal Modules---------------------------------------------------------------------------------- from ...utils import frontUtils +from ..parentSelectors.parentSelectors import countConstViolation # External Imports import numpy as np @@ -75,8 +76,8 @@ def invLinear(rlz,**kwargs): fitness = -a * (rlz[objVar].data).reshape(-1,1) - b * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1) fitness = xr.DataArray(np.squeeze(fitness), - dims=['chromosome'], - coords={'chromosome': np.arange(len(data))}) + dims=['chromosome'], + coords={'chromosome': np.arange(len(data))}) return fitness def rank_crowding(rlz,**kwargs): @@ -109,6 +110,34 @@ def rank_crowding(rlz,**kwargs): return offSpringRank, offSpringCD + +def hardConstraint(rlz,**kwargs): + r""" + Multiobjective optimization using NSGA-II requires the rank and crowding distance values to the objective function + + @ In, rlz, xr.Dataset, containing the evaluation of a certain + set of individuals (can be the initial population for the very first iteration, + or a population of offsprings) + @ In, kwargs, dict, 
dictionary of parameters for this rank_crowding method: + objVar, string, the names of the objective variables + @ Out, offSpringRank, xr.DataArray, the rank of the given objective corresponding to a specific chromosome. + offSpringCD, xr.DataArray, the crowding distance of the given objective corresponding to a specific chromosome. + """ + objVar = kwargs['objVar'] + g = kwargs['constraintFunction'] + data = np.atleast_1d(rlz[objVar].data) + fitness = np.zeros((len(data), 1)) + for i in range(len(fitness)): + fitness[i] = countConstViolation(g.data[i]) + fitness = [-item for sublist in fitness.tolist() for item in sublist] + + fitness = xr.DataArray(fitness, + dims=['NumOfConstraintViolated'], + coords={'NumOfConstraintViolated':np.arange(np.shape(fitness)[0])}) + + return fitness + + def feasibleFirst(rlz,**kwargs): r""" Efficient Parameter-less Feasible First Penalty Fitness method @@ -117,11 +146,13 @@ def feasibleFirst(rlz,**kwargs): 1. As the objective function decreases (comes closer to the min value), the fitness value increases 2. As the objective function increases (away from the min value), the fitness value decreases 3. As the solution violates the constraints the fitness should decrease and hence the solution is less favored by the algorithm. - 4. For the violating solutions, the fitness is starts from the worst solution in the population + 4. For the violating solutions, the fitness starts from the worst solution in the population (i.e., max objective in minimization problems and min objective in maximization problems) For maximization problems the objective value is multiplied by -1 and hence the previous trends are inverted. A great quality of this fitness is that if the objective value is equal for multiple solutions it selects the furthest from constraint violation. + + Reference: Deb, Kalyanmoy. "An efficient constraint handling method for genetic algorithms." Computer methods in applied mechanics and engineering 186.2-4 (2000): 311-338. .. math:: @@ -139,23 +170,46 @@ def feasibleFirst(rlz,**kwargs): 'constraintFunction', xr.Dataarray, containing all constraint functions (explicit and implicit) evaluations for the whole population @ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome. 
""" - objVar = kwargs['objVar'] + if isinstance(kwargs['objVar'], str) == True: + objVar = [kwargs['objVar']] + else: + objVar = kwargs['objVar'] g = kwargs['constraintFunction'] - data = np.atleast_1d(rlz[objVar].data) - worstObj = max(data) - fitness = [] - for ind in range(data.size): - if np.all(g.data[ind, :]>=0): - fit=(data[ind]) + for i in range(len(objVar)): + data = np.atleast_1d(rlz[objVar][objVar[i]].data) + worstObj = max(data) + fitness = [] + for ind in range(data.size): + if np.all(g.data[ind, :]>=0): + fit=(data[ind]) + else: + fit = worstObj + for constInd,_ in enumerate(g['Constraint'].data): + fit+=(max(0,-1 * g.data[ind, constInd])) + fitness.append(-1 * fit) + fitness = xr.DataArray(np.array(fitness), + dims=['chromosome'], + coords={'chromosome': np.arange(len(data))}) + if i == 0: + fitnessSet = fitness.to_dataset(name = objVar[i]) else: - fit = worstObj - for constInd,_ in enumerate(g['Constraint'].data): - fit+=(max(0,-1 * g.data[ind, constInd])) - fitness.append(-1 * fit) - fitness = xr.DataArray(np.array(fitness), - dims=['chromosome'], - coords={'chromosome': np.arange(len(data))}) - return fitness + fitnessSet[objVar[i]] = fitness + ### This code block is for sinlge objective ### + # data = np.atleast_1d(rlz[objVar].data) + # worstObj = max(data) + # fitness = [] + # for ind in range(data.size): + # if np.all(g.data[ind, :]>=0): + # fit=(data[ind]) + # else: + # fit = worstObj + # for constInd,_ in enumerate(g['Constraint'].data): + # fit+=(max(0,-1 * g.data[ind, constInd])) + # fitness.append(-1 * fit) + # fitness = xr.DataArray(np.array(fitness), + # dims=['chromosome'], + # coords={'chromosome': np.arange(len(data))}) + return fitnessSet def logistic(rlz,**kwargs): """ @@ -201,6 +255,7 @@ def logistic(rlz,**kwargs): __fitness['logistic'] = logistic __fitness['feasibleFirst'] = feasibleFirst __fitness['rank_crowding'] = rank_crowding +__fitness['hardConstraint'] = hardConstraint def returnInstance(cls, name): @@ -211,5 +266,5 @@ def returnInstance(cls, name): @ Out, __crossovers[name], instance of class """ if name not in __fitness: - cls.raiseAnError (IOError, "{} FITNESS FUNCTION NOT IMPLEMENTED!!!!!".format(name)) + cls.raiseAnError (IOError, "{} is not a supported fitness function. 
".format(name)) return __fitness[name] diff --git a/ravenframework/Optimizers/parentSelectors/parentSelectors.py b/ravenframework/Optimizers/parentSelectors/parentSelectors.py index 66bf37932f..cf7d5b33f4 100644 --- a/ravenframework/Optimizers/parentSelectors/parentSelectors.py +++ b/ravenframework/Optimizers/parentSelectors/parentSelectors.py @@ -21,10 +21,15 @@ Created June,16,2020 @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi """ - +# External Modules---------------------------------------------------------------------------------- import numpy as np import xarray as xr from ...utils import randomUtils +# External Modules---------------------------------------------------------------------------------- + +# Internal Modules---------------------------------------------------------------------------------- +from ...utils.gaUtils import dataArrayToDict, datasetToDataArray +# Internal Modules End------------------------------------------------------------------------------ # For mandd: to be updated with RAVEN official tools from itertools import combinations @@ -42,7 +47,7 @@ def rouletteWheel(population,**kwargs): """ # Arguments pop = population - fitness = kwargs['fitness'] + fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist]) nParents= kwargs['nParents'] # if nparents = population size then do nothing (whole population are parents) if nParents == pop.shape[0]: @@ -62,11 +67,11 @@ def rouletteWheel(population,**kwargs): roulettePointer = randomUtils.random(dim=1, samples=1) # initialize Probability counter = 0 - if np.all(fitness.data>=0) or np.all(fitness.data<=0): - selectionProb = fitness.data/np.sum(fitness.data) # Share of the pie (rouletteWheel) + if np.all(fitness>=0) or np.all(fitness<=0): + selectionProb = fitness/np.sum(fitness) # Share of the pie (rouletteWheel) else: # shift the fitness to be all positive - shiftedFitness = fitness.data + abs(min(fitness.data)) + shiftedFitness = fitness + abs(min(fitness)) selectionProb = shiftedFitness/np.sum(shiftedFitness) # Share of the pie (rouletteWheel) sumProb = selectionProb[counter] @@ -109,11 +114,11 @@ def tournamentSelection(population,**kwargs): matrixOperationRaw[:,3] = np.transpose(constraintInfo.data) matrixOperation = np.zeros((popSize,len(matrixOperationRaw[0]))) else: - fitness = kwargs['fitness'] + fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist]) multiObjectiveRanking = False matrixOperationRaw = np.zeros((popSize,2)) matrixOperationRaw[:,0] = np.transpose(np.arange(popSize)) - matrixOperationRaw[:,1] = np.transpose(fitness.data) + matrixOperationRaw[:,1] = np.transpose(fitness) matrixOperation = np.zeros((popSize,2)) indexes = list(np.arange(popSize)) diff --git a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py index 6da9f67071..6848c22275 100644 --- a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py +++ b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py @@ -21,10 +21,15 @@ Created June,16,2020 @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi """ - +# External Modules---------------------------------------------------------------------------------- import numpy as np import xarray as xr from ravenframework.utils import frontUtils +# External Modules 
End------------------------------------------------------------------------------ + +# Internal Modules---------------------------------------------------------------------------------- +from ...utils.gaUtils import dataArrayToDict, datasetToDataArray +# Internal Modules End------------------------------------------------------------------------------ # @profile def ageBased(newRlz,**kwargs): @@ -97,11 +102,12 @@ def fitnessBased(newRlz,**kwargs): else: popAge = kwargs['age'] - offSpringsFitness = np.atleast_1d(kwargs['offSpringsFitness']) + offSpringsFitness = datasetToDataArray(kwargs['offSpringsFitness'], list(kwargs['offSpringsFitness'].keys())).data + offSpringsFitness = np.array([item for sublist in offSpringsFitness for item in sublist]) offSprings = np.atleast_2d(newRlz[kwargs['variables']].to_array().transpose().data) population = np.atleast_2d(kwargs['population'].data) - popFitness = np.atleast_1d(kwargs['fitness'].data) - + popFitness = datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data + popFitness = np.array([item for sublist in popFitness for item in sublist]) newPopulation = population newFitness = popFitness newAge = list(map(lambda x:x+1, popAge)) @@ -123,6 +129,7 @@ def fitnessBased(newRlz,**kwargs): newFitness = xr.DataArray(newFitness, dims=['chromosome'], coords={'chromosome':np.arange(np.shape(newFitness)[0])}) + newFitness = newFitness.to_dataset(name = list(kwargs['fitness'].keys())[0]) #return newPopulationArray,newFitness,newAge return newPopulationArray,newFitness,newAge,kwargs['popObjectiveVal'] diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml index a1d7cd7452..05460107a7 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml @@ -9,7 +9,7 @@
- Multi_MinwoReplacement + Multi_MinwoReplacement_wo_constraints_20_5 optimize,print 1 @@ -57,14 +57,13 @@ - 2 + 5 42 every min,min - - 10 + 20 tournamentSelection @@ -74,7 +73,7 @@ 0.7 - + rankNcrowdingBased @@ -110,7 +109,7 @@ - 10 + 20 050877 @@ -142,7 +141,8 @@ trajID - x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,ConstraintEvaluation_expConstr3, ConstraintEvaluation_impConstr3,fitness,accepted + + x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,fitness,accepted diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml index 4da8636350..e546e6d971 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml @@ -55,14 +55,14 @@ - 20 + 5 42 every - 20 - rouletteWheel + 10 + tournamentSelection 0.8 @@ -71,7 +71,7 @@ 0.9 - + fitnessBased @@ -101,7 +101,7 @@ - 20 + 10 20021986 From 285575f98770c008698f0d40369164b924c22a7c Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Thu, 31 Aug 2023 10:24:19 -0600 Subject: [PATCH 41/84] single objective optimization works well with three different types of fitness. Now changes for multi-objective optimization are in need. Specially fitness values need to be replaced with objective values for rank and CD. --- ravenframework/Optimizers/fitness/fitness.py | 44 ++++++++------------ 1 file changed, 18 insertions(+), 26 deletions(-) diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index 6dc500e641..5ec7942d22 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -113,7 +113,7 @@ def rank_crowding(rlz,**kwargs): def hardConstraint(rlz,**kwargs): r""" - Multiobjective optimization using NSGA-II requires the rank and crowding distance values to the objective function + Fitness method counting the number of constraints violated @ In, rlz, xr.Dataset, containing the evaluation of a certain set of individuals (can be the initial population for the very first iteration, @@ -123,19 +123,26 @@ def hardConstraint(rlz,**kwargs): @ Out, offSpringRank, xr.DataArray, the rank of the given objective corresponding to a specific chromosome. offSpringCD, xr.DataArray, the crowding distance of the given objective corresponding to a specific chromosome. 
""" - objVar = kwargs['objVar'] + if isinstance(kwargs['objVar'], str) == True: + objVar = [kwargs['objVar']] + else: + objVar = kwargs['objVar'] g = kwargs['constraintFunction'] - data = np.atleast_1d(rlz[objVar].data) - fitness = np.zeros((len(data), 1)) - for i in range(len(fitness)): - fitness[i] = countConstViolation(g.data[i]) - fitness = [-item for sublist in fitness.tolist() for item in sublist] - fitness = xr.DataArray(fitness, - dims=['NumOfConstraintViolated'], - coords={'NumOfConstraintViolated':np.arange(np.shape(fitness)[0])}) + for j in range(len(objVar)): + fitness = np.zeros((len(g.data), 1)) + for i in range(len(fitness)): + fitness[i] = countConstViolation(g.data[i]) + fitness = [-item for sublist in fitness.tolist() for item in sublist] + fitness = xr.DataArray(fitness, + dims=['NumOfConstraintViolated'], + coords={'NumOfConstraintViolated':np.arange(np.shape(fitness)[0])}) + if j == 0: + fitnessSet = fitness.to_dataset(name = objVar[j]) + else: + fitnessSet[objVar[j]] = fitness - return fitness + return fitnessSet def feasibleFirst(rlz,**kwargs): @@ -194,21 +201,6 @@ def feasibleFirst(rlz,**kwargs): fitnessSet = fitness.to_dataset(name = objVar[i]) else: fitnessSet[objVar[i]] = fitness - ### This code block is for sinlge objective ### - # data = np.atleast_1d(rlz[objVar].data) - # worstObj = max(data) - # fitness = [] - # for ind in range(data.size): - # if np.all(g.data[ind, :]>=0): - # fit=(data[ind]) - # else: - # fit = worstObj - # for constInd,_ in enumerate(g['Constraint'].data): - # fit+=(max(0,-1 * g.data[ind, constInd])) - # fitness.append(-1 * fit) - # fitness = xr.DataArray(np.array(fitness), - # dims=['chromosome'], - # coords={'chromosome': np.arange(len(data))}) return fitnessSet def logistic(rlz,**kwargs): From 7707f67a29d904d62c7c9116f4e5296097b6dc2a Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Mon, 4 Sep 2023 15:13:17 -0600 Subject: [PATCH 42/84] NSGA-II improvement is in progress. --- ravenframework/Optimizers/GeneticAlgorithm.py | 105 +++++++++--------- ravenframework/Optimizers/RavenSampled.py | 2 +- ravenframework/Optimizers/fitness/fitness.py | 41 +------ .../parentSelectors/parentSelectors.py | 25 ++--- .../survivorSelectors/survivorSelectors.py | 42 ++++--- .../constrained/MinwoRepMultiObjective.xml | 13 ++- .../constrained/testGAMinwRepConstrained.xml | 4 +- 7 files changed, 103 insertions(+), 129 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index e6276f9020..edcc00e9e1 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -322,6 +322,7 @@ def getSolutionExportVariableNames(cls): new['AHDp'] = 'p-Average Hausdorff Distance between populations' new['AHD'] = 'Hausdorff Distance between populations' new['ConstraintEvaluation_{CONSTRAINT}'] = 'Constraint function evaluation (negative if violating and positive otherwise)' + new['FitnessEvaluation_{OBJ}'] = 'Fitness evaluation of each objective' ok.update(new) return ok @@ -374,7 +375,7 @@ def handleInput(self, paramInput): # Check if the fitness requested is among the constrained optimization fitnesses # TODO: @mandd, please explore the possibility to convert the logistic fitness into a constrained optimization fitness. 
if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','feasibleFirst','hardConstraint']: - self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear, feasibleFirst and rank_crowding as a fitness, whereas provided fitness is {self._fitnessType}') + self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear, feasibleFirst and hardConstraint as a fitness, whereas provided fitness is {self._fitnessType}') self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None self._fitnessInstance = fitnessReturnInstance(self,name = self._fitnessType) @@ -552,9 +553,9 @@ def _useRealization(self, info, rlz): objVar=self._objectiveVar, a=self._objCoeff, b=self._penaltyCoeff, - penalty=None, + penalty =None, constraintFunction=g, - type=self._minMax) + type =self._minMax) # 0.2@ n-1: Survivor selection(rlz) # update population container given obtained children @@ -580,15 +581,15 @@ def _useRealization(self, info, rlz): if self.counter > 1: self.population,self.rank, \ self.popAge,self.crowdingDistance, \ - self.objectiveVal,self.constraints, \ + self.objectiveVal,self.fitness, \ self.constraintsV = self._survivorSelectionInstance(age=self.popAge, variables=list(self.toBeSampled), population=self.population, offsprings=rlz, popObjectiveVal=self.objectiveVal, offObjectiveVal=objectiveVal, - popConst = self.constraints, - offConst = offSpringFitness, + popFit = self.fitness, + offFit = offSpringFitness, popConstV = self.constraintsV, offConstV = g ) @@ -599,7 +600,7 @@ def _useRealization(self, info, rlz): self.rank, self.crowdingDistance, self.objectiveVal, - self.constraints, + self.fitness, self.constraintsV) self._resolveNewGenerationMulti(traj, rlz, info) @@ -628,26 +629,30 @@ def _useRealization(self, info, rlz): else: self.population = offSprings - self.constraints = offSpringFitness + self.fitness = offSpringFitness self.constraintsV = g + # offspringObjsVals for Rank and CD calculation offObjVal = [] for i in range(len(self._objectiveVar)): offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) offspringObjsVals = [list(ele) for ele in list(zip(*offObjVal))] - offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringObjsVals)) + # offspringFitVals for Rank and CD calculation + fitVal = datasetToDataArray(self.fitness, self._objectiveVar).data + offspringFitVals = fitVal.tolist() + offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringFitVals)) self.rank = xr.DataArray(offSpringRank, - dims=['rank'], - coords={'rank': np.arange(np.shape(offSpringRank)[0])}) - + dims=['rank'], + coords={'rank': np.arange(np.shape(offSpringRank)[0])}) offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, popSize=len(offSpringRank), - objectives=np.array(offspringObjsVals)) + objectives=np.array(offspringFitVals)) + self.crowdingDistance = xr.DataArray(offSpringCD, - dims=['CrowdingDistance'], - coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) + dims=['CrowdingDistance'], + coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) self.objectiveVal = [] for i in range(len(self._objectiveVar)): @@ -657,7 +662,7 @@ def _useRealization(self, info, rlz): self.rank, self.crowdingDistance, self.objectiveVal, - self.constraints, + self.fitness, self.constraintsV) self._resolveNewGenerationMulti(traj, rlz, 
info) # 1 @ n: Parent selection from population @@ -676,7 +681,7 @@ def _useRealization(self, info, rlz): nParents=self._nParents, rank = self.rank, crowdDistance = self.crowdingDistance, - constraint = self.constraints + fitness = self.fitness ) # 2 @ n: Crossover from set of parents @@ -853,12 +858,13 @@ def _resolveNewGenerationMulti(self, traj, rlz, info): varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys()) # rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) rlzDict = dict((var,self.population.data[i][j]) for j, var in enumerate(self.population.Gene.data)) - rlzDict['batchId'] = rlz['batchId'][i] + rlzDict['batchId'] = rlz['batchId'].data[i] for j in range(len(self._objectiveVar)): rlzDict[self._objectiveVar[j]] = objVal.data[i][j] rlzDict['rank'] = np.atleast_1d(self.rank.data)[i] rlzDict['CD'] = np.atleast_1d(self.crowdingDistance.data)[i] - rlzDict['fitness'] = np.atleast_1d(self.constraints.data)[i] + for ind, fitName in enumerate(list(self.fitness.keys())): + rlzDict['FitnessEvaluation_'+fitName] = self.fitness[fitName].data[i] for ind, consName in enumerate([y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]): rlzDict['ConstraintEvaluation_'+consName] = self.constraintsV.data[i,ind] self._updateSolutionExport(traj, rlzDict, acceptable, None) @@ -871,12 +877,14 @@ def _resolveNewGenerationMulti(self, traj, rlz, info): bestRlz = dict((var,np.atleast_1d(rlz[var].data)) for var in set(varList) if var in rlz.data_vars) for i in range(len(self._objectiveVar)): bestRlz[self._objectiveVar[i]] = [item[i] for item in self.multiBestObjective] - bestRlz['fitness'] = self.multiBestFitness + bestRlz['rank'] = self.multiBestRank bestRlz['CD'] = self.multiBestCD if len(self.multiBestConstraint) != 0: # No constraints for ind, consName in enumerate(self.multiBestConstraint.Constraint): bestRlz['ConstraintEvaluation_'+consName.values.tolist()] = self.multiBestConstraint[ind].values + for ind, fitName in enumerate(list(self.multiBestFitness.keys())): + bestRlz['FitnessEvaluation_'+ fitName] = self.multiBestFitness[fitName].data bestRlz.update(self.multiBestPoint) self._optPointHistory[traj].append((bestRlz, info)) elif acceptable == 'rejected': @@ -910,7 +918,7 @@ def _collectOptPoint(self, rlz, fitness, objectiveVal, g): return point - def _collectOptPointMulti(self, population, rank, CD, objectiveVal, constraints, constraintsV): + def _collectOptPointMulti(self, population, rank, CD, objVal, fitness, constraintsV): """ Collects the point (dict) from a realization @ In, population, Dataset, container containing the population @@ -919,33 +927,20 @@ def _collectOptPointMulti(self, population, rank, CD, objectiveVal, constraints, @ In, crowdingDistance, xr.DataArray, crowdingDistance values at each chromosome of the realization @ Out, point, dict, point used in this realization """ - objVal = [[] for x in range(len(objectiveVal[0]))] - for i in range(len(objectiveVal[0])): - objVal[i] = [item[i] for item in objectiveVal] - - optPointsConsIDX = [i for i, nFit in enumerate(constraints) if nFit == min(constraints)] # Find index of chromosome which has smallest numeber of violations among population - optPointsRankNConsIDX = [i for i, rankValue in enumerate(rank[optPointsConsIDX]) if rankValue == min(rank[optPointsConsIDX])] # Find index of chromosome which has smallest numeber of violations among population & smallest rank - - 
optPoints,optObjVal,optConstraints,optConstraintsV,optRank,optCD = population[optPointsRankNConsIDX], np.array(objVal)[optPointsRankNConsIDX], constraints.data[optPointsRankNConsIDX], constraintsV.data[optPointsRankNConsIDX], rank.data[optPointsRankNConsIDX], CD.data[optPointsRankNConsIDX] - - # Previous ################################################## - # points,multiFit,rankSorted,cdSorted,objSorted,constSorted = \ - # zip(*[[a,b,c,d,e,f] for a, b, c, d, e, f in sorted(zip(np.atleast_2d(population.data),np.atleast_1d(constraintsV.data),np.atleast_1d(rank.data),np.atleast_1d(CD.data), objVal, constraints), - # reverse=True,key=lambda x: (-x[1], -x[2], x[3]))]) - # optPoints = [points[i] for i, rank in enumerate(rankSorted) if rank == 1 ] - # optMultiFit = [multiFit[i] for i, rank in enumerate(rankSorted) if rank == 1 ] - # optObj = [objSorted[i] for i, rank in enumerate(rankSorted) if rank == 1 ] - # optConst = [constSorted[i] for i, rank in enumerate(rankSorted) if rank == 1 ] - # optRank = [rankSorted[i] for i, rank in enumerate(rankSorted) if rank == 1 ] - # optCD = [cdSorted[i] for i, rank in enumerate(rankSorted) if rank == 1 ] - # if (len(optMultiFit) != len([x for x in optMultiFit if x != 0]) ) : - # optPoints = [optPoints[i] for i, nFit in enumerate(optMultiFit) if nFit == 0 ] - # optMultiFit = [x for x in optMultiFit if x == 0] - # optObj = [optObj[i] for i, nFit in enumerate(optMultiFit) if nFit == 0 ] - # optConst = [optConst[i] for i, nFit in enumerate(optMultiFit) if nFit == 0 ] - # optRank = [optRank[i] for i, nFit in enumerate(optMultiFit) if nFit == 0 ] - # optCD = [optCD[i] for i, nFit in enumerate(optMultiFit) if nFit == 0 ] - # Previous ################################################## + rankOneIDX = [i for i, rankValue in enumerate(rank.data) if rankValue == 1] + optPoints = population[rankOneIDX] + optObjVal = np.array([list(ele) for ele in list(zip(*objVal))])[rankOneIDX] + count = 0 + for i in list(fitness.keys()): + data = fitness[i][rankOneIDX] + if count == 0: + fitSet = data.to_dataset(name = i) + else: + fitSet[i] = data + count = count + 1 + optConstraintsV = constraintsV.data[rankOneIDX] + optRank = rank.data[rankOneIDX] + optCD = CD.data[rankOneIDX] optPointsDic = dict((var,np.array(optPoints)[:,i]) for i, var in enumerate(population.Gene.data)) optConstNew = [] @@ -954,15 +949,15 @@ def _collectOptPointMulti(self, population, rank, CD, objectiveVal, constraints, optConstNew = list(map(list, zip(*optConstNew))) if (len(optConstNew)) != 0: optConstNew = xr.DataArray(optConstNew, - dims=['Constraint','Evaluation'], - coords={'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)], - 'Evaluation':np.arange(np.shape(optConstNew)[1])}) + dims=['Constraint','Evaluation'], + coords={'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)], + 'Evaluation':np.arange(np.shape(optConstNew)[1])}) self.multiBestPoint = optPointsDic - self.multiBestFitness = optConstraints + self.multiBestFitness = fitSet self.multiBestObjective = optObjVal self.multiBestConstraint = optConstNew - self.multiBestRank = optRank + self.multiBestRank = optRank #TODO JY: MultiBestRank is not anymore in need. This should be removed later. 
self.multiBestCD = optCD return optPointsDic @@ -1287,7 +1282,7 @@ def _addToSolutionExport(self, traj, rlz, acceptable): # meta variables toAdd = {'age': 0 if self.popAge is None else self.popAge, 'batchId': self.batchId, - 'fitness': rlz['fitness'], + # 'fitness': rlz['fitness'], 'AHDp': self.ahdp, 'AHD': self.ahd, 'rank': 0 if ((type(self._objectiveVar) == list and len(self._objectiveVar) == 1) or type(self._objectiveVar) == str) else rlz['rank'], @@ -1318,6 +1313,8 @@ def _formatSolutionExportVariableNames(self, acceptable): new.extend([template.format(CONV=conv) for conv in self._convergenceCriteria]) elif '{VAR}' in template: new.extend([template.format(VAR=var) for var in self.toBeSampled]) + elif '{OBJ}' in template: + new.extend([template.format(OBJ=obj) for obj in self._objectiveVar]) elif '{CONSTRAINT}' in template: new.extend([template.format(CONSTRAINT=constraint.name) for constraint in self._constraintFunctions + self._impConstraintFunctions]) else: diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py index 82505cf3e2..b3d13274bc 100644 --- a/ravenframework/Optimizers/RavenSampled.py +++ b/ravenframework/Optimizers/RavenSampled.py @@ -731,7 +731,7 @@ def _updateSolutionExport(self, traj, rlz, acceptable, rejectReason): # constants and functions toExport.update(self.constants) toExport.update(dict((var, rlz[var]) for var in self.dependentSample if var in rlz)) - # additional from from inheritors + # additional from inheritors toExport.update(self._addToSolutionExport(traj, rlz, acceptable)) # check for anything else that solution export wants that rlz might provide for var in self._solutionExport.getVars(): diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index 5ec7942d22..725e28c10f 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -80,37 +80,6 @@ def invLinear(rlz,**kwargs): coords={'chromosome': np.arange(len(data))}) return fitness -def rank_crowding(rlz,**kwargs): - r""" - Multiobjective optimization using NSGA-II requires the rank and crowding distance values to the objective function - - @ In, rlz, xr.Dataset, containing the evaluation of a certain - set of individuals (can be the initial population for the very first iteration, - or a population of offsprings) - @ In, kwargs, dict, dictionary of parameters for this rank_crowding method: - objVar, string, the names of the objective variables - @ Out, offSpringRank, xr.DataArray, the rank of the given objective corresponding to a specific chromosome. - offSpringCD, xr.DataArray, the crowding distance of the given objective corresponding to a specific chromosome. 
- """ - objectiveVal = [] - for i in range(len(kwargs['objVals'])): - objectiveVal.append(list(np.atleast_1d(rlz[kwargs['objVals'][i]].data))) - - offspringObjsVals = [list(ele) for ele in list(zip(*objectiveVal))] - - offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringObjsVals)) - offSpringRank = xr.DataArray(offSpringRank, - dims=['rank'], - coords={'rank': np.arange(np.shape(offSpringRank)[0])}) - - offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, popSize=len(offSpringRank), objectives=np.array(offspringObjsVals)) - offSpringCD = xr.DataArray(offSpringCD, - dims=['CrowdingDistance'], - coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) - - return offSpringRank, offSpringCD - - def hardConstraint(rlz,**kwargs): r""" Fitness method counting the number of constraints violated @@ -135,8 +104,8 @@ def hardConstraint(rlz,**kwargs): fitness[i] = countConstViolation(g.data[i]) fitness = [-item for sublist in fitness.tolist() for item in sublist] fitness = xr.DataArray(fitness, - dims=['NumOfConstraintViolated'], - coords={'NumOfConstraintViolated':np.arange(np.shape(fitness)[0])}) + dims=['NumOfConstraintViolated'], + coords={'NumOfConstraintViolated':np.arange(np.shape(fitness)[0])}) if j == 0: fitnessSet = fitness.to_dataset(name = objVar[j]) else: @@ -182,6 +151,7 @@ def feasibleFirst(rlz,**kwargs): else: objVar = kwargs['objVar'] g = kwargs['constraintFunction'] + penalty = kwargs['b'] for i in range(len(objVar)): data = np.atleast_1d(rlz[objVar][objVar[i]].data) worstObj = max(data) @@ -192,8 +162,8 @@ def feasibleFirst(rlz,**kwargs): else: fit = worstObj for constInd,_ in enumerate(g['Constraint'].data): - fit+=(max(0,-1 * g.data[ind, constInd])) - fitness.append(-1 * fit) + fit+= penalty*(max(0,-1*g.data[ind, constInd])) + fitness.append(fit) fitness = xr.DataArray(np.array(fitness), dims=['chromosome'], coords={'chromosome': np.arange(len(data))}) @@ -246,7 +216,6 @@ def logistic(rlz,**kwargs): __fitness['invLinear'] = invLinear __fitness['logistic'] = logistic __fitness['feasibleFirst'] = feasibleFirst -__fitness['rank_crowding'] = rank_crowding __fitness['hardConstraint'] = hardConstraint diff --git a/ravenframework/Optimizers/parentSelectors/parentSelectors.py b/ravenframework/Optimizers/parentSelectors/parentSelectors.py index cf7d5b33f4..5ddc0d87ae 100644 --- a/ravenframework/Optimizers/parentSelectors/parentSelectors.py +++ b/ravenframework/Optimizers/parentSelectors/parentSelectors.py @@ -105,13 +105,13 @@ def tournamentSelection(population,**kwargs): # the key rank is used in multi-objective optimization where rank identifies which front the point belongs to rank = kwargs['rank'] crowdDistance = kwargs['crowdDistance'] - constraintInfo = kwargs['constraint'] + # constraintInfo = kwargs['constraint'] multiObjectiveRanking = True - matrixOperationRaw = np.zeros((popSize, 4)) + matrixOperationRaw = np.zeros((popSize, 3)) #NOTE if constraint is needed to eliminate chromosome violating constraints, then poopSize should be 4. 
matrixOperationRaw[:,0] = np.transpose(np.arange(popSize)) matrixOperationRaw[:,1] = np.transpose(crowdDistance.data) matrixOperationRaw[:,2] = np.transpose(rank.data) - matrixOperationRaw[:,3] = np.transpose(constraintInfo.data) + # matrixOperationRaw[:,3] = np.transpose(constraintInfo.data) matrixOperation = np.zeros((popSize,len(matrixOperationRaw[0]))) else: fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist]) @@ -144,18 +144,13 @@ def tournamentSelection(population,**kwargs): selectedParent[i,:] = pop.values[index,:] else: # multi-objective implementation of tournamentSelection for i in range(nParents): - if matrixOperation[2*i,3] > matrixOperation[2*i+1,3]: index = int(matrixOperation[2*i+1,0]) - elif matrixOperation[2*i,3] < matrixOperation[2*i+1,3]: index = int(matrixOperation[2*i,0]) - elif matrixOperation[2*i,3] == matrixOperation[2*i+1,3]: # if same number of constraints violations - if matrixOperation[2*i,2] > matrixOperation[2*i+1,2]: - index = int(matrixOperation[2*i+1,0]) - elif matrixOperation[2*i,2] < matrixOperation[2*i+1,2]: - index = int(matrixOperation[2*i,0]) - else: # same number of constraints and same rank case - if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]: - index = int(matrixOperation[2*i,0]) - else: - index = int(matrixOperation[2*i+1,0]) + if matrixOperation[2*i,2] > matrixOperation[2*i+1,2]: index = int(matrixOperation[2*i+1,0]) + elif matrixOperation[2*i,2] < matrixOperation[2*i+1,2]: index = int(matrixOperation[2*i,0]) + elif matrixOperation[2*i,2] == matrixOperation[2*i+1,2]: # if same rank, then compare CD + if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]: index = int(matrixOperation[2*i,0]) + elif matrixOperation[2*i,2] < matrixOperation[2*i+1,2]: index = int(matrixOperation[2*i+1,0]) + else: # same rank and same CD + index = int(matrixOperation[2*i+1,0]) #NOTE if rank and CD are same, then any chromosome can be selected. selectedParent[i,:] = pop.values[index,:] return selectedParent diff --git a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py index 6848c22275..c3dce2f1a2 100644 --- a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py +++ b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py @@ -158,12 +158,19 @@ def rankNcrowdingBased(offsprings, **kwargs): offSprings = np.atleast_2d(offsprings[kwargs['variables']].to_array().transpose().data) popObjectiveVal = kwargs['popObjectiveVal'] offObjectiveVal = kwargs['offObjectiveVal'] - popConst = kwargs['popConst'].data - offConst = kwargs['offConst'].data + popFit = kwargs['popFit'] + popFitArray = [] + offFit = kwargs['offFit'] + offFitArray = [] + for i in list(popFit.keys()): #NOTE popFit.keys() and offFit.keys() must be same. 
+ popFitArray.append(popFit[i].data.tolist()) + offFitArray.append(offFit[i].data.tolist()) + + newFitMerged = np.array([i + j for i, j in zip(popFitArray, offFitArray)]) + newFitMerged_pair = [list(ele) for ele in list(zip(*newFitMerged))] + popConstV = kwargs['popConstV'].data offConstV = kwargs['offConstV'].data - - newConstMerged = np.append(popConst, offConst) newConstVMerged = np.array(popConstV.tolist() + offConstV.tolist()) newObjectivesMerged = np.array([i + j for i, j in zip(popObjectiveVal, offObjectiveVal)]) @@ -182,15 +189,16 @@ def rankNcrowdingBased(offsprings, **kwargs): newAge = list(map(lambda x:x+1, popAge)) newPopulationMerged = np.concatenate([population,offSprings]) newAge.extend([0]*len(offSprings)) - - sortedConst,sortedRank,sortedCD,sortedAge,sortedPopulation,sortedObjectives,sortedConstV = \ + + sortedRank,sortedCD,sortedAge,sortedPopulation,sortedFit,sortedObjectives,sortedConstV = \ zip(*[(x,y,z,i,j,k,a) for x,y,z,i,j,k,a in \ - sorted(zip(newConstMerged,newPopRank.data,newPopCD.data,newAge,newPopulationMerged.tolist(),newObjectivesMerged_pair,newConstVMerged),reverse=False,key=lambda x: (x[0], x[1], -x[2]))]) - sortedConstT,sortedRankT,sortedCDT,sortedAgeT,sortedPopulationT,sortedObjectivesT,sortedConstVT = \ - np.atleast_1d(list(sortedConst)),np.atleast_1d(list(sortedRank)),list(sortedCD),list(sortedAge),np.atleast_1d(list(sortedPopulation)),np.atleast_1d(list(sortedObjectives)),np.atleast_1d(list(sortedConstV)) + sorted(zip(newPopRank.data, newPopCD.data, newAge, newPopulationMerged.tolist(), newFitMerged_pair, newObjectivesMerged_pair, newConstVMerged),reverse=False,key=lambda x: (x[0], -x[1]))]) + sortedRankT, sortedCDT, sortedAgeT, sortedPopulationT, sortedFitT, sortedObjectivesT, sortedConstVT = \ + np.atleast_1d(list(sortedRank)), list(sortedCD), list(sortedAge),np.atleast_1d(list(sortedPopulation)),np.atleast_1d(list(sortedFit)),np.atleast_1d(list(sortedObjectives)),np.atleast_1d(list(sortedConstV)) newPopulation = sortedPopulationT[:-len(offSprings)] newObjectives = sortedObjectivesT[:-len(offSprings)] + newFit = sortedFitT[:-len(offSprings)] newRank = frontUtils.rankNonDominatedFrontiers(newObjectives) newRank = xr.DataArray(newRank, @@ -204,23 +212,27 @@ def rankNcrowdingBased(offsprings, **kwargs): coords={'CrowdingDistance': np.arange(np.shape(newCD)[0])}) newAge = sortedAgeT[:-len(offSprings)] - newConst = sortedConstT[:-len(offSprings)] newConstV = sortedConstVT[:-len(offSprings)] + for i in range(len(list(popFit.keys()))): + fitness = xr.DataArray(newFit[:,i], + dims=['chromosome'], + coords={'chromosome': np.arange(len(newFit[:,i]))}) + if i == 0: + newFitnessSet = fitness.to_dataset(name = list(popFit.keys())[i]) + else: + newFitnessSet[list(popFit.keys())[i]] = fitness + newPopulationArray = xr.DataArray(newPopulation, dims=['chromosome','Gene'], coords={'chromosome':np.arange(np.shape(newPopulation)[0]), 'Gene': kwargs['variables']}) - newConst = xr.DataArray(newConst, - dims=['NumOfConstViolated'], - coords={'NumOfConstViolated':np.arange(np.shape(newConst)[0])}) - newConstV = xr.DataArray(newConstV, dims=['chromosome','ConstEvaluation'], coords={'chromosome':np.arange(np.shape(newPopulation)[0]), 'ConstEvaluation':np.arange(np.shape(newConstV)[1])}) - return newPopulationArray,newRank,newAge,newCD,newObjectivesP,newConst,newConstV + return newPopulationArray,newRank,newAge,newCD,newObjectivesP,newFitnessSet,newConstV __survivorSelectors = {} __survivorSelectors['ageBased'] = ageBased diff --git 
a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml index 05460107a7..9bf8993c9d 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml @@ -9,7 +9,7 @@ - Multi_MinwoReplacement_wo_constraints_20_5 + Multi_MinwoReplacement_wo_constraints_20_5_M optimize,print 1 @@ -67,13 +67,14 @@ tournamentSelection - 0.7 + 0.8 - - 0.7 + + 0.8 - + + 100 rankNcrowdingBased @@ -142,7 +143,7 @@ trajID - x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,fitness,accepted + x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,FitnessEvaluation_obj1,FitnessEvaluation_obj2,ConstraintEvaluation_expConstr3, ConstraintEvaluation_impConstr3,accepted diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml index e546e6d971..08267e5c89 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml @@ -61,7 +61,7 @@ - 10 + 20 tournamentSelection @@ -101,7 +101,7 @@ - 10 + 20 20021986 From a32a45c2d5f81d08f7c1f7bd831a37eced294367 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Wed, 6 Sep 2023 17:18:12 -0600 Subject: [PATCH 43/84] fitness-based NSGA-II is in progress. min-min is working well with tournament selection and feasibleFirst. --- ravenframework/Optimizers/GeneticAlgorithm.py | 13 +++++++++---- ravenframework/Optimizers/Optimizer.py | 4 ++-- ravenframework/Optimizers/fitness/fitness.py | 5 ++++- .../survivorSelectors/survivorSelectors.py | 4 ++-- .../AnalyticModels/optimizing/myConstraints.py | 2 +- .../constrained/MinwoRepMultiObjective.xml | 18 +++++++++++------- 6 files changed, 29 insertions(+), 17 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index edcc00e9e1..69e3f4205b 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -271,12 +271,12 @@ def getInputSpecification(cls): fitness.addParam("type", InputTypes.StringType, True, descr=r"""[invLin, logistic, feasibleFirst]""") objCoeff = InputData.parameterInputFactory('a', strictMode=True, - contentType=InputTypes.FloatType, + contentType=InputTypes.FloatListType, printPriority=108, descr=r""" a: coefficient of objective function.""") fitness.addSub(objCoeff) penaltyCoeff = InputData.parameterInputFactory('b', strictMode=True, - contentType=InputTypes.FloatType, + contentType=InputTypes.FloatListType, printPriority=108, descr=r""" b: coefficient of constraint penalty.""") fitness.addSub(penaltyCoeff) @@ -369,15 +369,20 @@ def handleInput(self, paramInput): survivorSelectionNode = gaParamsNode.findFirst('survivorSelection') self._survivorSelectionType = survivorSelectionNode.value self._survivorSelectionInstance = survivorSelectionReturnInstance(self,name = self._survivorSelectionType) + if len(self._objectiveVar) == 1 and self._survivorSelectionType == 'rankNcrowdingBased': + self.raiseAnError(IOError, f'(rankNcrowdingBased) in only supports when the number of objective in is bigger than two. 
') # Fitness fitnessNode = gaParamsNode.findFirst('fitness') self._fitnessType = fitnessNode.parameterValues['type'] - # Check if the fitness requested is among the constrained optimization fitnesses # TODO: @mandd, please explore the possibility to convert the logistic fitness into a constrained optimization fitness. if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','feasibleFirst','hardConstraint']: self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear, feasibleFirst and hardConstraint as a fitness, whereas provided fitness is {self._fitnessType}') self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None + expConstr = self.assemblerObjects['Constraint'][0] + impConstr = self.assemblerObjects['ImplicitConstraint'][0] + if len(self._penaltyCoeff) != len(self._objectiveVar) * (len([ele for ele in expConstr if ele != 'Functions' if ele !='External']) + len([ele for ele in impConstr if ele != 'Functions' if ele !='External']) ): + self.raiseAnError(IOError, f'The number of penaltyCoeff. in should be identical with the number of objective in and the number of constraints (i.e., and )') self._fitnessInstance = fitnessReturnInstance(self,name = self._fitnessType) self._repairInstance = repairReturnInstance(self,name='replacementRepair') # currently only replacement repair is implemented. @@ -637,7 +642,7 @@ def _useRealization(self, info, rlz): for i in range(len(self._objectiveVar)): offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) - offspringObjsVals = [list(ele) for ele in list(zip(*offObjVal))] + # offspringObjsVals = [list(ele) for ele in list(zip(*offObjVal))] # offspringFitVals for Rank and CD calculation fitVal = datasetToDataArray(self.fitness, self._objectiveVar).data diff --git a/ravenframework/Optimizers/Optimizer.py b/ravenframework/Optimizers/Optimizer.py index ddad3eed36..d12e96d71b 100644 --- a/ravenframework/Optimizers/Optimizer.py +++ b/ravenframework/Optimizers/Optimizer.py @@ -265,9 +265,9 @@ def handleInput(self, paramInput): if minMax is not None: self._minMax = minMax.value if len(self._minMax) != len(self._objectiveVar): - self.raiseAnError(IOError, 'type and objective must be of the same length!') + self.raiseAnError(IOError, 'The number of in -- and in - must be of the same length!') if list(set(self._minMax)-set(['min','max'])) != []: - self.raiseAnError(IOError, "type must be a list of 'min' and/or 'max'") + self.raiseAnError(IOError, " must be a either 'min' and/or 'max'") # variables additional reading for varNode in paramInput.findAll('variable'): diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index 725e28c10f..3384cf98d0 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -152,6 +152,9 @@ def feasibleFirst(rlz,**kwargs): objVar = kwargs['objVar'] g = kwargs['constraintFunction'] penalty = kwargs['b'] + pen = [penalty[i:i+len(g['Constraint'].data)] for i in range(0, len(penalty), len(g['Constraint'].data))] + objPen = dict(map(lambda i,j : (i,j), objVar, pen)) + for i in range(len(objVar)): data = np.atleast_1d(rlz[objVar][objVar[i]].data) worstObj = max(data) @@ -162,7 +165,7 @@ def feasibleFirst(rlz,**kwargs): else: fit = worstObj for constInd,_ in enumerate(g['Constraint'].data): - fit+= 
penalty*(max(0,-1*g.data[ind, constInd])) + fit+= objPen[objVar[i]][constInd]*(max(0,-1*g.data[ind, constInd])) #NOTE: objPen[objVar[i]][constInd] is "objective & Constraint specific penalty." fitness.append(fit) fitness = xr.DataArray(np.array(fitness), dims=['chromosome'], diff --git a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py index c3dce2f1a2..9702a91192 100644 --- a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py +++ b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py @@ -176,12 +176,12 @@ def rankNcrowdingBased(offsprings, **kwargs): newObjectivesMerged = np.array([i + j for i, j in zip(popObjectiveVal, offObjectiveVal)]) newObjectivesMerged_pair = [list(ele) for ele in list(zip(*newObjectivesMerged))] - newPopRank = frontUtils.rankNonDominatedFrontiers(np.array(newObjectivesMerged_pair)) + newPopRank = frontUtils.rankNonDominatedFrontiers(np.array(newFitMerged_pair)) newPopRank = xr.DataArray(newPopRank, dims=['rank'], coords={'rank': np.arange(np.shape(newPopRank)[0])}) - newPopCD = frontUtils.crowdingDistance(rank=newPopRank, popSize=len(newPopRank), objectives=np.array(newObjectivesMerged_pair)) + newPopCD = frontUtils.crowdingDistance(rank=newPopRank, popSize=len(newPopRank), objectives=np.array(newFitMerged_pair)) newPopCD = xr.DataArray(newPopCD, dims=['CrowdingDistance'], coords={'CrowdingDistance': np.arange(np.shape(newPopCD)[0])}) diff --git a/tests/framework/AnalyticModels/optimizing/myConstraints.py b/tests/framework/AnalyticModels/optimizing/myConstraints.py index d63407ef6f..ced89f7b48 100644 --- a/tests/framework/AnalyticModels/optimizing/myConstraints.py +++ b/tests/framework/AnalyticModels/optimizing/myConstraints.py @@ -94,7 +94,7 @@ def impConstr2(Input): @ In, Input, object, RAVEN container @ out, g, float, implicit constraint 2 evaluation function """ - g = Input.x1**2 + Input.obj - 10 + g = Input.x1**2 + Input.obj1 - 10 return g def impConstr3(Input): diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml index 9bf8993c9d..83e3c898d2 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml @@ -9,9 +9,9 @@ - Multi_MinwoReplacement_wo_constraints_20_5_M + Multi_MinwoReplacement_wo_constraints_50_10 optimize,print - 1 + 4 @@ -41,6 +41,9 @@ x1,x2,x3,x4,x5,x6 + x1,x2,x3,x4,x5,x6,obj1 @@ -57,10 +60,10 @@ - 5 + 10 42 every - min,min + min 20 @@ -74,7 +77,7 @@ - 100 + 50, 50, 100, 100 rankNcrowdingBased @@ -99,10 +102,11 @@ woRep_dist - obj1, obj2 + obj1 optOut MC_samp expConstr3 + impConstr3 @@ -138,7 +142,7 @@ x1,x2,x3,x4,x5,x6 - obj1,obj2 + obj1 trajID From a9577f4566563373ea5433b32c744b68a2fcced8 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Thu, 7 Sep 2023 14:16:01 -0600 Subject: [PATCH 44/84] NSGA-II fitness-based rank and CD calcuration is completed. Temporary plotting of pareto frontier is also updated reflectig optimization objectives (min or mix). 
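The "min or max" handling this commit message refers to follows the usual sign-flip convention: objectives declared as "max" are negated internally so the optimizer always minimizes, and the sign is flipped back before the Pareto front is reported or plotted. A small illustrative sketch (not part of the patch; minMax and front are made-up values) is shown below.

# Editorial sketch of the sign-flip convention used for "max" objectives.
import numpy as np

minMax = ['min', 'max']                          # assumed per-objective directions
front = np.array([[3.0, -7.0],                   # internal (minimized) objective values
                  [5.0, -9.0]])
signChange = [-1 if m == 'max' else 1 for m in minMax]
reported = front * signChange                    # values shown to the user / plotted
print(reported)                                  # [[3. 7.] [5. 9.]]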
--- ravenframework/Optimizers/GeneticAlgorithm.py | 40 ++++++++++--------- ravenframework/Optimizers/Optimizer.py | 4 +- .../constrained/MinwoRepMultiObjective.xml | 21 +++++----- 3 files changed, 33 insertions(+), 32 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 69e3f4205b..6804195d11 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -343,6 +343,8 @@ def handleInput(self, paramInput): parentSelectionNode = gaParamsNode.findFirst('parentSelection') self._parentSelectionType = parentSelectionNode.value self._parentSelectionInstance = parentSelectionReturnInstance(self, name=parentSelectionNode.value) + if len(self._objectiveVar) >=2 and self._parentSelectionType != 'tournamentSelection': + self.raiseAnError(IOError, f'tournamentSelection in is a sole mechanism supportive in multi-objective optimization.') # reproduction node reproductionNode = gaParamsNode.findFirst('reproduction') self._nParents = int(np.ceil(1/2 + np.sqrt(1+4*self._populationSize)/2)) @@ -350,6 +352,8 @@ def handleInput(self, paramInput): # crossover node crossoverNode = reproductionNode.findFirst('crossover') self._crossoverType = crossoverNode.parameterValues['type'] + if self._crossoverType not in ['onePointCrossover','twoPointsCrossover','uniformCrossover']: + self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support onePointCrossover, twoPointsCrossover and uniformCrossover as a crossover, whereas provided crossover is {self._crossoverType}') if crossoverNode.findFirst('points') is None: self._crossoverPoints = None else: @@ -359,6 +363,8 @@ def handleInput(self, paramInput): # mutation node mutationNode = reproductionNode.findFirst('mutation') self._mutationType = mutationNode.parameterValues['type'] + if self._mutationType not in ['swapMutator','scrambleMutator','inversionMutator','bitFlipMutator']: + self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support swapMutator, scrambleMutator, inversionMutator, and bitFlipMutator as a mutator, whereas provided mutator is {self._mutationType}') if mutationNode.findFirst('locs') is None: self._mutationLocs = None else: @@ -369,6 +375,8 @@ def handleInput(self, paramInput): survivorSelectionNode = gaParamsNode.findFirst('survivorSelection') self._survivorSelectionType = survivorSelectionNode.value self._survivorSelectionInstance = survivorSelectionReturnInstance(self,name = self._survivorSelectionType) + if self._survivorSelectionType not in ['ageBased','fitnessBased','rankNcrowdingBased']: + self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support ageBased, fitnessBased, and rankNcrowdingBased as a survivorSelector, whereas provided survivorSelector is {self._survivorSelectionType}') if len(self._objectiveVar) == 1 and self._survivorSelectionType == 'rankNcrowdingBased': self.raiseAnError(IOError, f'(rankNcrowdingBased) in only supports when the number of objective in is bigger than two. 
') # Fitness @@ -610,26 +618,22 @@ def _useRealization(self, info, rlz): self._resolveNewGenerationMulti(traj, rlz, info) ############################################################################## - objs_vals = [list(ele) for ele in list(zip(*self.objectiveVal))] ##TODO: remove all the plots and maybe design new plots in outstreams if our current cannot be used ## These are currently for debugging purposes import matplotlib.pyplot as plt - # JY: Visualization: all points - This code block needs to be either deleted or revisited. - plt.plot(np.array(objs_vals)[:,0], np.array(objs_vals)[:,1],'*') - - # JY: Visualization: optimal points only - This code block needs to be either deleted or revisited. - # plt.xlim(75,100) - # plt.ylim(5,20) - # plt.xlim(0,1) - # plt.ylim(0,6) - plt.title(str('Iteration ' + str(self.counter-1))) - - plt.plot(np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[:,0], - np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[:,1],'*') - for i in range(len(np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[:,0])): - plt.text(np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[i,0], - np.array(list(zip(self._optPointHistory[traj][-1][0][self._objectiveVar[0]], self._optPointHistory[traj][-1][0][self._objectiveVar[1]])))[i,1], str(self.batchId-1)) - plt.savefig('PF'+str(i)+'_'+str(self.counter-1)+'.png') + + signChange = list(map(lambda x:-1 if x=="max" else 1 , self._minMax)) + for i in range(0, len(self.multiBestObjective)): + newMultiBestObjective = self.multiBestObjective * signChange + + plt.title(str('BatchID = ' + str(self.batchId))) + plt.plot(newMultiBestObjective[:,0], + newMultiBestObjective[:,1],'*') + + for i in range(len(self.multiBestObjective[:,0])): + plt.text(newMultiBestObjective[i,0], + newMultiBestObjective[i,1], str(self.batchId)) + plt.savefig('PF'+str(i)+'_'+str(self.batchId)+'.png') ############################################################################## else: @@ -642,8 +646,6 @@ def _useRealization(self, info, rlz): for i in range(len(self._objectiveVar)): offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) - # offspringObjsVals = [list(ele) for ele in list(zip(*offObjVal))] - # offspringFitVals for Rank and CD calculation fitVal = datasetToDataArray(self.fitness, self._objectiveVar).data offspringFitVals = fitVal.tolist() diff --git a/ravenframework/Optimizers/Optimizer.py b/ravenframework/Optimizers/Optimizer.py index d12e96d71b..a9b5600615 100644 --- a/ravenframework/Optimizers/Optimizer.py +++ b/ravenframework/Optimizers/Optimizer.py @@ -162,7 +162,7 @@ def __init__(self): # public # _protected self._seed = None # random seed to apply - self._minMax = ['min'] # maximization or minimization? + self._minMax = ['min'] # maximization or minimization? 
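The plotting block introduced above flips the sign of any objective that is being maximized before drawing the current best front, so the figure shows objectives on their original scale. A compact, self-contained version of that sign handling follows; array names such as best_objectives stand in for self.multiBestObjective, and the internal "maximize as -obj" storage is an assumption made for the example.

import numpy as np
import matplotlib
matplotlib.use('Agg')                        # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt

best_objectives = np.array([[1.2, -9.0],     # assumed internal storage: maximized objective kept as -obj2
                            [2.5, -7.5],
                            [4.0, -6.1]])
min_max = ['min', 'max']                     # mirrors self._minMax
batch_id = 3

sign = np.array([-1.0 if m == 'max' else 1.0 for m in min_max])
plotted = best_objectives * sign             # undo the internal sign convention for display

plt.title(f'BatchID = {batch_id}')
plt.plot(plotted[:, 0], plotted[:, 1], '*')
for i in range(plotted.shape[0]):
    plt.text(plotted[i, 0], plotted[i, 1], str(batch_id))
plt.xlabel('obj1')
plt.ylabel('obj2')
plt.savefig(f'PF_{batch_id}.png')            # one snapshot per generation, as in the patch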
self._activeTraj = [] # tracks live trajectories self._cancelledTraj = {} # tracks cancelled trajectories, and reasons self._convergedTraj = {} # tracks converged trajectories, and values obtained @@ -267,7 +267,7 @@ def handleInput(self, paramInput): if len(self._minMax) != len(self._objectiveVar): self.raiseAnError(IOError, 'The number of in -- and in - must be of the same length!') if list(set(self._minMax)-set(['min','max'])) != []: - self.raiseAnError(IOError, " must be a either 'min' and/or 'max'") + self.raiseAnError(IOError, " under - must be a either 'min' and/or 'max'") # variables additional reading for varNode in paramInput.findAll('variable'): diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml index 83e3c898d2..1483ffa8f5 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml @@ -9,7 +9,7 @@ - Multi_MinwoReplacement_wo_constraints_50_10 + Multi_MinwoReplacement_wo_constraints_min_max_min_50_20 optimize,print 4 @@ -33,7 +33,7 @@ - x1,x2,x3,x4,x5,x6,obj1,obj2 + x1,x2,x3,x4,x5,x6,obj1,obj2,obj3 @@ -60,13 +60,13 @@ - 10 + 5 42 every - min + min, max, min - 20 + 50 tournamentSelection @@ -77,7 +77,7 @@ - 50, 50, 100, 100 + 50, 50, 100, 100, 100, 150 rankNcrowdingBased @@ -102,7 +102,7 @@ woRep_dist - obj1 + obj1, obj2, obj3 optOut MC_samp expConstr3 @@ -114,7 +114,7 @@ - 20 + 50 050877 @@ -142,12 +142,11 @@ x1,x2,x3,x4,x5,x6 - obj1 + obj1, obj2, obj3 trajID - - x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,FitnessEvaluation_obj1,FitnessEvaluation_obj2,ConstraintEvaluation_expConstr3, ConstraintEvaluation_impConstr3,accepted + x1,x2,x3,x4,x5,x6,obj1,obj2,obj3,age,batchId,rank,CD,FitnessEvaluation_obj1,FitnessEvaluation_obj2,ConstraintEvaluation_expConstr3, ConstraintEvaluation_impConstr3,accepted From 9b42d7d66d51c3299cd30ab65cd3c812c4742b47 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Sun, 10 Sep 2023 17:31:50 -0600 Subject: [PATCH 45/84] minor bugs are fixed. --- ravenframework/Optimizers/GeneticAlgorithm.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 6804195d11..26fbbdbbbe 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -389,8 +389,11 @@ def handleInput(self, paramInput): self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None expConstr = self.assemblerObjects['Constraint'][0] impConstr = self.assemblerObjects['ImplicitConstraint'][0] - if len(self._penaltyCoeff) != len(self._objectiveVar) * (len([ele for ele in expConstr if ele != 'Functions' if ele !='External']) + len([ele for ele in impConstr if ele != 'Functions' if ele !='External']) ): - self.raiseAnError(IOError, f'The number of penaltyCoeff. in should be identical with the number of objective in and the number of constraints (i.e., and )') + if self._penaltyCoeff != None: + if len(self._penaltyCoeff) != len(self._objectiveVar) * (len([ele for ele in expConstr if ele != 'Functions' if ele !='External']) + len([ele for ele in impConstr if ele != 'Functions' if ele !='External']) ): + self.raiseAnError(IOError, f'The number of penaltyCoeff. 
in should be identical with the number of objective in and the number of constraints (i.e., and )') + else: + pass self._fitnessInstance = fitnessReturnInstance(self,name = self._fitnessType) self._repairInstance = repairReturnInstance(self,name='replacementRepair') # currently only replacement repair is implemented. From f6ecb5f194763ad73aa112f84eb6007eec268a97 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Sun, 17 Sep 2023 16:38:00 -0600 Subject: [PATCH 46/84] Every type of fitness is now working with newly updated GA interface (fitnes: Xarray array=> Xarray dataset) which is for handling multi-objective optimization. Regression tests passed except newly included multi-objective optimization tests (i.e., ZDT1, and multisum) due to its survival selection method change. --- ravenframework/Optimizers/GeneticAlgorithm.py | 176 +++++++++++------- ravenframework/Optimizers/fitness/fitness.py | 70 ++++--- .../continuous/unconstrained/ZDT1.xml | 2 +- 3 files changed, 153 insertions(+), 95 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 26fbbdbbbe..cff7e23c89 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -17,12 +17,14 @@ Genetic Algorithm-based optimization. Multiple strategies for mutations, cross-overs, etc. are available. Created June,3,2020 - @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi + Updated Sepember,17,2023 + @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi, Junyung Kim References ---------- .. [1] Holland, John H. "Genetic algorithms." Scientific American 267.1 (1992): 66-73. - [2] Z. Michalewicz, "Genetic Algorithms. + Data Structures. = Evolution Programs," Third, Revised - and Extended Edition, Springer (1996). + [2] Z. Michalewicz, "Genetic Algorithms. + Data Structures. = Evolution Programs," Third, Revised and Extended Edition, Springer (1996). + [3] Deb, Kalyanmoy, et al. "A fast and elitist multiobjective genetic algorithm: NSGA-II." IEEE transactions on evolutionary computation 6.2 (2002): 182-197. + [4] Deb, Kalyanmoy. "An efficient constraint handling method for genetic algorithms." Computer methods in applied mechanics and engineering 186.2-4 (2000): 311-338. """ # External Modules---------------------------------------------------------------------------------- from collections import deque, defaultdict @@ -267,9 +269,13 @@ def getInputSpecification(cls): b. logistic: $fitness = \frac{1}{1+e^{a\times(obj-b)}}$. - c. feasibleFirst: $fitness = \left\{\begin{matrix} -obj & g_j(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right.$""") + c. feasibleFirst: $fitness = \left\{\begin{matrix} -obj & g_j(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right.$ + + d. 
hardConstraint: $fitness = the number of constraints violated.$ + + """) fitness.addParam("type", InputTypes.StringType, True, - descr=r"""[invLin, logistic, feasibleFirst]""") + descr=r"""[invLin, logistic, feasibleFirst, hardConstraint]""") objCoeff = InputData.parameterInputFactory('a', strictMode=True, contentType=InputTypes.FloatListType, printPriority=108, @@ -334,22 +340,36 @@ def handleInput(self, paramInput): @ Out, None """ RavenSampled.handleInput(self, paramInput) - # GAparams + #################################################################################### + # GAparams # + #################################################################################### gaParamsNode = paramInput.findFirst('GAparams') - # populationSize + + #################################################################################### + # populationSize # + #################################################################################### populationSizeNode = gaParamsNode.findFirst('populationSize') self._populationSize = populationSizeNode.value - # parent selection + + #################################################################################### + # parent selection node # + #################################################################################### parentSelectionNode = gaParamsNode.findFirst('parentSelection') self._parentSelectionType = parentSelectionNode.value self._parentSelectionInstance = parentSelectionReturnInstance(self, name=parentSelectionNode.value) if len(self._objectiveVar) >=2 and self._parentSelectionType != 'tournamentSelection': self.raiseAnError(IOError, f'tournamentSelection in is a sole mechanism supportive in multi-objective optimization.') - # reproduction node + + #################################################################################### + # reproduction node # + #################################################################################### reproductionNode = gaParamsNode.findFirst('reproduction') self._nParents = int(np.ceil(1/2 + np.sqrt(1+4*self._populationSize)/2)) self._nChildren = int(2*comb(self._nParents,2)) - # crossover node + + #################################################################################### + # crossover node # + #################################################################################### crossoverNode = reproductionNode.findFirst('crossover') self._crossoverType = crossoverNode.parameterValues['type'] if self._crossoverType not in ['onePointCrossover','twoPointsCrossover','uniformCrossover']: @@ -360,18 +380,24 @@ def handleInput(self, paramInput): self._crossoverPoints = crossoverNode.findFirst('points').value self._crossoverProb = crossoverNode.findFirst('crossoverProb').value self._crossoverInstance = crossoversReturnInstance(self,name = self._crossoverType) - # mutation node + + #################################################################################### + # mutation node # + #################################################################################### mutationNode = reproductionNode.findFirst('mutation') self._mutationType = mutationNode.parameterValues['type'] - if self._mutationType not in ['swapMutator','scrambleMutator','inversionMutator','bitFlipMutator']: - self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support swapMutator, scrambleMutator, inversionMutator, and bitFlipMutator as a mutator, whereas provided mutator is {self._mutationType}') + if self._mutationType not in 
['swapMutator','scrambleMutator','inversionMutator','bitFlipMutator','randomMutator']: + self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support swapMutator, scrambleMutator, inversionMutator, bitFlipMutator, and randomMutator as a mutator, whereas provided mutator is {self._mutationType}') if mutationNode.findFirst('locs') is None: self._mutationLocs = None else: self._mutationLocs = mutationNode.findFirst('locs').value self._mutationProb = mutationNode.findFirst('mutationProb').value self._mutationInstance = mutatorsReturnInstance(self,name = self._mutationType) - # Survivor selection + + #################################################################################### + # survivor selection node # + #################################################################################### survivorSelectionNode = gaParamsNode.findFirst('survivorSelection') self._survivorSelectionType = survivorSelectionNode.value self._survivorSelectionInstance = survivorSelectionReturnInstance(self,name = self._survivorSelectionType) @@ -379,25 +405,53 @@ def handleInput(self, paramInput): self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support ageBased, fitnessBased, and rankNcrowdingBased as a survivorSelector, whereas provided survivorSelector is {self._survivorSelectionType}') if len(self._objectiveVar) == 1 and self._survivorSelectionType == 'rankNcrowdingBased': self.raiseAnError(IOError, f'(rankNcrowdingBased) in only supports when the number of objective in is bigger than two. ') - # Fitness + + #################################################################################### + # fitness node # + #################################################################################### fitnessNode = gaParamsNode.findFirst('fitness') self._fitnessType = fitnessNode.parameterValues['type'] + + #################################################################################### + # constraint node # + #################################################################################### # TODO: @mandd, please explore the possibility to convert the logistic fitness into a constrained optimization fitness. 
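For reference on the reproduction sizing above, _nParents is chosen as ceil(1/2 + sqrt(1 + 4*populationSize)/2) so that the 2*C(nParents, 2) children produced by pairwise crossover are enough to refill the population. A quick standalone check of that arithmetic (not RAVEN code):

import math

def reproduction_sizes(population_size):
    # smallest nParents with nParents*(nParents - 1) >= populationSize
    n_parents = int(math.ceil(0.5 + math.sqrt(1 + 4 * population_size) / 2.0))
    n_children = 2 * math.comb(n_parents, 2)    # = nParents * (nParents - 1)
    return n_parents, n_children

for pop in (10, 20, 50):
    n_parents, n_children = reproduction_sizes(pop)
    assert n_children >= pop                    # enough children to form the next generation
    print(pop, n_parents, n_children)           # -> 10 4 12, 20 5 20, 50 8 56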
- if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','feasibleFirst','hardConstraint']: - self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear, feasibleFirst and hardConstraint as a fitness, whereas provided fitness is {self._fitnessType}') - self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None - self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None - expConstr = self.assemblerObjects['Constraint'][0] - impConstr = self.assemblerObjects['ImplicitConstraint'][0] - if self._penaltyCoeff != None: - if len(self._penaltyCoeff) != len(self._objectiveVar) * (len([ele for ele in expConstr if ele != 'Functions' if ele !='External']) + len([ele for ele in impConstr if ele != 'Functions' if ele !='External']) ): + if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','logistic', 'feasibleFirst','hardConstraint']: + self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear, logistic, feasibleFirst and hardConstraint as a fitness, whereas provided fitness is {self._fitnessType}') + self._expConstr = self.assemblerObjects['Constraint'][0] if 'Constraint' in self.assemblerObjects else None + self._impConstr = self.assemblerObjects['ImplicitConstraint'][0] if 'ImplicitConstraint' in self.assemblerObjects else None + if self._expConstr != None and self._impConstr != None: + self._numOfConst = len([ele for ele in self._expConstr if ele != 'Functions' if ele !='External']) + len([ele for ele in self._impConstr if ele != 'Functions' if ele !='External']) + elif self._expConstr == None and self._impConstr != None: + self._numOfConst = len([ele for ele in self._impConstr if ele != 'Functions' if ele !='External']) + elif self._expConstr != None and self._impConstr == None: + self._numOfConst = len([ele for ele in self._expConstr if ele != 'Functions' if ele !='External']) + else: + self._numOfConst = 0 + if (self._expConstr != None) and (self._impConstr != None) and (self._penaltyCoeff != None): + if len(self._penaltyCoeff) != len(self._objectiveVar) * self._numOfConst: self.raiseAnError(IOError, f'The number of penaltyCoeff. in should be identical with the number of objective in and the number of constraints (i.e., and )') else: pass + self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None + #NOTE the code lines below are for 'feasibleFirst' temperarily. It will be generalized for invLinear as well. + if self._fitnessType == 'feasibleFirst': + if self._numOfConst != 0 and fitnessNode.findFirst('b') is not None: + self._penaltyCoeff = fitnessNode.findFirst('b').value + elif self._numOfConst == 0 and fitnessNode.findFirst('b') is not None: + self.raiseAnError(IOError, f'The number of constraints used are 0 but there are penalty coefficieints') + elif self._numOfConst != 0 and fitnessNode.findFirst('b') is None: + self._penaltyCoeff = list(np.repeat(1, self._numOfConst * len(self._objectiveVar))) #NOTE if penaltyCoeff is not provided, then assume they are all 1. 
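The constraint-counting block above derives _numOfConst from the explicit and implicit constraint entries and then falls back to default penalty coefficients when <b> is omitted. A condensed sketch of that decision logic, with plain lists standing in for RAVEN's assembler objects:

import numpy as np

def resolve_penalties(objectives, explicit=None, implicit=None, b=None):
    # returns (nConstraints, penalty list of length nObjectives * nConstraints)
    def count(entries):
        # assembler entries look like ['Functions', 'expConstr3']; skip the bookkeeping tags
        return len([e for e in (entries or []) if e not in ('Functions', 'External')])

    n_const = count(explicit) + count(implicit)
    n_obj = len(objectives)
    if b is not None:
        if n_const == 0:
            raise ValueError('penalty coefficients were given but no constraints are defined')
        if len(b) != n_obj * n_const:
            raise ValueError(f'expected {n_obj * n_const} penalty coefficients, got {len(b)}')
        return n_const, list(b)
    # no <b> node: default to 1 per (objective, constraint) pair, or 0 per objective if unconstrained
    defaults = np.repeat(1.0, n_obj * n_const) if n_const else np.repeat(0.0, n_obj)
    return n_const, defaults.tolist()

print(resolve_penalties(['obj1', 'obj2'],
                        explicit=['Functions', 'expConstr3'],
                        implicit=['Functions', 'impConstr3'],
                        b=[50, 50, 100, 100]))      # -> (2, [50, 50, 100, 100])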
+ else: + self._penaltyCoeff = list(np.repeat(0, len(self._objectiveVar))) + else: + self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None self._fitnessInstance = fitnessReturnInstance(self,name = self._fitnessType) self._repairInstance = repairReturnInstance(self,name='replacementRepair') # currently only replacement repair is implemented. - # Convergence Criterion + #################################################################################### + # convergence criterion node # + #################################################################################### convNode = paramInput.findFirst('convergence') if convNode is not None: for sub in convNode.subparts: @@ -453,9 +507,9 @@ def needDenormalized(self): # overload as needed in inheritors return True - ########################################################################################################## - # Run Methods # - ########################################################################################################## + #################################################################################### + # Run Methods # + #################################################################################### def _useRealization(self, info, rlz): """ @@ -480,11 +534,11 @@ def _useRealization(self, info, rlz): # 0.1 @ n-1: fitnessCalculation(rlz) # perform fitness calculation for newly obtained children (rlz) - if not self._canHandleMultiObjective or len(self._objectiveVar) == 1: # This is a single-objective Optimization case + if not self._canHandleMultiObjective or len(self._objectiveVar) == 1: # This is for a single-objective Optimization case. offSprings = datasetToDataArray(rlz, list(self.toBeSampled)) objectiveVal = list(np.atleast_1d(rlz[self._objectiveVar[0]].data)) - # collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions) + # Collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions) constraintData = {} if self._constraintFunctions or self._impConstraintFunctions: params = [] @@ -492,8 +546,7 @@ def _useRealization(self, info, rlz): params += y.parameterNames() for p in list(set(params) -set([self._objectiveVar[0]]) -set(list(self.toBeSampled.keys()))): constraintData[p] = list(np.atleast_1d(rlz[p].data)) - # Compute constraint function g_j(x) for all constraints (j = 1 .. J) - # and all x's (individuals) in the population + # Compute constraint function g_j(x) for all constraints (j = 1 .. J) and all x's (individuals) in the population g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions))) g = xr.DataArray(g0, @@ -522,18 +575,19 @@ def _useRealization(self, info, rlz): b=self._penaltyCoeff, penalty=None, constraintFunction=g, + constraintNum = self._numOfConst, type=self._minMax) self._collectOptPoint(rlz, offSpringFitness, objectiveVal, g) self._resolveNewGeneration(traj, rlz, objectiveVal, offSpringFitness, g, info) - else: # This is a multi-objective Optimization case + else: # This is for a multi-objective Optimization case. 
objectiveVal = [] offSprings = datasetToDataArray(rlz, list(self.toBeSampled)) for i in range(len(self._objectiveVar)): objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) - # collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions) + # Collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions) constraintData = {} if self._constraintFunctions or self._impConstraintFunctions: params = [] @@ -541,8 +595,7 @@ def _useRealization(self, info, rlz): params += y.parameterNames() for p in list(set(params) -set(self._objectiveVar) -set(list(self.toBeSampled.keys()))): constraintData[p] = list(np.atleast_1d(rlz[p].data)) - # Compute constraint function g_j(x) for all constraints (j = 1 .. J) - # and all x's (individuals) in the population + # Compute constraint function g_j(x) for all constraints (j = 1 .. J) and all x's (individuals) in the population g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions))) g = xr.DataArray(g0, @@ -569,15 +622,14 @@ def _useRealization(self, info, rlz): objVar=self._objectiveVar, a=self._objCoeff, b=self._penaltyCoeff, - penalty =None, constraintFunction=g, + constraintNum = self._numOfConst, type =self._minMax) - # 0.2@ n-1: Survivor selection(rlz) - # update population container given obtained children - + # 0.2@ n-1: Survivor selection(rlz) + # Update population container given obtained children if self._activeTraj: - if len(self._objectiveVar) == 1: # If the number of objectives is just 1: + if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case. if self.counter > 1: self.population, self.fitness,\ age,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge, @@ -593,7 +645,7 @@ def _useRealization(self, info, rlz): self.fitness = offSpringFitness self.objectiveVal = rlz[self._objectiveVar[0]].data - else: # If the number of objectives is more than 1: + else: # This is for a multi-objective Optimization case. if self.counter > 1: self.population,self.rank, \ self.popAge,self.crowdingDistance, \ @@ -675,16 +727,16 @@ def _useRealization(self, info, rlz): self.fitness, self.constraintsV) self._resolveNewGenerationMulti(traj, rlz, info) + # 1 @ n: Parent selection from population # pair parents together by indexes - - if len(self._objectiveVar) == 1: # If the number of objectives is just 1: + if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case. parents = self._parentSelectionInstance(self.population, variables=list(self.toBeSampled), fitness=self.fitness, nParents=self._nParents) - else: # This is for a multi-objective Optimization case + else: # This is for a multi-objective Optimization case. 
parents = self._parentSelectionInstance(self.population, variables=list(self.toBeSampled), @@ -695,14 +747,14 @@ def _useRealization(self, info, rlz): ) # 2 @ n: Crossover from set of parents - # create childrenCoordinates (x1,...,xM) + # Create childrenCoordinates (x1,...,xM) childrenXover = self._crossoverInstance(parents=parents, variables=list(self.toBeSampled), crossoverProb=self._crossoverProb, points=self._crossoverPoints) # 3 @ n: Mutation - # perform random directly on childrenCoordinates + # Perform random directly on childrenCoordinates childrenMutated = self._mutationInstance(offSprings=childrenXover, distDict=self.distDict, locs=self._mutationLocs, @@ -710,7 +762,7 @@ def _useRealization(self, info, rlz): variables=list(self.toBeSampled)) # 4 @ n: repair/replacement - # repair should only happen if multiple genes in a single chromosome have the same values (), + # Repair should only happen if multiple genes in a single chromosome have the same values (), # and at the same time the sampling of these genes should be with Out replacement. needsRepair = False for chrom in range(self._nChildren): @@ -734,7 +786,7 @@ def _useRealization(self, info, rlz): 'Gene':list(self.toBeSampled)}) # 5 @ n: Submit children batch - # submit children coordinates (x1,...,xm), i.e., self.childrenCoordinates + # Submit children coordinates (x1,...,xm), i.e., self.childrenCoordinates for i in range(self.batch): newRlz = {} for _, var in enumerate(self.toBeSampled.keys()): @@ -758,7 +810,6 @@ def _submitRun(self, point, traj, step, moreInfo=None): }) # NOTE: Currently, GA treats explicit and implicit constraints similarly # while box constraints (Boundary constraints) are automatically handled via limits of the distribution - # self.raiseADebug(f'Adding run to queue: {self.denormalizeData(point)} | {info}') self._submissionQueue.append((point, info)) @@ -967,7 +1018,7 @@ def _collectOptPointMulti(self, population, rank, CD, objVal, fitness, constrain self.multiBestFitness = fitSet self.multiBestObjective = optObjVal self.multiBestConstraint = optConstNew - self.multiBestRank = optRank #TODO JY: MultiBestRank is not anymore in need. This should be removed later. + self.multiBestRank = optRank self.multiBestCD = optCD return optPointsDic @@ -1018,7 +1069,7 @@ def _checkConvObjective(self, traj, **kwargs): @ In, kwargs, dict, dictionary of parameters for convergence criteria @ Out, converged, bool, convergence state """ - if len(self._objectiveVar) == 1: # single objective optimization + if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case. if len(self._optPointHistory[traj]) < 2: return False o1, _ = self._optPointHistory[traj][-1] @@ -1028,19 +1079,13 @@ def _checkConvObjective(self, traj, **kwargs): conv=str(converged), got=obj, req=self._convergenceCriteria['objective'])) - else: # multi objective optimization + else: # This is for a multi-objective Optimization case. if len(self._optPointHistory[traj]) < 2: return False o1, _ = self._optPointHistory[traj][-1] obj1 = o1[self._objectiveVar[0]] obj2 = o1[self._objectiveVar[1]] converged = (obj1 == self._convergenceCriteria['objective'] and obj2 == self._convergenceCriteria['objective']) - # JY: I stopped here. Codeline below needs to be revisited! 
01/16/23 - # self.raiseADebug(self.convFormat.format(name='objective', - # conv=str(converged), - # got=obj1, - # req=self._convergenceCriteria['objective'])) - return converged def _checkConvAHDp(self, traj, **kwargs): @@ -1161,7 +1206,7 @@ def _updateConvergence(self, traj, new, old, acceptable): @ Out, converged, bool, True if converged on ANY criteria """ # NOTE we have multiple "if acceptable" trees here, as we need to update soln export regardless - if len(self._objectiveVar) == 1: # single-objective optimization + if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case. if acceptable == 'accepted': self.raiseADebug(f'Convergence Check for Trajectory {traj}:') # check convergence @@ -1170,7 +1215,7 @@ def _updateConvergence(self, traj, new, old, acceptable): converged = False convDict = dict((var, False) for var in self._convergenceInfo[traj]) self._convergenceInfo[traj].update(convDict) - else: # multi-objective optimization + else: # This is for a multi-objective Optimization case. if acceptable == 'accepted': self.raiseADebug(f'Convergence Check for Trajectory {traj}:') # check convergence @@ -1214,8 +1259,9 @@ def _rejectOptPoint(self, traj, info, old): """ return - # * * * * * * * * * * * * - # Constraint Handling + ############################### + # Constraint Handling # + ############################### def _handleExplicitConstraints(self, point, constraint): """ Computes explicit (i.e. input-based) constraints @@ -1278,9 +1324,9 @@ def _checkImpFunctionalConstraints(self, point, opt, impConstraint): g = impConstraint.evaluate('implicitConstraint', inputs) return g - - # END constraint handling - # * * * * * * * * * * * * + ############################### + # END constraint handling # + ############################### def _addToSolutionExport(self, traj, rlz, acceptable): """ Contributes additional entries to the solution export. diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index 3384cf98d0..68d8dfe07a 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -16,7 +16,8 @@ currently the implemented fitness function is a linear combination of the objective function and penalty function for constraint violation: Created June,16,2020 - @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi + Updated September,17,2023 + @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi, Junyung Kim """ # Internal Modules---------------------------------------------------------------------------------- from ...utils import frontUtils @@ -59,26 +60,29 @@ def invLinear(rlz,**kwargs): @ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome. 
""" if kwargs['a'] == None: - a = 1.0 + a = [1.0] else: a = kwargs['a'] if kwargs['b'] == None: - b = 10.0 + b = [10.0] else: b = kwargs['b'] if kwargs['constraintFunction'].all() == None: penalty = 0.0 else: penalty = kwargs['constraintFunction'].data - - objVar = kwargs['objVar'] - data = np.atleast_1d(rlz[objVar].data) - - fitness = -a * (rlz[objVar].data).reshape(-1,1) - b * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1) - fitness = xr.DataArray(np.squeeze(fitness), - dims=['chromosome'], - coords={'chromosome': np.arange(len(data))}) - return fitness + objVar = [kwargs['objVar']] + for j in range(len(objVar)): + data = np.atleast_1d(rlz[objVar][objVar[j]].data) + fitness = -a[0] * (rlz[objVar][objVar[j]].data).reshape(-1,1) - b[0] * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1) + fitness = xr.DataArray(np.squeeze(fitness), + dims=['chromosome'], + coords={'chromosome': np.arange(len(data))}) + if j == 0: + fitnessSet = fitness.to_dataset(name = objVar[j]) + else: + fitnessSet[objVar[j]] = fitness + return fitnessSet def hardConstraint(rlz,**kwargs): r""" @@ -150,23 +154,26 @@ def feasibleFirst(rlz,**kwargs): objVar = [kwargs['objVar']] else: objVar = kwargs['objVar'] - g = kwargs['constraintFunction'] - penalty = kwargs['b'] - pen = [penalty[i:i+len(g['Constraint'].data)] for i in range(0, len(penalty), len(g['Constraint'].data))] + if kwargs['constraintNum'] == 0: + pen = kwargs['b'] + else: + g = kwargs['constraintFunction'] + penalty = kwargs['b'] + pen = [penalty[i:i+len(g['Constraint'].data)] for i in range(0, len(penalty), len(g['Constraint'].data))] + objPen = dict(map(lambda i,j : (i,j), objVar, pen)) - for i in range(len(objVar)): data = np.atleast_1d(rlz[objVar][objVar[i]].data) worstObj = max(data) fitness = [] for ind in range(data.size): - if np.all(g.data[ind, :]>=0): + if kwargs['constraintNum'] == 0 or np.all(g.data[ind, :]>=0): fit=(data[ind]) else: fit = worstObj for constInd,_ in enumerate(g['Constraint'].data): fit+= objPen[objVar[i]][constInd]*(max(0,-1*g.data[ind, constInd])) #NOTE: objPen[objVar[i]][constInd] is "objective & Constraint specific penalty." - fitness.append(fit) + fitness.append(-1*fit) fitness = xr.DataArray(np.array(fitness), dims=['chromosome'], coords={'chromosome': np.arange(len(data))}) @@ -194,25 +201,30 @@ def logistic(rlz,**kwargs): @ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome. 
""" if kwargs['a'] == None: - a = 1.0 + a = [1.0] else: a = kwargs['a'] if kwargs['b'] == None: - b = 0.0 + b = [0.0] else: b = kwargs['b'] - objVar = kwargs['objVar'] - val = rlz[objVar] - data = np.atleast_1d(rlz[objVar].data) - denom = 1.0 + np.exp(-a * (val - b)) - fitness = 1.0 / denom - fitness = xr.DataArray(np.array(fitness), - dims=['chromosome'], - coords={'chromosome': np.arange(len(data))}) + objVar = [kwargs['objVar']] + for i in range(len(objVar)): + val = rlz[objVar][objVar[i]].data + data = np.atleast_1d(rlz[objVar][objVar[i]].data) + denom = 1.0 + np.exp(-a[0] * (val - b[0])) + fitness = 1.0 / denom + fitness = xr.DataArray(fitness.data, + dims=['chromosome'], + coords={'chromosome': np.arange(len(data))}) + if i == 0: + fitnessSet = fitness.to_dataset(name = objVar[i]) + else: + fitnessSet[objVar[i]] = fitness - return fitness + return fitnessSet __fitness = {} diff --git a/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml index 04cd9bce62..d100a928b0 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml @@ -64,7 +64,7 @@ 1.0 - + rankNcrowdingBased From 8a262855fc4ad0e597481ae27501f73bfd30559e Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Thu, 21 Sep 2023 14:16:05 -0600 Subject: [PATCH 47/84] multi-objective optimization using invLinear and logistics now works. --- ravenframework/Optimizers/fitness/fitness.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index 68d8dfe07a..3dc0625e75 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -71,7 +71,10 @@ def invLinear(rlz,**kwargs): penalty = 0.0 else: penalty = kwargs['constraintFunction'].data - objVar = [kwargs['objVar']] + if isinstance(kwargs['objVar'], str) == True: + objVar = [kwargs['objVar']] + else: + objVar = kwargs['objVar'] for j in range(len(objVar)): data = np.atleast_1d(rlz[objVar][objVar[j]].data) fitness = -a[0] * (rlz[objVar][objVar[j]].data).reshape(-1,1) - b[0] * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1) @@ -204,13 +207,14 @@ def logistic(rlz,**kwargs): a = [1.0] else: a = kwargs['a'] - if kwargs['b'] == None: b = [0.0] else: b = kwargs['b'] - - objVar = [kwargs['objVar']] + if isinstance(kwargs['objVar'], str) == True: + objVar = [kwargs['objVar']] + else: + objVar = kwargs['objVar'] for i in range(len(objVar)): val = rlz[objVar][objVar[i]].data data = np.atleast_1d(rlz[objVar][objVar[i]].data) From 51eb867199c1a5a3e569bec12aca719d9856b11f Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Thu, 21 Sep 2023 14:56:32 -0600 Subject: [PATCH 48/84] constraint handling for single and multi objective optimization in _useRealization method is now separated and new methods called singleConstraint and multiConstraint are created to avoid if-else statement for single/multi objective optimization. 
--- ravenframework/Optimizers/GeneticAlgorithm.py | 170 ++++++++++-------- 1 file changed, 92 insertions(+), 78 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index cff7e23c89..fced405aed 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -507,32 +507,11 @@ def needDenormalized(self): # overload as needed in inheritors return True - #################################################################################### - # Run Methods # - #################################################################################### - - def _useRealization(self, info, rlz): - """ - Used to feedback the collected runs into actionable items within the sampler. - This is called by localFinalizeActualSampling, and hence should contain the main skeleton. - @ In, info, dict, identifying information about the realization - @ In, rlz, xr.Dataset, new batched realizations - @ Out, None - """ - # The whole skeleton should be here, this should be calling all classes and _private methods. + def singleConstraint(self, info, rlz): traj = info['traj'] for t in self._activeTraj[1:]: self._closeTrajectory(t, 'cancel', 'Currently GA is single trajectory', 0) self.incrementIteration(traj) - info['step'] = self.counter - - # Developer note: each algorithm step is indicated by a number followed by the generation number - # e.g., '0 @ n-1' refers to step 0 for generation n-1 (i.e., previous generation) - # for more details refer to GRP-Raven-development/Disceret_opt channel on MS Teams - - # 0 @ n-1: Survivor Selection from previous iteration (children+parents merging from previous generation) - # 0.1 @ n-1: fitnessCalculation(rlz) - # perform fitness calculation for newly obtained children (rlz) if not self._canHandleMultiObjective or len(self._objectiveVar) == 1: # This is for a single-objective Optimization case. offSprings = datasetToDataArray(rlz, list(self.toBeSampled)) @@ -550,13 +529,9 @@ def _useRealization(self, info, rlz): g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions))) g = xr.DataArray(g0, - dims=['chromosome','Constraint'], - coords={'chromosome':np.arange(np.shape(offSprings)[0]), - 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]}) - # FIXME The constraint handling is following the structure of the RavenSampled.py, - # there are many utility functions that can be simplified and/or merged together - # _check, _handle, and _apply, for explicit and implicit constraints. 
- # This can be simplified in the near future in GradientDescent, SimulatedAnnealing, and here in GA + dims=['chromosome','Constraint'], + coords={'chromosome':np.arange(np.shape(offSprings)[0]), + 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]}) for index,individual in enumerate(offSprings): newOpt = individual opt = {self._objectiveVar[0]:objectiveVal[index]} @@ -570,64 +545,103 @@ def _useRealization(self, info, rlz): g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint) offSpringFitness = self._fitnessInstance(rlz, - objVar=self._objectiveVar[0], - a=self._objCoeff, - b=self._penaltyCoeff, - penalty=None, - constraintFunction=g, - constraintNum = self._numOfConst, - type=self._minMax) + objVar=self._objectiveVar[0], + a=self._objCoeff, + b=self._penaltyCoeff, + penalty=None, + constraintFunction=g, + constraintNum = self._numOfConst, + type=self._minMax) self._collectOptPoint(rlz, offSpringFitness, objectiveVal, g) self._resolveNewGeneration(traj, rlz, objectiveVal, offSpringFitness, g, info) + return traj, g, objectiveVal, offSprings, offSpringFitness + + def multiConstraint(self, info, rlz): + traj = info['traj'] + for t in self._activeTraj[1:]: + self._closeTrajectory(t, 'cancel', 'Currently GA is single trajectory', 0) + self.incrementIteration(traj) - else: # This is for a multi-objective Optimization case. - objectiveVal = [] - offSprings = datasetToDataArray(rlz, list(self.toBeSampled)) - for i in range(len(self._objectiveVar)): - objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + objectiveVal = [] + offSprings = datasetToDataArray(rlz, list(self.toBeSampled)) + for i in range(len(self._objectiveVar)): + objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + + # Collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions) + constraintData = {} + if self._constraintFunctions or self._impConstraintFunctions: + params = [] + for y in (self._constraintFunctions + self._impConstraintFunctions): + params += y.parameterNames() + for p in list(set(params) -set(self._objectiveVar) -set(list(self.toBeSampled.keys()))): + constraintData[p] = list(np.atleast_1d(rlz[p].data)) + # Compute constraint function g_j(x) for all constraints (j = 1 .. 
J) and all x's (individuals) in the population + g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions))) + + g = xr.DataArray(g0, + dims=['chromosome','Constraint'], + coords={'chromosome':np.arange(np.shape(offSprings)[0]), + 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]}) + + for index,individual in enumerate(offSprings): + newOpt = individual + objOpt = dict(zip(self._objectiveVar, + list(map(lambda x:-1 if x=="max" else 1 , self._minMax)))) + opt = dict(zip(self._objectiveVar, [item[index] for item in objectiveVal])) + opt = {k: objOpt[k]*opt[k] for k in opt} + for p, v in constraintData.items(): + opt[p] = v[index] + + for constIndex, constraint in enumerate(self._constraintFunctions + self._impConstraintFunctions): + if constraint in self._constraintFunctions: + g.data[index, constIndex] = self._handleExplicitConstraints(newOpt, constraint) + else: + g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint) - # Collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions) - constraintData = {} - if self._constraintFunctions or self._impConstraintFunctions: - params = [] - for y in (self._constraintFunctions + self._impConstraintFunctions): - params += y.parameterNames() - for p in list(set(params) -set(self._objectiveVar) -set(list(self.toBeSampled.keys()))): - constraintData[p] = list(np.atleast_1d(rlz[p].data)) - # Compute constraint function g_j(x) for all constraints (j = 1 .. J) and all x's (individuals) in the population - g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions))) + offSpringFitness = self._fitnessInstance(rlz, + objVar=self._objectiveVar, + a=self._objCoeff, + b=self._penaltyCoeff, + constraintFunction=g, + constraintNum = self._numOfConst, + type =self._minMax) + return traj, g, objectiveVal, offSprings, offSpringFitness - g = xr.DataArray(g0, - dims=['chromosome','Constraint'], - coords={'chromosome':np.arange(np.shape(offSprings)[0]), - 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]}) - for index,individual in enumerate(offSprings): - newOpt = individual - objOpt = dict(zip(self._objectiveVar, - list(map(lambda x:-1 if x=="max" else 1 , self._minMax)))) - opt = dict(zip(self._objectiveVar, [item[index] for item in objectiveVal])) - opt = {k: objOpt[k]*opt[k] for k in opt} - for p, v in constraintData.items(): - opt[p] = v[index] - for constIndex, constraint in enumerate(self._constraintFunctions + self._impConstraintFunctions): - if constraint in self._constraintFunctions: - g.data[index, constIndex] = self._handleExplicitConstraints(newOpt, constraint) - else: - g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint) + ######################################################################################################### + # Run Methods # + ######################################################################################################### - offSpringFitness = self._fitnessInstance(rlz, - objVar=self._objectiveVar, - a=self._objCoeff, - b=self._penaltyCoeff, - constraintFunction=g, - constraintNum = self._numOfConst, - type =self._minMax) - - # 0.2@ n-1: Survivor selection(rlz) - # Update population container given obtained children + ######################################################################################################### + # 
Developer note: + # Each algorithm step is indicated by a number followed by the generation number + # e.g., '0 @ n-1' refers to step 0 for generation n-1 (i.e., previous generation) + # for more details refer to GRP-Raven-development/Disceret_opt channel on MS Teams. + ######################################################################################################### + + def _useRealization(self, info, rlz): + """ + Used to feedback the collected runs into actionable items within the sampler. + This is called by localFinalizeActualSampling, and hence should contain the main skeleton. + @ In, info, dict, identifying information about the realization + @ In, rlz, xr.Dataset, new batched realizations + @ Out, None + """ + + info['step'] = self.counter + + # 0 @ n-1: Survivor Selection from previous iteration (children+parents merging from previous generation) + # 0.1 @ n-1: fitnessCalculation(rlz): Perform fitness calculation for newly obtained children (rlz) + + objInd = 1 if len(self._objectiveVar) == 1 else 2 + constraintFuncs: dict = {1: GeneticAlgorithm.singleConstraint, 2: GeneticAlgorithm.multiConstraint} + const = constraintFuncs.get(objInd, GeneticAlgorithm.singleConstraint) + traj, g, objectiveVal, offSprings, offSpringFitness = const(self, info, rlz) + + + # 0.2@ n-1: Survivor selection(rlz): Update population container given obtained children if self._activeTraj: if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case. if self.counter > 1: From 061c3bc4f608485183c9896b3738ccc078b18ac4 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Mon, 25 Sep 2023 16:41:57 -0600 Subject: [PATCH 49/84] 1. If-else statement for survivorSelection in _useRealization is removed and cleaned. Still, some regression tests are failed due to different age at the final solution. 2. Code updates are on going according to Mohammad's comments. 
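Patches 48 and 49 above replace the single/multi-objective if-else in _useRealization with a dictionary that dispatches onto handler methods (singleObjSurvivorSelect vs. multiObjSurvivorSelect). A stripped-down illustration of that idiom, with placeholder handler bodies:

class TinyGA:
    # toy stand-in for the dispatch pattern used in the patch
    def __init__(self, objectives):
        self._objectiveVar = list(objectives)

    def _single_objective_step(self, rlz):
        return f'single-objective path over {sorted(rlz)}'

    def _multi_objective_step(self, rlz):
        return f'multi-objective path over {sorted(rlz)}'

    def use_realization(self, rlz):
        key = 1 if len(self._objectiveVar) == 1 else 2
        handlers = {1: TinyGA._single_objective_step, 2: TinyGA._multi_objective_step}
        handler = handlers.get(key, TinyGA._single_objective_step)
        return handler(self, rlz)     # unbound call, mirroring survivor(self, info, rlz) in the patch

print(TinyGA(['obj1']).use_realization({'obj1': [1, 2]}))
print(TinyGA(['obj1', 'obj2']).use_realization({'obj1': [1], 'obj2': [2]}))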
--- ravenframework/Optimizers/GeneticAlgorithm.py | 471 +++++++++--------- ravenframework/Optimizers/fitness/fitness.py | 70 +-- 2 files changed, 271 insertions(+), 270 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index fced405aed..9b885225c0 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -75,42 +75,41 @@ def __init__(self): self.popAge = None # population age self.fitness = None # population fitness self.rank = None # population rank (for Multi-objective optimization only) - self.constraints = None - self.constraintsV = None + self.constraintsV = None # // self.crowdingDistance = None # population crowding distance (for Multi-objective optimization only) self.ahdp = np.NaN # p-Average Hausdorff Distance between populations self.ahd = np.NaN # Hausdorff Distance between populations - self.bestPoint = None - self.bestFitness = None - self.bestObjective = None - self.multiBestPoint = None - self.multiBestFitness = None - self.multiBestObjective = None - self.multiBestConstraint = None - self.multiBestRank = None - self.multiBestCD = None - self.objectiveVal = None - self._populationSize = None - self._parentSelectionType = None - self._parentSelectionInstance = None - self._nParents = None - self._nChildren = None - self._crossoverType = None - self._crossoverPoints = None - self._crossoverProb = None - self._crossoverInstance = None - self._mutationType = None - self._mutationLocs = None - self._mutationProb = None - self._mutationInstance = None - self._survivorSelectionType = None - self._survivorSelectionInstance = None - self._fitnessType = None - self._objCoeff = None - self._penaltyCoeff = None - self._fitnessInstance = None - self._repairInstance = None - self._canHandleMultiObjective = True + self.bestPoint = None # // + self.bestFitness = None # // + self.bestObjective = None # // + self.multiBestPoint = None # // + self.multiBestFitness = None # // + self.multiBestObjective = None # // + self.multiBestConstraint = None # // + self.multiBestRank = None # // + self.multiBestCD = None # // + self.objectiveVal = None # // + self._populationSize = None # // + self._parentSelectionType = None # // + self._parentSelectionInstance = None # // + self._nParents = None # // + self._nChildren = None # // + self._crossoverType = None # // + self._crossoverPoints = None # // + self._crossoverProb = None # // + self._crossoverInstance = None # // + self._mutationType = None # // + self._mutationLocs = None # // + self._mutationProb = None # // + self._mutationInstance = None # // + self._survivorSelectionType = None # // + self._survivorSelectionInstance = None # // + self._fitnessType = None # // + self._objCoeff = None # // + self._penaltyCoeff = None # // + self._fitnessInstance = None # // + self._repairInstance = None # // + self._canHandleMultiObjective = True # // ########################## # Initialization Methods # @@ -165,11 +164,11 @@ def getInputSpecification(cls): \item ageBased. \item fitnessBased. \end{itemize} - \item constraintHandling: - \begin{itemize} - \item hard. - \item soft. - \end{itemize} + # \item constraintHandling: + # \begin{itemize} + # \item hard. + # \item soft. 
+ # \end{itemize} \end{itemize}""") # Population Size populationSize = InputData.parameterInputFactory('populationSize', strictMode=True, @@ -179,11 +178,12 @@ def getInputSpecification(cls): GAparams.addSub(populationSize) # Constraint Handling - constraintHandling = InputData.parameterInputFactory('constraintHandling', strictMode=True, - contentType=InputTypes.StringType, - printPriority=108, - descr=r"""a node indicating whether GA will handle constraints hardly or softly.""") - GAparams.addSub(constraintHandling) + #NOTE An indicator saying whather GA will handle constraint hardly or softly will be upgraded later @JunyungKim + # constraintHandling = InputData.parameterInputFactory('constraintHandling', strictMode=True, + # contentType=InputTypes.StringType, + # printPriority=108, + # descr=r"""a node indicating whether GA will handle constraints hardly or softly.""") + # GAparams.addSub(constraintHandling) # Parent Selection parentSelection = InputData.parameterInputFactory('parentSelection', strictMode=True, @@ -271,8 +271,6 @@ def getInputSpecification(cls): c. feasibleFirst: $fitness = \left\{\begin{matrix} -obj & g_j(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right.$ - d. hardConstraint: $fitness = the number of constraints violated.$ - """) fitness.addParam("type", InputTypes.StringType, True, descr=r"""[invLin, logistic, feasibleFirst, hardConstraint]""") @@ -352,7 +350,7 @@ def handleInput(self, paramInput): self._populationSize = populationSizeNode.value #################################################################################### - # parent selection node # + # parent selection # #################################################################################### parentSelectionNode = gaParamsNode.findFirst('parentSelection') self._parentSelectionType = parentSelectionNode.value @@ -361,14 +359,14 @@ def handleInput(self, paramInput): self.raiseAnError(IOError, f'tournamentSelection in is a sole mechanism supportive in multi-objective optimization.') #################################################################################### - # reproduction node # + # reproduction # #################################################################################### reproductionNode = gaParamsNode.findFirst('reproduction') self._nParents = int(np.ceil(1/2 + np.sqrt(1+4*self._populationSize)/2)) self._nChildren = int(2*comb(self._nParents,2)) #################################################################################### - # crossover node # + # crossover # #################################################################################### crossoverNode = reproductionNode.findFirst('crossover') self._crossoverType = crossoverNode.parameterValues['type'] @@ -382,7 +380,7 @@ def handleInput(self, paramInput): self._crossoverInstance = crossoversReturnInstance(self,name = self._crossoverType) #################################################################################### - # mutation node # + # mutation # #################################################################################### mutationNode = reproductionNode.findFirst('mutation') self._mutationType = mutationNode.parameterValues['type'] @@ -396,7 +394,7 @@ def handleInput(self, paramInput): self._mutationInstance = mutatorsReturnInstance(self,name = self._mutationType) #################################################################################### - # survivor selection node # + # survivor selection # 
#################################################################################### survivorSelectionNode = gaParamsNode.findFirst('survivorSelection') self._survivorSelectionType = survivorSelectionNode.value @@ -407,13 +405,13 @@ def handleInput(self, paramInput): self.raiseAnError(IOError, f'(rankNcrowdingBased) in only supports when the number of objective in is bigger than two. ') #################################################################################### - # fitness node # + # fitness # #################################################################################### fitnessNode = gaParamsNode.findFirst('fitness') self._fitnessType = fitnessNode.parameterValues['type'] #################################################################################### - # constraint node # + # constraint # #################################################################################### # TODO: @mandd, please explore the possibility to convert the logistic fitness into a constrained optimization fitness. if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','logistic', 'feasibleFirst','hardConstraint']: @@ -450,7 +448,7 @@ def handleInput(self, paramInput): self._repairInstance = repairReturnInstance(self,name='replacementRepair') # currently only replacement repair is implemented. #################################################################################### - # convergence criterion node # + # convergence criterion # #################################################################################### convNode = paramInput.findFirst('convergence') if convNode is not None: @@ -507,11 +505,8 @@ def needDenormalized(self): # overload as needed in inheritors return True - def singleConstraint(self, info, rlz): + def singleObjConstraint(self, info, rlz): traj = info['traj'] - for t in self._activeTraj[1:]: - self._closeTrajectory(t, 'cancel', 'Currently GA is single trajectory', 0) - self.incrementIteration(traj) if not self._canHandleMultiObjective or len(self._objectiveVar) == 1: # This is for a single-objective Optimization case. 
offSprings = datasetToDataArray(rlz, list(self.toBeSampled)) @@ -545,23 +540,20 @@ def singleConstraint(self, info, rlz): g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint) offSpringFitness = self._fitnessInstance(rlz, - objVar=self._objectiveVar[0], - a=self._objCoeff, - b=self._penaltyCoeff, - penalty=None, - constraintFunction=g, - constraintNum = self._numOfConst, - type=self._minMax) + objVar=self._objectiveVar[0], + a=self._objCoeff, + b=self._penaltyCoeff, + penalty=None, + constraintFunction=g, + constraintNum = self._numOfConst, + type=self._minMax) self._collectOptPoint(rlz, offSpringFitness, objectiveVal, g) self._resolveNewGeneration(traj, rlz, objectiveVal, offSpringFitness, g, info) return traj, g, objectiveVal, offSprings, offSpringFitness - def multiConstraint(self, info, rlz): + def multiObjConstraint(self, info, rlz): traj = info['traj'] - for t in self._activeTraj[1:]: - self._closeTrajectory(t, 'cancel', 'Currently GA is single trajectory', 0) - self.incrementIteration(traj) objectiveVal = [] offSprings = datasetToDataArray(rlz, list(self.toBeSampled)) @@ -600,14 +592,112 @@ def multiConstraint(self, info, rlz): g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint) offSpringFitness = self._fitnessInstance(rlz, - objVar=self._objectiveVar, - a=self._objCoeff, - b=self._penaltyCoeff, - constraintFunction=g, - constraintNum = self._numOfConst, - type =self._minMax) + objVar=self._objectiveVar, + a=self._objCoeff, + b=self._penaltyCoeff, + constraintFunction=g, + constraintNum = self._numOfConst, + type =self._minMax + ) return traj, g, objectiveVal, offSprings, offSpringFitness + def singleObjSurvivorSelect(self, info, rlz): + + traj, g, objectiveVal, offSprings, offSpringFitness = GeneticAlgorithm.singleObjConstraint(self, info, rlz) + + if self.counter > 1: + self.population, self.fitness,\ + self.popAge,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge, + variables=list(self.toBeSampled), + population=self.population, + fitness=self.fitness, + newRlz=rlz, + offSpringsFitness=offSpringFitness, + popObjectiveVal=self.objectiveVal + ) + else: + self.population = offSprings + self.fitness = offSpringFitness + self.objectiveVal = rlz[self._objectiveVar[0]].data + + + def multiObjSurvivorSelect(self, info, rlz): + + traj, g, objectiveVal, offSprings, offSpringFitness = GeneticAlgorithm.multiObjConstraint(self, info, rlz) + + if self.counter > 1: + self.population,self.rank, \ + self.popAge,self.crowdingDistance, \ + self.objectiveVal,self.fitness, \ + self.constraintsV = self._survivorSelectionInstance(age=self.popAge, + variables=list(self.toBeSampled), + population=self.population, + offsprings=rlz, + popObjectiveVal=self.objectiveVal, + offObjectiveVal=objectiveVal, + popFit = self.fitness, + offFit = offSpringFitness, + popConstV = self.constraintsV, + offConstV = g + ) + else: + self.population = offSprings + self.fitness = offSpringFitness + self.constraintsV = g + + # offspringObjsVals for Rank and CD calculation + offObjVal = [] + for i in range(len(self._objectiveVar)): + offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + + # offspringFitVals for Rank and CD calculation + fitVal = datasetToDataArray(self.fitness, self._objectiveVar).data + offspringFitVals = fitVal.tolist() + offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringFitVals)) + self.rank = xr.DataArray(offSpringRank, + dims=['rank'], + coords={'rank': 
np.arange(np.shape(offSpringRank)[0])}) + offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, + popSize=len(offSpringRank), + objectives=np.array(offspringFitVals)) + + self.crowdingDistance = xr.DataArray(offSpringCD, + dims=['CrowdingDistance'], + coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) + + self.objectiveVal = [] + for i in range(len(self._objectiveVar)): + self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + + self._collectOptPointMulti(self.population, + self.rank, + self.crowdingDistance, + self.objectiveVal, + self.fitness, + self.constraintsV) + self._resolveNewGenerationMulti(traj, rlz, info) + + ####################################################################################################### + ##TODO: remove all the plots and maybe design new plots in outstreams if our current cannot be used + ## These are currently for debugging purposes @JunyungKim + import matplotlib.pyplot as plt + + signChange = list(map(lambda x:-1 if x=="max" else 1 , self._minMax)) + for i in range(0, len(self.multiBestObjective)): + newMultiBestObjective = self.multiBestObjective * signChange + + plt.title(str('BatchID = ' + str(self.batchId))) + plt.plot(newMultiBestObjective[:,0], + newMultiBestObjective[:,1],'*') + + for i in range(len(self.multiBestObjective[:,0])): + plt.text(newMultiBestObjective[i,0], + newMultiBestObjective[i,1], str(self.batchId)) + # plt.savefig('PF'+str(i)+'_'+str(self.batchId)+'.png') + plt.savefig('PF_'+str(self.batchId)+'.png') + ####################################################################################################### + + ######################################################################################################### @@ -630,182 +720,86 @@ def _useRealization(self, info, rlz): @ Out, None """ + traj = info['traj'] + for t in self._activeTraj[1:]: + self._closeTrajectory(t, 'cancel', 'Currently GA is single trajectory', 0) + self.incrementIteration(traj) info['step'] = self.counter + objInd = 1 if len(self._objectiveVar) == 1 else 2 # 0 @ n-1: Survivor Selection from previous iteration (children+parents merging from previous generation) # 0.1 @ n-1: fitnessCalculation(rlz): Perform fitness calculation for newly obtained children (rlz) + # fitness calculation method (i.e., singleObjConstraint or multiObjConstraint) will be called by either singleObjSurvivorSelect or multiObjSurvivorSelect. - objInd = 1 if len(self._objectiveVar) == 1 else 2 - constraintFuncs: dict = {1: GeneticAlgorithm.singleConstraint, 2: GeneticAlgorithm.multiConstraint} - const = constraintFuncs.get(objInd, GeneticAlgorithm.singleConstraint) - traj, g, objectiveVal, offSprings, offSpringFitness = const(self, info, rlz) + # 0.2@ n-1: Survivor selection(rlz): Update population container given obtained children + survivorSelectionFuncs: dict = {1: GeneticAlgorithm.singleObjSurvivorSelect, 2: GeneticAlgorithm.multiObjSurvivorSelect} + survivor = survivorSelectionFuncs.get(objInd, GeneticAlgorithm.singleObjSurvivorSelect) + survivor(self, info, rlz) + # 1 @ n: Parent selection from population + # pair parents together by indexes + if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case. 
+ parents = self._parentSelectionInstance(self.population, + variables=list(self.toBeSampled), + fitness=self.fitness, + nParents=self._nParents) - # 0.2@ n-1: Survivor selection(rlz): Update population container given obtained children - if self._activeTraj: - if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case. - if self.counter > 1: - self.population, self.fitness,\ - age,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge, - variables=list(self.toBeSampled), - population=self.population, - fitness=self.fitness, - newRlz=rlz, - offSpringsFitness=offSpringFitness, - popObjectiveVal=self.objectiveVal) - self.popAge = age - else: - self.population = offSprings - self.fitness = offSpringFitness - self.objectiveVal = rlz[self._objectiveVar[0]].data - - else: # This is for a multi-objective Optimization case. - if self.counter > 1: - self.population,self.rank, \ - self.popAge,self.crowdingDistance, \ - self.objectiveVal,self.fitness, \ - self.constraintsV = self._survivorSelectionInstance(age=self.popAge, - variables=list(self.toBeSampled), - population=self.population, - offsprings=rlz, - popObjectiveVal=self.objectiveVal, - offObjectiveVal=objectiveVal, - popFit = self.fitness, - offFit = offSpringFitness, - popConstV = self.constraintsV, - offConstV = g - ) - - - - self._collectOptPointMulti(self.population, - self.rank, - self.crowdingDistance, - self.objectiveVal, - self.fitness, - self.constraintsV) - self._resolveNewGenerationMulti(traj, rlz, info) - - ############################################################################## - ##TODO: remove all the plots and maybe design new plots in outstreams if our current cannot be used - ## These are currently for debugging purposes - import matplotlib.pyplot as plt - - signChange = list(map(lambda x:-1 if x=="max" else 1 , self._minMax)) - for i in range(0, len(self.multiBestObjective)): - newMultiBestObjective = self.multiBestObjective * signChange - - plt.title(str('BatchID = ' + str(self.batchId))) - plt.plot(newMultiBestObjective[:,0], - newMultiBestObjective[:,1],'*') - - for i in range(len(self.multiBestObjective[:,0])): - plt.text(newMultiBestObjective[i,0], - newMultiBestObjective[i,1], str(self.batchId)) - plt.savefig('PF'+str(i)+'_'+str(self.batchId)+'.png') - ############################################################################## + else: # This is for a multi-objective Optimization case. 
- else: - self.population = offSprings - self.fitness = offSpringFitness - self.constraintsV = g - - # offspringObjsVals for Rank and CD calculation - offObjVal = [] - for i in range(len(self._objectiveVar)): - offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) - - # offspringFitVals for Rank and CD calculation - fitVal = datasetToDataArray(self.fitness, self._objectiveVar).data - offspringFitVals = fitVal.tolist() - offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringFitVals)) - self.rank = xr.DataArray(offSpringRank, - dims=['rank'], - coords={'rank': np.arange(np.shape(offSpringRank)[0])}) - offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, - popSize=len(offSpringRank), - objectives=np.array(offspringFitVals)) - - self.crowdingDistance = xr.DataArray(offSpringCD, - dims=['CrowdingDistance'], - coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) - - self.objectiveVal = [] - for i in range(len(self._objectiveVar)): - self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) - - self._collectOptPointMulti(self.population, - self.rank, - self.crowdingDistance, - self.objectiveVal, - self.fitness, - self.constraintsV) - self._resolveNewGenerationMulti(traj, rlz, info) - - # 1 @ n: Parent selection from population - # pair parents together by indexes - if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case. - parents = self._parentSelectionInstance(self.population, - variables=list(self.toBeSampled), - fitness=self.fitness, - nParents=self._nParents) - - else: # This is for a multi-objective Optimization case. - - parents = self._parentSelectionInstance(self.population, - variables=list(self.toBeSampled), - nParents=self._nParents, - rank = self.rank, - crowdDistance = self.crowdingDistance, - fitness = self.fitness - ) - - # 2 @ n: Crossover from set of parents - # Create childrenCoordinates (x1,...,xM) - childrenXover = self._crossoverInstance(parents=parents, + parents = self._parentSelectionInstance(self.population, variables=list(self.toBeSampled), - crossoverProb=self._crossoverProb, - points=self._crossoverPoints) - - # 3 @ n: Mutation - # Perform random directly on childrenCoordinates - childrenMutated = self._mutationInstance(offSprings=childrenXover, - distDict=self.distDict, - locs=self._mutationLocs, - mutationProb=self._mutationProb, - variables=list(self.toBeSampled)) - - # 4 @ n: repair/replacement - # Repair should only happen if multiple genes in a single chromosome have the same values (), - # and at the same time the sampling of these genes should be with Out replacement. 
- needsRepair = False - for chrom in range(self._nChildren): - unique = set(childrenMutated.data[chrom, :]) - if len(childrenMutated.data[chrom,:]) != len(unique): - for var in self.toBeSampled: # TODO: there must be a smarter way to check if a variables strategy is without replacement - if (hasattr(self.distDict[var], 'strategy') and self.distDict[var].strategy == 'withoutReplacement'): - needsRepair = True - break - if needsRepair: - children = self._repairInstance(childrenMutated,variables=list(self.toBeSampled),distInfo=self.distDict) - else: - children = childrenMutated + nParents=self._nParents, + rank = self.rank, + crowdDistance = self.crowdingDistance, + fitness = self.fitness + ) + + # 2 @ n: Crossover from set of parents + # Create childrenCoordinates (x1,...,xM) + childrenXover = self._crossoverInstance(parents=parents, + variables=list(self.toBeSampled), + crossoverProb=self._crossoverProb, + points=self._crossoverPoints) + + # 3 @ n: Mutation + # Perform random directly on childrenCoordinates + childrenMutated = self._mutationInstance(offSprings=childrenXover, + distDict=self.distDict, + locs=self._mutationLocs, + mutationProb=self._mutationProb, + variables=list(self.toBeSampled)) + + # 4 @ n: repair/replacement + # Repair should only happen if multiple genes in a single chromosome have the same values (), + # and at the same time the sampling of these genes should be with Out replacement. + needsRepair = False + for chrom in range(self._nChildren): + unique = set(childrenMutated.data[chrom, :]) + if len(childrenMutated.data[chrom,:]) != len(unique): + for var in self.toBeSampled: # TODO: there must be a smarter way to check if a variables strategy is without replacement + if (hasattr(self.distDict[var], 'strategy') and self.distDict[var].strategy == 'withoutReplacement'): + needsRepair = True + break + if needsRepair: + children = self._repairInstance(childrenMutated,variables=list(self.toBeSampled),distInfo=self.distDict) + else: + children = childrenMutated - # keeping the population size constant by ignoring the excessive children - children = children[:self._populationSize, :] + # keeping the population size constant by ignoring the excessive children + children = children[:self._populationSize, :] - daChildren = xr.DataArray(children, - dims=['chromosome','Gene'], - coords={'chromosome': np.arange(np.shape(children)[0]), - 'Gene':list(self.toBeSampled)}) + daChildren = xr.DataArray(children, + dims=['chromosome','Gene'], + coords={'chromosome': np.arange(np.shape(children)[0]), + 'Gene':list(self.toBeSampled)}) - # 5 @ n: Submit children batch - # Submit children coordinates (x1,...,xm), i.e., self.childrenCoordinates - for i in range(self.batch): - newRlz = {} - for _, var in enumerate(self.toBeSampled.keys()): - newRlz[var] = float(daChildren.loc[i, var].values) - self._submitRun(newRlz, traj, self.getIteration(traj)) + # 5 @ n: Submit children batch + # Submit children coordinates (x1,...,xm), i.e., self.childrenCoordinates + for i in range(self.batch): + newRlz = {} + for _, var in enumerate(self.toBeSampled.keys()): + newRlz[var] = float(daChildren.loc[i, var].values) + self._submitRun(newRlz, traj, self.getIteration(traj)) def _submitRun(self, point, traj, step, moreInfo=None): """ @@ -931,7 +925,6 @@ def _resolveNewGenerationMulti(self, traj, rlz, info): print("### self.population.shape is {}".format(self.population.shape)) for i in range(rlz.sizes['RAVEN_sample_ID']): varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + 
list(self.toBeSampled.keys()) - # rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) rlzDict = dict((var,self.population.data[i][j]) for j, var in enumerate(self.population.Gene.data)) rlzDict['batchId'] = rlz['batchId'].data[i] for j in range(len(self._objectiveVar)): diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index 3dc0625e75..bd4b5d834e 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -87,38 +87,39 @@ def invLinear(rlz,**kwargs): fitnessSet[objVar[j]] = fitness return fitnessSet -def hardConstraint(rlz,**kwargs): - r""" - Fitness method counting the number of constraints violated +#NOTE hardConstraint method will be used later once constraintHandling is realized. Until then, it will be commented. @JunyungKim +# def hardConstraint(rlz,**kwargs): +# r""" +# Fitness method counting the number of constraints violated - @ In, rlz, xr.Dataset, containing the evaluation of a certain - set of individuals (can be the initial population for the very first iteration, - or a population of offsprings) - @ In, kwargs, dict, dictionary of parameters for this rank_crowding method: - objVar, string, the names of the objective variables - @ Out, offSpringRank, xr.DataArray, the rank of the given objective corresponding to a specific chromosome. - offSpringCD, xr.DataArray, the crowding distance of the given objective corresponding to a specific chromosome. - """ - if isinstance(kwargs['objVar'], str) == True: - objVar = [kwargs['objVar']] - else: - objVar = kwargs['objVar'] - g = kwargs['constraintFunction'] +# @ In, rlz, xr.Dataset, containing the evaluation of a certain +# set of individuals (can be the initial population for the very first iteration, +# or a population of offsprings) +# @ In, kwargs, dict, dictionary of parameters for this rank_crowding method: +# objVar, string, the names of the objective variables +# @ Out, offSpringRank, xr.DataArray, the rank of the given objective corresponding to a specific chromosome. +# offSpringCD, xr.DataArray, the crowding distance of the given objective corresponding to a specific chromosome. 
+# """ +# if isinstance(kwargs['objVar'], str) == True: +# objVar = [kwargs['objVar']] +# else: +# objVar = kwargs['objVar'] +# g = kwargs['constraintFunction'] - for j in range(len(objVar)): - fitness = np.zeros((len(g.data), 1)) - for i in range(len(fitness)): - fitness[i] = countConstViolation(g.data[i]) - fitness = [-item for sublist in fitness.tolist() for item in sublist] - fitness = xr.DataArray(fitness, - dims=['NumOfConstraintViolated'], - coords={'NumOfConstraintViolated':np.arange(np.shape(fitness)[0])}) - if j == 0: - fitnessSet = fitness.to_dataset(name = objVar[j]) - else: - fitnessSet[objVar[j]] = fitness +# for j in range(len(objVar)): +# fitness = np.zeros((len(g.data), 1)) +# for i in range(len(fitness)): +# fitness[i] = countConstViolation(g.data[i]) +# fitness = [-item for sublist in fitness.tolist() for item in sublist] +# fitness = xr.DataArray(fitness, +# dims=['NumOfConstraintViolated'], +# coords={'NumOfConstraintViolated':np.arange(np.shape(fitness)[0])}) +# if j == 0: +# fitnessSet = fitness.to_dataset(name = objVar[j]) +# else: +# fitnessSet[objVar[j]] = fitness - return fitnessSet +# return fitnessSet def feasibleFirst(rlz,**kwargs): @@ -176,7 +177,13 @@ def feasibleFirst(rlz,**kwargs): fit = worstObj for constInd,_ in enumerate(g['Constraint'].data): fit+= objPen[objVar[i]][constInd]*(max(0,-1*g.data[ind, constInd])) #NOTE: objPen[objVar[i]][constInd] is "objective & Constraint specific penalty." - fitness.append(-1*fit) + if len(kwargs['type']) == 1: + fitness.append(-1*fit) + else: + if kwargs['type'][i] == 'min': + fitness.append(fit) + else: + fitness.append(-1*fit) fitness = xr.DataArray(np.array(fitness), dims=['chromosome'], coords={'chromosome': np.arange(len(data))}) @@ -235,7 +242,8 @@ def logistic(rlz,**kwargs): __fitness['invLinear'] = invLinear __fitness['logistic'] = logistic __fitness['feasibleFirst'] = feasibleFirst -__fitness['hardConstraint'] = hardConstraint +#NOTE hardConstraint method will be used later once constraintHandling is realized. Until then, it will be commented. @JunyungKim +# __fitness['hardConstraint'] = hardConstraint def returnInstance(cls, name): From 59d43e1eb21a400f4e9172c290ae5e19744efdf1 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 26 Sep 2023 16:56:49 -0600 Subject: [PATCH 50/84] 1. Mohammad's comments are reflected; 2. Unneccesary if-else statements for single/multi objective optimization in _useRealization are removed and cleaned; 3. Unnecessary commented lines in frontUtils are removed; 4. fitness calculation for multi objective optimization is corrected. Now min-min/min-max/max-min/max-max combinations work fine. 
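
The commit message above states that the fitness calculation now handles every min/max combination of objectives. As a reference point, here is a minimal, self-contained sketch of a feasible-first style fitness with a per-objective direction flag. It only illustrates the idea, it is not the code in ravenframework/Optimizers/fitness/fitness.py, and all names in it are assumptions.

import numpy as np

def feasibleFirstSketch(objVals, constraintVals, direction='min', penaltyWeights=None):
  """
    Sketch only: feasible chromosomes keep their direction-adjusted objective,
    infeasible ones are pushed below the worst value in the population by a
    penalty proportional to each constraint violation (g_j(x) >= 0 means satisfied).
    @ In, objVals, np.ndarray, objective value per chromosome, shape (nPop,)
    @ In, constraintVals, np.ndarray, constraint evaluations, shape (nPop, nConstraints)
    @ In, direction, str, 'min' or 'max' for this objective
    @ In, penaltyWeights, np.ndarray or None, penalty weight per constraint
    @ Out, fitness, np.ndarray, fitness per chromosome (larger is better)
  """
  sign = 1.0 if direction == 'min' else -1.0
  obj = sign*objVals                                  # internally everything is treated as minimization
  nPop, nConstraints = constraintVals.shape
  weights = np.ones(nConstraints) if penaltyWeights is None else penaltyWeights
  worst = obj.max()                                   # worst objective value in the population
  fitness = np.empty(nPop)
  for i in range(nPop):
    if np.all(constraintVals[i] >= 0):                # feasible chromosome
      fitness[i] = -obj[i]
    else:                                             # infeasible chromosome: always worse than any feasible one
      violation = np.maximum(0.0, -constraintVals[i])
      fitness[i] = -(worst + np.dot(weights, violation))
  return fitness
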
--- ravenframework/Optimizers/GeneticAlgorithm.py | 397 +++++++++--------- ravenframework/Optimizers/fitness/fitness.py | 98 +++-- .../parentSelectors/parentSelectors.py | 11 +- ravenframework/utils/frontUtils.py | 28 -- 4 files changed, 268 insertions(+), 266 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 9b885225c0..39385e2322 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -75,37 +75,38 @@ def __init__(self): self.popAge = None # population age self.fitness = None # population fitness self.rank = None # population rank (for Multi-objective optimization only) - self.constraintsV = None # // + self.constraintsV = None # calculated contraints value self.crowdingDistance = None # population crowding distance (for Multi-objective optimization only) self.ahdp = np.NaN # p-Average Hausdorff Distance between populations self.ahd = np.NaN # Hausdorff Distance between populations - self.bestPoint = None # // - self.bestFitness = None # // - self.bestObjective = None # // - self.multiBestPoint = None # // - self.multiBestFitness = None # // - self.multiBestObjective = None # // - self.multiBestConstraint = None # // - self.multiBestRank = None # // - self.multiBestCD = None # // - self.objectiveVal = None # // - self._populationSize = None # // - self._parentSelectionType = None # // - self._parentSelectionInstance = None # // - self._nParents = None # // - self._nChildren = None # // - self._crossoverType = None # // - self._crossoverPoints = None # // - self._crossoverProb = None # // - self._crossoverInstance = None # // - self._mutationType = None # // - self._mutationLocs = None # // - self._mutationProb = None # // - self._mutationInstance = None # // - self._survivorSelectionType = None # // - self._survivorSelectionInstance = None # // - self._fitnessType = None # // + self.bestPoint = None # the best solution (chromosome) found among population in a specific batchId + self.bestFitness = None # fitness value of the best solution found + self.bestObjective = None # objective value of the best solution found + self.multiBestPoint = None # the best solutions (chromosomes) found among population in a specific batchId + self.multiBestFitness = None # fitness values of the best solutions found + self.multiBestObjective = None # objective values of the best solutions found + self.multiBestConstraint = None # constraint values of the best solutions found + self.multiBestRank = None # rank values of the best solutions found + self.multiBestCD = None # crowding distance (CD) values of the best solutions found + self.objectiveVal = None # objective values of solutions + self._populationSize = None # number of population size + self._parentSelectionType = None # type of the parent selection process chosen + self._parentSelectionInstance = None # instance of the parent selection process chosen + self._nParents = None # number of parents + self._nChildren = None # number of children + self._crossoverType = None # type of the crossover process chosen + self._crossoverPoints = None # point where crossover process will happen + self._crossoverProb = None # probability of crossover process will happen + self._crossoverInstance = None # instance of the crossover process chosen + self._mutationType = None # type of the mutation process chosen + self._mutationLocs = None # point where mutation process will happen + self._mutationProb = None # probability of mutation 
process will happen + self._mutationInstance = None # instance of the mutation process chosen + self._survivorSelectionType = None # type of the survivor selection process chosen + self._survivorSelectionInstance = None # instance of the survivor selection process chosen + self._fitnessType = None # type of the fitness calculation chosen self._objCoeff = None # // + self._objectiveVar = None # objective variable names self._penaltyCoeff = None # // self._fitnessInstance = None # // self._repairInstance = None # // @@ -164,6 +165,7 @@ def getInputSpecification(cls): \item ageBased. \item fitnessBased. \end{itemize} + #NOTE An indicator saying whather GA will handle constraint hardly or softly will be upgraded later @JunyungKim # \item constraintHandling: # \begin{itemize} # \item hard. @@ -177,8 +179,8 @@ def getInputSpecification(cls): descr=r"""The number of chromosomes in each population.""") GAparams.addSub(populationSize) - # Constraint Handling #NOTE An indicator saying whather GA will handle constraint hardly or softly will be upgraded later @JunyungKim + # # Constraint Handling # constraintHandling = InputData.parameterInputFactory('constraintHandling', strictMode=True, # contentType=InputTypes.StringType, # printPriority=108, @@ -271,6 +273,8 @@ def getInputSpecification(cls): c. feasibleFirst: $fitness = \left\{\begin{matrix} -obj & g_j(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right.$ + d. hardConstraint: $fitness = the number of constraints violated.$ + """) fitness.addParam("type", InputTypes.StringType, True, descr=r"""[invLin, logistic, feasibleFirst, hardConstraint]""") @@ -350,7 +354,7 @@ def handleInput(self, paramInput): self._populationSize = populationSizeNode.value #################################################################################### - # parent selection # + # parent selection node # #################################################################################### parentSelectionNode = gaParamsNode.findFirst('parentSelection') self._parentSelectionType = parentSelectionNode.value @@ -359,14 +363,14 @@ def handleInput(self, paramInput): self.raiseAnError(IOError, f'tournamentSelection in is a sole mechanism supportive in multi-objective optimization.') #################################################################################### - # reproduction # + # reproduction node # #################################################################################### reproductionNode = gaParamsNode.findFirst('reproduction') self._nParents = int(np.ceil(1/2 + np.sqrt(1+4*self._populationSize)/2)) self._nChildren = int(2*comb(self._nParents,2)) #################################################################################### - # crossover # + # crossover node # #################################################################################### crossoverNode = reproductionNode.findFirst('crossover') self._crossoverType = crossoverNode.parameterValues['type'] @@ -380,7 +384,7 @@ def handleInput(self, paramInput): self._crossoverInstance = crossoversReturnInstance(self,name = self._crossoverType) #################################################################################### - # mutation # + # mutation node # #################################################################################### mutationNode = reproductionNode.findFirst('mutation') self._mutationType = mutationNode.parameterValues['type'] @@ -394,7 +398,7 @@ def handleInput(self, paramInput): self._mutationInstance = mutatorsReturnInstance(self,name 
= self._mutationType) #################################################################################### - # survivor selection # + # survivor selection node # #################################################################################### survivorSelectionNode = gaParamsNode.findFirst('survivorSelection') self._survivorSelectionType = survivorSelectionNode.value @@ -405,17 +409,17 @@ def handleInput(self, paramInput): self.raiseAnError(IOError, f'(rankNcrowdingBased) in only supports when the number of objective in is bigger than two. ') #################################################################################### - # fitness # + # fitness node # #################################################################################### fitnessNode = gaParamsNode.findFirst('fitness') self._fitnessType = fitnessNode.parameterValues['type'] #################################################################################### - # constraint # + # constraint node # #################################################################################### # TODO: @mandd, please explore the possibility to convert the logistic fitness into a constrained optimization fitness. - if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','logistic', 'feasibleFirst','hardConstraint']: - self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear, logistic, feasibleFirst and hardConstraint as a fitness, whereas provided fitness is {self._fitnessType}') + if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','logistic', 'feasibleFirst']: + self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear, logistic, and feasibleFirst as a fitness, whereas provided fitness is {self._fitnessType}') self._expConstr = self.assemblerObjects['Constraint'][0] if 'Constraint' in self.assemblerObjects else None self._impConstr = self.assemblerObjects['ImplicitConstraint'][0] if 'ImplicitConstraint' in self.assemblerObjects else None if self._expConstr != None and self._impConstr != None: @@ -448,7 +452,7 @@ def handleInput(self, paramInput): self._repairInstance = repairReturnInstance(self,name='replacementRepair') # currently only replacement repair is implemented. #################################################################################### - # convergence criterion # + # convergence criterion node # #################################################################################### convNode = paramInput.findFirst('convergence') if convNode is not None: @@ -505,8 +509,11 @@ def needDenormalized(self): # overload as needed in inheritors return True - def singleObjConstraint(self, info, rlz): + def singleConstraint(self, info, rlz): traj = info['traj'] + for t in self._activeTraj[1:]: + self._closeTrajectory(t, 'cancel', 'Currently GA is single trajectory', 0) + self.incrementIteration(traj) if not self._canHandleMultiObjective or len(self._objectiveVar) == 1: # This is for a single-objective Optimization case. 
offSprings = datasetToDataArray(rlz, list(self.toBeSampled)) @@ -552,8 +559,11 @@ def singleObjConstraint(self, info, rlz): self._resolveNewGeneration(traj, rlz, objectiveVal, offSpringFitness, g, info) return traj, g, objectiveVal, offSprings, offSpringFitness - def multiObjConstraint(self, info, rlz): + def multiConstraint(self, info, rlz): traj = info['traj'] + for t in self._activeTraj[1:]: + self._closeTrajectory(t, 'cancel', 'Currently GA is single trajectory', 0) + self.incrementIteration(traj) objectiveVal = [] offSprings = datasetToDataArray(rlz, list(self.toBeSampled)) @@ -597,108 +607,74 @@ def multiObjConstraint(self, info, rlz): b=self._penaltyCoeff, constraintFunction=g, constraintNum = self._numOfConst, - type =self._minMax - ) + type = self._minMax) return traj, g, objectiveVal, offSprings, offSpringFitness - def singleObjSurvivorSelect(self, info, rlz): - - traj, g, objectiveVal, offSprings, offSpringFitness = GeneticAlgorithm.singleObjConstraint(self, info, rlz) - - if self.counter > 1: - self.population, self.fitness,\ - self.popAge,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge, - variables=list(self.toBeSampled), - population=self.population, - fitness=self.fitness, - newRlz=rlz, - offSpringsFitness=offSpringFitness, - popObjectiveVal=self.objectiveVal - ) - else: - self.population = offSprings - self.fitness = offSpringFitness - self.objectiveVal = rlz[self._objectiveVar[0]].data - - - def multiObjSurvivorSelect(self, info, rlz): - - traj, g, objectiveVal, offSprings, offSpringFitness = GeneticAlgorithm.multiObjConstraint(self, info, rlz) - - if self.counter > 1: - self.population,self.rank, \ - self.popAge,self.crowdingDistance, \ - self.objectiveVal,self.fitness, \ - self.constraintsV = self._survivorSelectionInstance(age=self.popAge, - variables=list(self.toBeSampled), - population=self.population, - offsprings=rlz, - popObjectiveVal=self.objectiveVal, - offObjectiveVal=objectiveVal, - popFit = self.fitness, - offFit = offSpringFitness, - popConstV = self.constraintsV, - offConstV = g - ) - else: - self.population = offSprings - self.fitness = offSpringFitness - self.constraintsV = g - - # offspringObjsVals for Rank and CD calculation - offObjVal = [] - for i in range(len(self._objectiveVar)): - offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) - - # offspringFitVals for Rank and CD calculation - fitVal = datasetToDataArray(self.fitness, self._objectiveVar).data - offspringFitVals = fitVal.tolist() - offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringFitVals)) - self.rank = xr.DataArray(offSpringRank, - dims=['rank'], - coords={'rank': np.arange(np.shape(offSpringRank)[0])}) - offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, - popSize=len(offSpringRank), - objectives=np.array(offspringFitVals)) - - self.crowdingDistance = xr.DataArray(offSpringCD, - dims=['CrowdingDistance'], - coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) - - self.objectiveVal = [] - for i in range(len(self._objectiveVar)): - self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) - - self._collectOptPointMulti(self.population, - self.rank, - self.crowdingDistance, - self.objectiveVal, - self.fitness, - self.constraintsV) - self._resolveNewGenerationMulti(traj, rlz, info) - - ####################################################################################################### - ##TODO: remove all the plots and maybe design new plots in outstreams if our current cannot be used - 
## These are currently for debugging purposes @JunyungKim - import matplotlib.pyplot as plt - - signChange = list(map(lambda x:-1 if x=="max" else 1 , self._minMax)) - for i in range(0, len(self.multiBestObjective)): - newMultiBestObjective = self.multiBestObjective * signChange - - plt.title(str('BatchID = ' + str(self.batchId))) - plt.plot(newMultiBestObjective[:,0], - newMultiBestObjective[:,1],'*') - - for i in range(len(self.multiBestObjective[:,0])): - plt.text(newMultiBestObjective[i,0], - newMultiBestObjective[i,1], str(self.batchId)) - # plt.savefig('PF'+str(i)+'_'+str(self.batchId)+'.png') - plt.savefig('PF_'+str(self.batchId)+'.png') - ####################################################################################################### - - - + def singleObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g): + if self.counter > 1: + self.population, self.fitness,\ + self.popAge,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge, + variables=list(self.toBeSampled), + population=self.population, + fitness=self.fitness, + newRlz=rlz, + offSpringsFitness=offSpringFitness, + popObjectiveVal=self.objectiveVal) + else: + self.population = offSprings + self.fitness = offSpringFitness + self.objectiveVal = rlz[self._objectiveVar[0]].data + + def multiObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g): + if self.counter > 1: + self.population,self.rank, \ + self.popAge,self.crowdingDistance, \ + self.objectiveVal,self.fitness, \ + self.constraintsV = self._survivorSelectionInstance(age=self.popAge, + variables=list(self.toBeSampled), + population=self.population, + offsprings=rlz, + popObjectiveVal=self.objectiveVal, + offObjectiveVal=objectiveVal, + popFit = self.fitness, + offFit = offSpringFitness, + popConstV = self.constraintsV, + offConstV = g) + else: + self.population = offSprings + self.fitness = offSpringFitness + self.constraintsV = g + + # offspringObjsVals for Rank and CD calculation + offObjVal = [] + for i in range(len(self._objectiveVar)): + offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + + # offspringFitVals for Rank and CD calculation + fitVal = datasetToDataArray(self.fitness, self._objectiveVar).data + offspringFitVals = fitVal.tolist() + offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringFitVals)) + self.rank = xr.DataArray(offSpringRank, + dims=['rank'], + coords={'rank': np.arange(np.shape(offSpringRank)[0])}) + offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, + popSize=len(offSpringRank), + objectives=np.array(offspringFitVals)) + + self.crowdingDistance = xr.DataArray(offSpringCD, + dims=['CrowdingDistance'], + coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) + self.objectiveVal = [] + for i in range(len(self._objectiveVar)): + self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + + self._collectOptPointMulti(self.population, + self.rank, + self.crowdingDistance, + self.objectiveVal, + self.fitness, + self.constraintsV) + self._resolveNewGenerationMulti(traj, rlz, info) ######################################################################################################### # Run Methods # @@ -720,86 +696,100 @@ def _useRealization(self, info, rlz): @ Out, None """ - traj = info['traj'] - for t in self._activeTraj[1:]: - self._closeTrajectory(t, 'cancel', 'Currently GA is single trajectory', 0) - self.incrementIteration(traj) info['step'] = self.counter - objInd = 1 if 
len(self._objectiveVar) == 1 else 2 # 0 @ n-1: Survivor Selection from previous iteration (children+parents merging from previous generation) # 0.1 @ n-1: fitnessCalculation(rlz): Perform fitness calculation for newly obtained children (rlz) - # fitness calculation method (i.e., singleObjConstraint or multiObjConstraint) will be called by either singleObjSurvivorSelect or multiObjSurvivorSelect. - - # 0.2@ n-1: Survivor selection(rlz): Update population container given obtained children - survivorSelectionFuncs: dict = {1: GeneticAlgorithm.singleObjSurvivorSelect, 2: GeneticAlgorithm.multiObjSurvivorSelect} - survivor = survivorSelectionFuncs.get(objInd, GeneticAlgorithm.singleObjSurvivorSelect) - survivor(self, info, rlz) - # 1 @ n: Parent selection from population - # pair parents together by indexes - if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case. - parents = self._parentSelectionInstance(self.population, - variables=list(self.toBeSampled), - fitness=self.fitness, - nParents=self._nParents) + objInd = 1 if len(self._objectiveVar) == 1 else 2 + constraintFuncs: dict = {1: GeneticAlgorithm.singleConstraint, 2: GeneticAlgorithm.multiConstraint} + const = constraintFuncs.get(objInd, GeneticAlgorithm.singleConstraint) + traj, g, objectiveVal, offSprings, offSpringFitness = const(self, info, rlz) - else: # This is for a multi-objective Optimization case. + # 0.2@ n-1: Survivor selection(rlz): Update population container given obtained children + if self._activeTraj: + survivorSelectionFuncs: dict = {1: GeneticAlgorithm.singleObjSurvivorSelect, 2: GeneticAlgorithm.multiObjSurvivorSelect} + survivorSelection = survivorSelectionFuncs.get(objInd, GeneticAlgorithm.singleObjSurvivorSelect) + survivorSelection(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g) + + ####################################################################################################### + # ##TODO: remove all the plots and maybe design new plots in outstreams if our current cannot be used + # ## These are currently for debugging purposes @JunyungKim + # import matplotlib.pyplot as plt + + # signChange = list(map(lambda x:-1 if x=="max" else 1 , self._minMax)) + # for i in range(0, len(self.multiBestObjective)): + # newMultiBestObjective = self.multiBestObjective * signChange + + # plt.title(str('BatchID = ' + str(self.batchId))) + # plt.plot(newMultiBestObjective[:,0], + # newMultiBestObjective[:,1],'*') + + # for i in range(len(self.multiBestObjective[:,0])): + # plt.text(newMultiBestObjective[i,0], + # newMultiBestObjective[i,1], str(self.batchId)) + # # plt.savefig('PF'+str(i)+'_'+str(self.batchId)+'.png') + # plt.savefig('PF_'+str(self.batchId)+'.png') + ####################################################################################################### + + # 1 @ n: Parent selection from population + # Pair parents together by indexes parents = self._parentSelectionInstance(self.population, variables=list(self.toBeSampled), + fitness = self.fitness, nParents=self._nParents, rank = self.rank, crowdDistance = self.crowdingDistance, - fitness = self.fitness + objVal = self._objectiveVar ) - # 2 @ n: Crossover from set of parents - # Create childrenCoordinates (x1,...,xM) - childrenXover = self._crossoverInstance(parents=parents, - variables=list(self.toBeSampled), - crossoverProb=self._crossoverProb, - points=self._crossoverPoints) - - # 3 @ n: Mutation - # Perform random directly on childrenCoordinates - childrenMutated = 
self._mutationInstance(offSprings=childrenXover, - distDict=self.distDict, - locs=self._mutationLocs, - mutationProb=self._mutationProb, - variables=list(self.toBeSampled)) - - # 4 @ n: repair/replacement - # Repair should only happen if multiple genes in a single chromosome have the same values (), - # and at the same time the sampling of these genes should be with Out replacement. - needsRepair = False - for chrom in range(self._nChildren): - unique = set(childrenMutated.data[chrom, :]) - if len(childrenMutated.data[chrom,:]) != len(unique): - for var in self.toBeSampled: # TODO: there must be a smarter way to check if a variables strategy is without replacement - if (hasattr(self.distDict[var], 'strategy') and self.distDict[var].strategy == 'withoutReplacement'): - needsRepair = True - break - if needsRepair: - children = self._repairInstance(childrenMutated,variables=list(self.toBeSampled),distInfo=self.distDict) - else: - children = childrenMutated + # 2 @ n: Crossover from set of parents + # Create childrenCoordinates (x1,...,xM) + childrenXover = self._crossoverInstance(parents=parents, + variables=list(self.toBeSampled), + crossoverProb=self._crossoverProb, + points=self._crossoverPoints) + + # 3 @ n: Mutation + # Perform random directly on childrenCoordinates + childrenMutated = self._mutationInstance(offSprings=childrenXover, + distDict=self.distDict, + locs=self._mutationLocs, + mutationProb=self._mutationProb, + variables=list(self.toBeSampled)) + + # 4 @ n: repair/replacement + # Repair should only happen if multiple genes in a single chromosome have the same values (), + # and at the same time the sampling of these genes should be with Out replacement. + needsRepair = False + for chrom in range(self._nChildren): + unique = set(childrenMutated.data[chrom, :]) + if len(childrenMutated.data[chrom,:]) != len(unique): + for var in self.toBeSampled: # TODO: there must be a smarter way to check if a variables strategy is without replacement + if (hasattr(self.distDict[var], 'strategy') and self.distDict[var].strategy == 'withoutReplacement'): + needsRepair = True + break + if needsRepair: + children = self._repairInstance(childrenMutated,variables=list(self.toBeSampled),distInfo=self.distDict) + else: + children = childrenMutated - # keeping the population size constant by ignoring the excessive children - children = children[:self._populationSize, :] + # keeping the population size constant by ignoring the excessive children + children = children[:self._populationSize, :] - daChildren = xr.DataArray(children, - dims=['chromosome','Gene'], - coords={'chromosome': np.arange(np.shape(children)[0]), - 'Gene':list(self.toBeSampled)}) + daChildren = xr.DataArray(children, + dims=['chromosome','Gene'], + coords={'chromosome': np.arange(np.shape(children)[0]), + 'Gene':list(self.toBeSampled)}) - # 5 @ n: Submit children batch - # Submit children coordinates (x1,...,xm), i.e., self.childrenCoordinates - for i in range(self.batch): - newRlz = {} - for _, var in enumerate(self.toBeSampled.keys()): - newRlz[var] = float(daChildren.loc[i, var].values) - self._submitRun(newRlz, traj, self.getIteration(traj)) + # 5 @ n: Submit children batch + # Submit children coordinates (x1,...,xm), i.e., self.childrenCoordinates + for i in range(self.batch): + newRlz = {} + for _, var in enumerate(self.toBeSampled.keys()): + newRlz[var] = float(daChildren.loc[i, var].values) + self._submitRun(newRlz, traj, self.getIteration(traj)) def _submitRun(self, point, traj, step, moreInfo=None): """ @@ -925,6 +915,7 @@ 
def _resolveNewGenerationMulti(self, traj, rlz, info): print("### self.population.shape is {}".format(self.population.shape)) for i in range(rlz.sizes['RAVEN_sample_ID']): varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys()) + # rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) rlzDict = dict((var,self.population.data[i][j]) for j, var in enumerate(self.population.Gene.data)) rlzDict['batchId'] = rlz['batchId'].data[i] for j in range(len(self._objectiveVar)): diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index bd4b5d834e..ccd5702b83 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -59,22 +59,11 @@ def invLinear(rlz,**kwargs): the farthest from violating the constraint it is, The highest negative value it have the largest the violation is. @ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome. """ - if kwargs['a'] == None: - a = [1.0] - else: - a = kwargs['a'] - if kwargs['b'] == None: - b = [10.0] - else: - b = kwargs['b'] - if kwargs['constraintFunction'].all() == None: - penalty = 0.0 - else: - penalty = kwargs['constraintFunction'].data - if isinstance(kwargs['objVar'], str) == True: - objVar = [kwargs['objVar']] - else: - objVar = kwargs['objVar'] + a = [1.0] if kwargs['a'] == None else kwargs['a'] + b = [10.0] if kwargs['b'] == None else kwargs['b'] + penalty = 0.0 if kwargs['constraintFunction'].all() == None else kwargs['constraintFunction'].data + objVar = [kwargs['objVar']] if isinstance(kwargs['objVar'], str) == True else kwargs['objVar'] + for j in range(len(objVar)): data = np.atleast_1d(rlz[objVar][objVar[j]].data) fitness = -a[0] * (rlz[objVar][objVar[j]].data).reshape(-1,1) - b[0] * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1) @@ -154,10 +143,9 @@ def feasibleFirst(rlz,**kwargs): 'constraintFunction', xr.Dataarray, containing all constraint functions (explicit and implicit) evaluations for the whole population @ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome. """ - if isinstance(kwargs['objVar'], str) == True: - objVar = [kwargs['objVar']] - else: - objVar = kwargs['objVar'] + optType = kwargs['type'] + objVar = [kwargs['objVar']] if isinstance(kwargs['objVar'], str) == True else kwargs['objVar'] + a = 1.0 if kwargs['a'] == None else kwargs['a'] if kwargs['constraintNum'] == 0: pen = kwargs['b'] else: @@ -166,31 +154,83 @@ def feasibleFirst(rlz,**kwargs): pen = [penalty[i:i+len(g['Constraint'].data)] for i in range(0, len(penalty), len(g['Constraint'].data))] objPen = dict(map(lambda i,j : (i,j), objVar, pen)) + for i in range(len(objVar)): data = np.atleast_1d(rlz[objVar][objVar[i]].data) worstObj = max(data) fitness = [] for ind in range(data.size): if kwargs['constraintNum'] == 0 or np.all(g.data[ind, :]>=0): - fit=(data[ind]) + fit=(a*data[ind]) else: - fit = worstObj + fit = a*worstObj for constInd,_ in enumerate(g['Constraint'].data): - fit+= objPen[objVar[i]][constInd]*(max(0,-1*g.data[ind, constInd])) #NOTE: objPen[objVar[i]][constInd] is "objective & Constraint specific penalty." + fit = a*fit + objPen[objVar[i]][constInd]*(max(0,-1*g.data[ind, constInd])) #NOTE: objPen[objVar[i]][constInd] is "objective & Constraint specific penalty." 
if len(kwargs['type']) == 1: fitness.append(-1*fit) else: - if kwargs['type'][i] == 'min': - fitness.append(fit) - else: - fitness.append(-1*fit) + fitness.append(fit) + fitness = xr.DataArray(np.array(fitness), - dims=['chromosome'], - coords={'chromosome': np.arange(len(data))}) + dims=['chromosome'], + coords={'chromosome': np.arange(len(data))}) if i == 0: fitnessSet = fitness.to_dataset(name = objVar[i]) else: fitnessSet[objVar[i]] = fitness + # if len(objVar) == 1: + # for i in range(len(objVar)): + # data = np.atleast_1d(rlz[objVar][objVar[i]].data) + # worstObj = max(data) + # fitness = [] + # for ind in range(data.size): + # if kwargs['constraintNum'] == 0 or np.all(g.data[ind, :]>=0): + # fit=(a*data[ind]) + # else: + # fit = a*worstObj + # for constInd,_ in enumerate(g['Constraint'].data): + # fit = a*fit + objPen[objVar[i]][constInd]*(max(0,-1*g.data[ind, constInd])) #NOTE: objPen[objVar[i]][constInd] is "objective & Constraint specific penalty." + # if len(kwargs['type']) == 1: + # fitness.append(-1*fit) + # else: + # if kwargs['type'][i] == 'min': + # fitness.append(fit) + # else: + # fitness.append(-1*fit) + # fitness = xr.DataArray(np.array(fitness), + # dims=['chromosome'], + # coords={'chromosome': np.arange(len(data))}) + # if i == 0: + # fitnessSet = fitness.to_dataset(name = objVar[i]) + # else: + # fitnessSet[objVar[i]] = fitness + # else: + # for i in range(len(objVar)): + # data = np.atleast_1d(rlz[objVar][objVar[i]].data) + # worstObj = max(data) + # fitness = [] + # for ind in range(data.size): + # if kwargs['constraintNum'] == 0 or np.all(g.data[ind, :]>=0): + # fit=(a*data[ind]) + # else: + # fit = a*worstObj + # for constInd,_ in enumerate(g['Constraint'].data): + # fit = a*fit + objPen[objVar[i]][constInd]*(max(0,g.data[ind, constInd])) #NOTE: objPen[objVar[i]][constInd] is "objective & Constraint specific penalty." + # if len(kwargs['type']) == 1: + # fitness.append(1*fit) + # else: + # if kwargs['type'][i] == 'min': + # fitness.append(fit) + # else: + # fitness.append(1*fit) + # fitness = xr.DataArray(np.array(fitness), + # dims=['chromosome'], + # coords={'chromosome': np.arange(len(data))}) + # if i == 0: + # fitnessSet = fitness.to_dataset(name = objVar[i]) + # else: + # fitnessSet[objVar[i]] = fitness + return fitnessSet def logistic(rlz,**kwargs): diff --git a/ravenframework/Optimizers/parentSelectors/parentSelectors.py b/ravenframework/Optimizers/parentSelectors/parentSelectors.py index 5ddc0d87ae..bd327a5d52 100644 --- a/ravenframework/Optimizers/parentSelectors/parentSelectors.py +++ b/ravenframework/Optimizers/parentSelectors/parentSelectors.py @@ -97,21 +97,20 @@ def tournamentSelection(population,**kwargs): @ Out, newPopulation, xr.DataArray, selected parents, """ - nParents= kwargs['nParents'] + nParents = kwargs['nParents'] + nObjVal = len(kwargs['objVal']) pop = population popSize = population.values.shape[0] - if 'rank' in kwargs.keys(): - # the key rank is used in multi-objective optimization where rank identifies which front the point belongs to + if nObjVal > 1: + # the key rank is used in multi-objective optimization where rank identifies which front the point belongs to. rank = kwargs['rank'] crowdDistance = kwargs['crowdDistance'] - # constraintInfo = kwargs['constraint'] multiObjectiveRanking = True - matrixOperationRaw = np.zeros((popSize, 3)) #NOTE if constraint is needed to eliminate chromosome violating constraints, then poopSize should be 4. 
+ matrixOperationRaw = np.zeros((popSize, 3)) #NOTE if constraint information is in need to eliminate chromosome violating constraints, then poopSize should be 4. matrixOperationRaw[:,0] = np.transpose(np.arange(popSize)) matrixOperationRaw[:,1] = np.transpose(crowdDistance.data) matrixOperationRaw[:,2] = np.transpose(rank.data) - # matrixOperationRaw[:,3] = np.transpose(constraintInfo.data) matrixOperation = np.zeros((popSize,len(matrixOperationRaw[0]))) else: fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist]) diff --git a/ravenframework/utils/frontUtils.py b/ravenframework/utils/frontUtils.py index 8bb0e11c72..ca999e02be 100644 --- a/ravenframework/utils/frontUtils.py +++ b/ravenframework/utils/frontUtils.py @@ -80,13 +80,6 @@ def rankNonDominatedFrontiers(data): @ out, nonDominatedRank, list, a list of length nPoints that has the ranking of the front passing through each point """ - ## tentative code block start - # import matplotlib.pyplot as plt - # from mpl_toolkits.mplot3d import Axes3D - # xdata = [item[0] for item in data] - # ydata = [item[1] for item in data] - # zdata = [item[2] for item in data] - nonDominatedRank = np.zeros(data.shape[0],dtype=int) rank = 0 indicesDominated = list(np.arange(data.shape[0])) @@ -102,13 +95,6 @@ def rankNonDominatedFrontiers(data): data = rawData[indicesDominated] nonDominatedRank[indicesNonDominated] = rank nonDominatedRank = list(nonDominatedRank) - - # ax3d = plt.figure().gca(projection='3d') - # ax3d.scatter(xdata, ydata, zdata) - # for x, y, z, label in zip(xdata, ydata, zdata, nonDominatedRank): - # ax3d.text(x, y, z, label) - # plt.title("Data") - # plt.show() return nonDominatedRank def crowdingDistance(rank, popSize, objectives): @@ -119,13 +105,6 @@ def crowdingDistance(rank, popSize, objectives): @ In, objectives, np.array, matrix contains objective values for each element of the population @ Out, crowdDist, np.array, array of crowding distances """ - # # tentative code block start - # import matplotlib.pyplot as plt - # from mpl_toolkits.mplot3d import Axes3D - # xdata = [item[0] for item in objectives] - # ydata = [item[1] for item in objectives] - # zdata = [item[2] for item in objectives] - crowdDist = np.zeros(popSize) fronts = np.unique(rank) fronts = fronts[fronts!=np.inf] @@ -140,11 +119,4 @@ def crowdingDistance(rank, popSize, objectives): for i in range(1, len(front)-1): crowdDist[front[sortedRank[i]]] = crowdDist[front[sortedRank[i]]] + (objectives[front[sortedRank[i+1]], obj] - objectives[front[sortedRank[i-1]], obj]) / (fMax[obj]-fMin[obj]) - # ax3d = plt.figure().gca(projection='3d') - # ax3d.scatter(xdata, ydata, zdata) - # for x, y, z, label in zip(xdata, ydata, zdata, [i+j for i, j in zip([str(ele) for ele in rank.data.tolist()], [str(ele) for ele in crowdDist.tolist()])]): - # ax3d.text(x, y, z, label) - # plt.title("Data") - # plt.show() - return crowdDist From 8fb32c396728e205a6ef26059a185c297aaa3098 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 26 Sep 2023 17:09:29 -0600 Subject: [PATCH 51/84] 1. missing descriptions of self are added. 
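
The frontUtils.py hunks above remove leftover plotting code but keep the NSGA-II crowding-distance logic intact. For readers unfamiliar with that logic, a compact standalone sketch follows; it is illustrative only, the names are assumptions, and it is not the frontUtils implementation itself.

import numpy as np

def crowdingDistanceSketch(rank, objectives):
  """
    Sketch only: within each non-dominated front, every point is scored by the
    normalized spread of its two neighbors along each objective; boundary
    points of a front get an infinite distance so they are always preferred.
    @ In, rank, np.ndarray, front index per point, shape (nPoints,)
    @ In, objectives, np.ndarray, objective values, shape (nPoints, nObjectives)
    @ Out, dist, np.ndarray, crowding distance per point, shape (nPoints,)
  """
  nPoints, nObj = objectives.shape
  dist = np.zeros(nPoints)
  for front in np.unique(rank):
    members = np.where(rank == front)[0]
    if len(members) <= 2:
      dist[members] = np.inf
      continue
    for j in range(nObj):
      order = members[np.argsort(objectives[members, j])]   # front members sorted by objective j
      dist[order[0]] = dist[order[-1]] = np.inf              # boundary points of the front
      span = objectives[order[-1], j] - objectives[order[0], j]
      if span == 0:
        continue
      for k in range(1, len(order) - 1):
        dist[order[k]] += (objectives[order[k+1], j] - objectives[order[k-1], j])/span
  return dist
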
---
 ravenframework/Optimizers/GeneticAlgorithm.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py
index 39385e2322..e0501e2425 100644
--- a/ravenframework/Optimizers/GeneticAlgorithm.py
+++ b/ravenframework/Optimizers/GeneticAlgorithm.py
@@ -105,12 +105,12 @@ def __init__(self):
     self._survivorSelectionType = None     # type of the survivor selection process chosen
     self._survivorSelectionInstance = None # instance of the survivor selection process chosen
     self._fitnessType = None               # type of the fitness calculation chosen
-    self._objCoeff = None                  # //
+    self._objCoeff = None                  # weight coefficients of objectives for fitness calculation
     self._objectiveVar = None              # objective variable names
-    self._penaltyCoeff = None              # //
+    self._penaltyCoeff = None              # weight coefficients corresponding to constraints and objectives for fitness calculation
-    self._fitnessInstance = None           # //
+    self._fitnessInstance = None           # instance of fitness
-    self._repairInstance = None            # //
+    self._repairInstance = None            # instance of repair
-    self._canHandleMultiObjective = True   # //
+    self._canHandleMultiObjective = True   # boolean indicator whether optimization is a single-objective problem or a multi-objective problem

   ##########################
   # Initialization Methods #

From 4dc0e57f70542be43ba9687d662c33ea2165e059 Mon Sep 17 00:00:00 2001
From: JunyungKim
Date: Sat, 14 Oct 2023 17:40:37 -0600
Subject: [PATCH 52/84] tournamentSelection method in parentSelectors.py is
 enhanced following the logic described in tutorialspoint. New node called
 kSelection is introduced to let users choose the number of chromosomes
 selected for tournament.

---
 ravenframework/Optimizers/GeneticAlgorithm.py | 99 +++++++++++--------
 .../parentSelectors/parentSelectors.py        | 59 +++--------
 2 files changed, 69 insertions(+), 89 deletions(-)

diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py
index e0501e2425..6b45f93584 100644
--- a/ravenframework/Optimizers/GeneticAlgorithm.py
+++ b/ravenframework/Optimizers/GeneticAlgorithm.py
@@ -75,24 +75,25 @@ def __init__(self):
     self.popAge = None              # population age
     self.fitness = None             # population fitness
     self.rank = None                # population rank (for Multi-objective optimization only)
-    self.constraintsV = None        # calculated contraints value
+    self.constraintsV = None        # calculated constraints value
     self.crowdingDistance = None    # population crowding distance (for Multi-objective optimization only)
     self.ahdp = np.NaN              # p-Average Hausdorff Distance between populations
     self.ahd = np.NaN               # Hausdorff Distance between populations
     self.bestPoint = None           # the best solution (chromosome) found among population in a specific batchId
     self.bestFitness = None         # fitness value of the best solution found
     self.bestObjective = None       # objective value of the best solution found
     self.multiBestPoint = None      # the best solutions (chromosomes) found among population in a specific batchId
     self.multiBestFitness = None    # fitness values of the best solutions found
     self.multiBestObjective = None  # objective values of the best solutions found
     self.multiBestConstraint = None # constraint values of the best solutions found
     self.multiBestRank = None       # rank values of the best solutions found
     self.multiBestCD = None         # crowding distance
(CD) values of the best solutions found - self.objectiveVal = None # objective values of solutions + self.multiBestFitness = None # fitness values of the best solutions found + self.multiBestObjective = None # objective values of the best solutions found + self.multiBestConstraint = None # constraint values of the best solutions found + self.multiBestRank = None # rank values of the best solutions found + self.multiBestCD = None # crowding distance (CD) values of the best solutions found + self.objectiveVal = None # objective values of solutions self._populationSize = None # number of population size self._parentSelectionType = None # type of the parent selection process chosen self._parentSelectionInstance = None # instance of the parent selection process chosen self._nParents = None # number of parents + self._kSelection = None # number of chromosomes selected for tournament selection self._nChildren = None # number of children self._crossoverType = None # type of the crossover process chosen self._crossoverPoints = None # point where crossover process will happen @@ -170,7 +171,7 @@ def getInputSpecification(cls): # \begin{itemize} # \item hard. # \item soft. - # \end{itemize} + # \end{itemize} \end{itemize}""") # Population Size populationSize = InputData.parameterInputFactory('populationSize', strictMode=True, @@ -178,7 +179,7 @@ def getInputSpecification(cls): printPriority=108, descr=r"""The number of chromosomes in each population.""") GAparams.addSub(populationSize) - + #NOTE An indicator saying whather GA will handle constraint hardly or softly will be upgraded later @JunyungKim # # Constraint Handling # constraintHandling = InputData.parameterInputFactory('constraintHandling', strictMode=True, @@ -186,7 +187,7 @@ def getInputSpecification(cls): # printPriority=108, # descr=r"""a node indicating whether GA will handle constraints hardly or softly.""") # GAparams.addSub(constraintHandling) - + # Parent Selection parentSelection = InputData.parameterInputFactory('parentSelection', strictMode=True, contentType=InputTypes.StringType, @@ -208,6 +209,12 @@ def getInputSpecification(cls): printPriority=108, descr=r"""a node containing the reproduction methods. This accepts subnodes that specifies the types of crossover and mutation.""") + # 0. k-selectionNumber of Parents + kSelection = InputData.parameterInputFactory('kSelection', strictMode=True, + contentType=InputTypes.IntegerType, + printPriority=108, + descr=r"""Number of chromosome selected for tournament selection""") + reproduction.addSub(kSelection) # 1. Crossover crossover = InputData.parameterInputFactory('crossover', strictMode=True, contentType=InputTypes.StringType, @@ -272,9 +279,9 @@ def getInputSpecification(cls): b. logistic: $fitness = \frac{1}{1+e^{a\times(obj-b)}}$. c. feasibleFirst: $fitness = \left\{\begin{matrix} -obj & g_j(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right.$ - + d. 
hardConstraint: $fitness = the number of constraints violated.$ - + """) fitness.addParam("type", InputTypes.StringType, True, descr=r"""[invLin, logistic, feasibleFirst, hardConstraint]""") @@ -346,29 +353,38 @@ def handleInput(self, paramInput): # GAparams # #################################################################################### gaParamsNode = paramInput.findFirst('GAparams') - + #################################################################################### # populationSize # #################################################################################### populationSizeNode = gaParamsNode.findFirst('populationSize') self._populationSize = populationSizeNode.value - + #################################################################################### # parent selection node # #################################################################################### parentSelectionNode = gaParamsNode.findFirst('parentSelection') self._parentSelectionType = parentSelectionNode.value self._parentSelectionInstance = parentSelectionReturnInstance(self, name=parentSelectionNode.value) - if len(self._objectiveVar) >=2 and self._parentSelectionType != 'tournamentSelection': - self.raiseAnError(IOError, f'tournamentSelection in is a sole mechanism supportive in multi-objective optimization.') - + + # if len(self._objectiveVar) >=2 and self._parentSelectionType != 'tournamentSelection': + # self.raiseAnError(IOError, f'tournamentSelection in is a sole mechanism supportive in multi-objective optimization.') + #################################################################################### # reproduction node # #################################################################################### reproductionNode = gaParamsNode.findFirst('reproduction') self._nParents = int(np.ceil(1/2 + np.sqrt(1+4*self._populationSize)/2)) self._nChildren = int(2*comb(self._nParents,2)) - + + #################################################################################### + # k-Selection node # + #################################################################################### + if reproductionNode.findFirst('kSelection') is None: + self._kSelection = 3 # Default value is set to 3. 
+ else: + self._kSelection = reproductionNode.findFirst('kSelection').value + #################################################################################### # crossover node # #################################################################################### @@ -382,7 +398,7 @@ def handleInput(self, paramInput): self._crossoverPoints = crossoverNode.findFirst('points').value self._crossoverProb = crossoverNode.findFirst('crossoverProb').value self._crossoverInstance = crossoversReturnInstance(self,name = self._crossoverType) - + #################################################################################### # mutation node # #################################################################################### @@ -396,7 +412,7 @@ def handleInput(self, paramInput): self._mutationLocs = mutationNode.findFirst('locs').value self._mutationProb = mutationNode.findFirst('mutationProb').value self._mutationInstance = mutatorsReturnInstance(self,name = self._mutationType) - + #################################################################################### # survivor selection node # #################################################################################### @@ -407,13 +423,13 @@ def handleInput(self, paramInput): self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support ageBased, fitnessBased, and rankNcrowdingBased as a survivorSelector, whereas provided survivorSelector is {self._survivorSelectionType}') if len(self._objectiveVar) == 1 and self._survivorSelectionType == 'rankNcrowdingBased': self.raiseAnError(IOError, f'(rankNcrowdingBased) in only supports when the number of objective in is bigger than two. ') - + #################################################################################### # fitness node # #################################################################################### fitnessNode = gaParamsNode.findFirst('fitness') self._fitnessType = fitnessNode.parameterValues['type'] - + #################################################################################### # constraint node # #################################################################################### @@ -423,11 +439,11 @@ def handleInput(self, paramInput): self._expConstr = self.assemblerObjects['Constraint'][0] if 'Constraint' in self.assemblerObjects else None self._impConstr = self.assemblerObjects['ImplicitConstraint'][0] if 'ImplicitConstraint' in self.assemblerObjects else None if self._expConstr != None and self._impConstr != None: - self._numOfConst = len([ele for ele in self._expConstr if ele != 'Functions' if ele !='External']) + len([ele for ele in self._impConstr if ele != 'Functions' if ele !='External']) + self._numOfConst = len([ele for ele in self._expConstr if ele != 'Functions' if ele !='External']) + len([ele for ele in self._impConstr if ele != 'Functions' if ele !='External']) elif self._expConstr == None and self._impConstr != None: - self._numOfConst = len([ele for ele in self._impConstr if ele != 'Functions' if ele !='External']) + self._numOfConst = len([ele for ele in self._impConstr if ele != 'Functions' if ele !='External']) elif self._expConstr != None and self._impConstr == None: - self._numOfConst = len([ele for ele in self._expConstr if ele != 'Functions' if ele !='External']) + self._numOfConst = len([ele for ele in self._expConstr if ele != 'Functions' if ele !='External']) else: self._numOfConst = 0 if (self._expConstr != None) and (self._impConstr != None) and (self._penaltyCoeff != None): @@ -437,16 +453,16 @@ def 
handleInput(self, paramInput): pass self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None #NOTE the code lines below are for 'feasibleFirst' temperarily. It will be generalized for invLinear as well. - if self._fitnessType == 'feasibleFirst': + if self._fitnessType == 'feasibleFirst': if self._numOfConst != 0 and fitnessNode.findFirst('b') is not None: - self._penaltyCoeff = fitnessNode.findFirst('b').value + self._penaltyCoeff = fitnessNode.findFirst('b').value elif self._numOfConst == 0 and fitnessNode.findFirst('b') is not None: - self.raiseAnError(IOError, f'The number of constraints used are 0 but there are penalty coefficieints') + self.raiseAnError(IOError, f'The number of constraints used are 0 but there are penalty coefficieints') elif self._numOfConst != 0 and fitnessNode.findFirst('b') is None: - self._penaltyCoeff = list(np.repeat(1, self._numOfConst * len(self._objectiveVar))) #NOTE if penaltyCoeff is not provided, then assume they are all 1. + self._penaltyCoeff = list(np.repeat(1, self._numOfConst * len(self._objectiveVar))) #NOTE if penaltyCoeff is not provided, then assume they are all 1. else: - self._penaltyCoeff = list(np.repeat(0, len(self._objectiveVar))) - else: + self._penaltyCoeff = list(np.repeat(0, len(self._objectiveVar))) + else: self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None self._fitnessInstance = fitnessReturnInstance(self,name = self._fitnessType) self._repairInstance = repairReturnInstance(self,name='replacementRepair') # currently only replacement repair is implemented. @@ -558,7 +574,7 @@ def singleConstraint(self, info, rlz): self._collectOptPoint(rlz, offSpringFitness, objectiveVal, g) self._resolveNewGeneration(traj, rlz, objectiveVal, offSpringFitness, g, info) return traj, g, objectiveVal, offSprings, offSpringFitness - + def multiConstraint(self, info, rlz): traj = info['traj'] for t in self._activeTraj[1:]: @@ -657,8 +673,8 @@ def multiObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, self.rank = xr.DataArray(offSpringRank, dims=['rank'], coords={'rank': np.arange(np.shape(offSpringRank)[0])}) - offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, - popSize=len(offSpringRank), + offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, + popSize=len(offSpringRank), objectives=np.array(offspringFitVals)) self.crowdingDistance = xr.DataArray(offSpringCD, @@ -681,7 +697,7 @@ def multiObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, ######################################################################################################### ######################################################################################################### - # Developer note: + # Developer note: # Each algorithm step is indicated by a number followed by the generation number # e.g., '0 @ n-1' refers to step 0 for generation n-1 (i.e., previous generation) # for more details refer to GRP-Raven-development/Disceret_opt channel on MS Teams. 
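Before the parent-selection hunk that follows: the kSelection node introduced in this patch feeds a k-way tournament. As a rough sketch of the comparison rule only (largest fitness wins when there is a single objective; lowest rank, then largest crowding distance, when there are several), consider the snippet below. It draws the k candidates from the whole population, as in the textbook tournament, uses numpy's Generator instead of RAVEN's randomUtils, and the name kTournamentPick is an illustrative assumption, so treat it as a reading aid rather than a restatement of the parentSelectors implementation.

import numpy as np

def kTournamentPick(rng, k, fitness=None, rank=None, crowdDist=None):
  # Pick one parent index via a k-way tournament over numpy arrays.
  # Single-objective: the sampled candidate with the largest fitness wins.
  # Multi-objective:  the candidate with the lowest rank wins; ties broken
  #                   by the largest crowding distance.
  popSize = len(fitness) if fitness is not None else len(rank)
  candidates = rng.choice(popSize, size=k, replace=False)
  if fitness is not None:
    return candidates[np.argmax(fitness[candidates])]
  # lexsort: last key is primary -> sort by rank ascending, then crowdDist descending
  return candidates[np.lexsort((-crowdDist[candidates], rank[candidates]))[0]]

if __name__ == '__main__':
  rng = np.random.default_rng(42)
  # single-objective example: one fitness value per chromosome
  fit = np.array([0.2, 0.9, 0.5, 0.1, 0.7, 0.3])
  print(kTournamentPick(rng, k=3, fitness=fit))
  # multi-objective example: precomputed rank and crowding distance per chromosome
  rnk = np.array([0, 1, 0, 2, 0, 1])
  cd  = np.array([np.inf, 0.4, 0.8, np.inf, 1.2, 0.6])
  print([kTournamentPick(rng, k=3, rank=rnk, crowdDist=cd) for _ in range(4)])

The multi-objective branch mirrors the rank-then-crowding comparison used for survivor selection in this patch set; parentSelectors.py remains the authoritative reference for how RAVEN actually draws and compares candidates.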
@@ -731,13 +747,14 @@ def _useRealization(self, info, rlz): # newMultiBestObjective[i,1], str(self.batchId)) # # plt.savefig('PF'+str(i)+'_'+str(self.batchId)+'.png') # plt.savefig('PF_'+str(self.batchId)+'.png') - ####################################################################################################### + ####################################################################################################### # 1 @ n: Parent selection from population # Pair parents together by indexes parents = self._parentSelectionInstance(self.population, variables=list(self.toBeSampled), fitness = self.fitness, + kSelection = self._kSelection, nParents=self._nParents, rank = self.rank, crowdDistance = self.crowdingDistance, @@ -966,7 +983,7 @@ def _collectOptPoint(self, rlz, fitness, objectiveVal, g): if self._fitnessType == 'hardConstraint': optPoints,fit,obj,gOfBest = zip(*[[x,y,z,w] for x, y, z,w in sorted(zip(np.atleast_2d(population.data),datasetToDataArray(fitness, self._objectiveVar).data,objectiveVal,np.atleast_2d(g.data)),reverse=True,key=lambda x: (x[1],-x[2]))]) else: - optPoints,fit,obj,gOfBest = zip(*[[x,y,z,w] for x, y, z,w in sorted(zip(np.atleast_2d(population.data),datasetToDataArray(fitness, self._objectiveVar).data,objectiveVal,np.atleast_2d(g.data)),reverse=True,key=lambda x: (x[1]))]) + optPoints,fit,obj,gOfBest = zip(*[[x,y,z,w] for x, y, z,w in sorted(zip(np.atleast_2d(population.data),datasetToDataArray(fitness, self._objectiveVar).data,objectiveVal,np.atleast_2d(g.data)),reverse=True,key=lambda x: (x[1]))]) point = dict((var,float(optPoints[0][i])) for i, var in enumerate(selVars) if var in rlz.data_vars) gOfBest = dict(('ConstraintEvaluation_'+name,float(gOfBest[0][i])) for i, name in enumerate(g.coords['Constraint'].values)) if (self.counter > 1 and obj[0] <= self.bestObjective and fit[0] >= self.bestFitness) or self.counter == 1: @@ -1016,7 +1033,7 @@ def _collectOptPointMulti(self, population, rank, CD, objVal, fitness, constrain self.multiBestFitness = fitSet self.multiBestObjective = optObjVal self.multiBestConstraint = optConstNew - self.multiBestRank = optRank + self.multiBestRank = optRank self.multiBestCD = optCD return optPointsDic @@ -1323,7 +1340,7 @@ def _checkImpFunctionalConstraints(self, point, opt, impConstraint): return g ############################### - # END constraint handling # + # END constraint handling # ############################### def _addToSolutionExport(self, traj, rlz, acceptable): """ diff --git a/ravenframework/Optimizers/parentSelectors/parentSelectors.py b/ravenframework/Optimizers/parentSelectors/parentSelectors.py index bd327a5d52..f529d55247 100644 --- a/ravenframework/Optimizers/parentSelectors/parentSelectors.py +++ b/ravenframework/Optimizers/parentSelectors/parentSelectors.py @@ -47,7 +47,7 @@ def rouletteWheel(population,**kwargs): """ # Arguments pop = population - fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist]) + fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist]) nParents= kwargs['nParents'] # if nparents = population size then do nothing (whole population are parents) if nParents == pop.shape[0]: @@ -98,63 +98,26 @@ def tournamentSelection(population,**kwargs): """ nParents = kwargs['nParents'] - nObjVal = len(kwargs['objVal']) + kSelect = kwargs['kSelection'] pop = population - popSize = population.values.shape[0] - - if nObjVal > 1: - # the key 
rank is used in multi-objective optimization where rank identifies which front the point belongs to. - rank = kwargs['rank'] - crowdDistance = kwargs['crowdDistance'] - multiObjectiveRanking = True - matrixOperationRaw = np.zeros((popSize, 3)) #NOTE if constraint information is in need to eliminate chromosome violating constraints, then poopSize should be 4. - matrixOperationRaw[:,0] = np.transpose(np.arange(popSize)) - matrixOperationRaw[:,1] = np.transpose(crowdDistance.data) - matrixOperationRaw[:,2] = np.transpose(rank.data) - matrixOperation = np.zeros((popSize,len(matrixOperationRaw[0]))) - else: - fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist]) - multiObjectiveRanking = False - matrixOperationRaw = np.zeros((popSize,2)) - matrixOperationRaw[:,0] = np.transpose(np.arange(popSize)) - matrixOperationRaw[:,1] = np.transpose(fitness) - matrixOperation = np.zeros((popSize,2)) - - indexes = list(np.arange(popSize)) - indexesShuffled = randomUtils.randomChoice(indexes, size=popSize, replace=False, engine=None) - - if popSize<2*nParents: - raise ValueError('In tournamentSelection the number of parents cannot be larger than half of the population size.') - - for idx, val in enumerate(indexesShuffled): - matrixOperation[idx,:] = matrixOperationRaw[val,:] selectedParent = xr.DataArray(np.zeros((nParents,np.shape(pop)[1])), dims=['chromosome','Gene'], coords={'chromosome':np.arange(nParents), 'Gene': kwargs['variables']}) + fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist]) - if not multiObjectiveRanking: # single-objective implementation of tournamentSelection - for i in range(nParents): - if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]: - index = int(matrixOperation[2*i,0]) - else: - index = int(matrixOperation[2*i+1,0]) - selectedParent[i,:] = pop.values[index,:] - else: # multi-objective implementation of tournamentSelection - for i in range(nParents): - if matrixOperation[2*i,2] > matrixOperation[2*i+1,2]: index = int(matrixOperation[2*i+1,0]) - elif matrixOperation[2*i,2] < matrixOperation[2*i+1,2]: index = int(matrixOperation[2*i,0]) - elif matrixOperation[2*i,2] == matrixOperation[2*i+1,2]: # if same rank, then compare CD - if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]: index = int(matrixOperation[2*i,0]) - elif matrixOperation[2*i,2] < matrixOperation[2*i+1,2]: index = int(matrixOperation[2*i+1,0]) - else: # same rank and same CD - index = int(matrixOperation[2*i+1,0]) #NOTE if rank and CD are same, then any chromosome can be selected. - selectedParent[i,:] = pop.values[index,:] + for i in range(nParents): + matrixOperationRaw = np.zeros((kSelect,2)) + selectChromoIndexes = list(np.arange(kSelect)) + selectedChromo = randomUtils.randomChoice(selectChromoIndexes, size=kSelect, replace=False, engine=None) + matrixOperationRaw[:,0] = selectedChromo + matrixOperationRaw[:,1] = np.transpose(fitness[selectedChromo]) + tournamentWinnerIndex = int(matrixOperationRaw[np.argmax(matrixOperationRaw[:,1]),0]) + selectedParent[i,:] = pop.values[tournamentWinnerIndex,:] return selectedParent - def rankSelection(population,**kwargs): """ Rank Selection mechanism for parent selection From 4f457fe4a38974fc4839c1c7bbc3b95fd595fd28 Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Sun, 15 Oct 2023 00:04:12 -0600 Subject: [PATCH 53/84] tournamemntSelection for multi-objective is completed. 
RouletteWheel in general needs only one fitness value of each chromosome. Application of roulette wheel selection in Multi-objective (which means multiple fitness values for each chromosome) optimization needs more time for implementation in RAVEN. For Multi-objective optimization, rankNcrowdingBased must be used for survivorSelection. fitnessBased is assumed that each chromosome has single fitness value. --- ravenframework/Optimizers/GeneticAlgorithm.py | 4 +- ravenframework/Optimizers/fitness/fitness.py | 57 +------------------ .../parentSelectors/parentSelectors.py | 42 ++++++++++---- 3 files changed, 36 insertions(+), 67 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 6b45f93584..b450d21611 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -367,8 +367,8 @@ def handleInput(self, paramInput): self._parentSelectionType = parentSelectionNode.value self._parentSelectionInstance = parentSelectionReturnInstance(self, name=parentSelectionNode.value) - # if len(self._objectiveVar) >=2 and self._parentSelectionType != 'tournamentSelection': - # self.raiseAnError(IOError, f'tournamentSelection in is a sole mechanism supportive in multi-objective optimization.') + if len(self._objectiveVar) >=2 and self._parentSelectionType != 'tournamentSelection': + self.raiseAnError(IOError, f'tournamentSelection in is a sole mechanism supportive in multi-objective optimization.') #################################################################################### # reproduction node # diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index ccd5702b83..c76f6dfab3 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -124,7 +124,7 @@ def feasibleFirst(rlz,**kwargs): For maximization problems the objective value is multiplied by -1 and hence the previous trends are inverted. A great quality of this fitness is that if the objective value is equal for multiple solutions it selects the furthest from constraint violation. - + Reference: Deb, Kalyanmoy. "An efficient constraint handling method for genetic algorithms." Computer methods in applied mechanics and engineering 186.2-4 (2000): 311-338. .. math:: @@ -152,7 +152,6 @@ def feasibleFirst(rlz,**kwargs): g = kwargs['constraintFunction'] penalty = kwargs['b'] pen = [penalty[i:i+len(g['Constraint'].data)] for i in range(0, len(penalty), len(g['Constraint'].data))] - objPen = dict(map(lambda i,j : (i,j), objVar, pen)) for i in range(len(objVar)): @@ -170,7 +169,7 @@ def feasibleFirst(rlz,**kwargs): fitness.append(-1*fit) else: fitness.append(fit) - + fitness = xr.DataArray(np.array(fitness), dims=['chromosome'], coords={'chromosome': np.arange(len(data))}) @@ -178,58 +177,6 @@ def feasibleFirst(rlz,**kwargs): fitnessSet = fitness.to_dataset(name = objVar[i]) else: fitnessSet[objVar[i]] = fitness - # if len(objVar) == 1: - # for i in range(len(objVar)): - # data = np.atleast_1d(rlz[objVar][objVar[i]].data) - # worstObj = max(data) - # fitness = [] - # for ind in range(data.size): - # if kwargs['constraintNum'] == 0 or np.all(g.data[ind, :]>=0): - # fit=(a*data[ind]) - # else: - # fit = a*worstObj - # for constInd,_ in enumerate(g['Constraint'].data): - # fit = a*fit + objPen[objVar[i]][constInd]*(max(0,-1*g.data[ind, constInd])) #NOTE: objPen[objVar[i]][constInd] is "objective & Constraint specific penalty." 
- # if len(kwargs['type']) == 1: - # fitness.append(-1*fit) - # else: - # if kwargs['type'][i] == 'min': - # fitness.append(fit) - # else: - # fitness.append(-1*fit) - # fitness = xr.DataArray(np.array(fitness), - # dims=['chromosome'], - # coords={'chromosome': np.arange(len(data))}) - # if i == 0: - # fitnessSet = fitness.to_dataset(name = objVar[i]) - # else: - # fitnessSet[objVar[i]] = fitness - # else: - # for i in range(len(objVar)): - # data = np.atleast_1d(rlz[objVar][objVar[i]].data) - # worstObj = max(data) - # fitness = [] - # for ind in range(data.size): - # if kwargs['constraintNum'] == 0 or np.all(g.data[ind, :]>=0): - # fit=(a*data[ind]) - # else: - # fit = a*worstObj - # for constInd,_ in enumerate(g['Constraint'].data): - # fit = a*fit + objPen[objVar[i]][constInd]*(max(0,g.data[ind, constInd])) #NOTE: objPen[objVar[i]][constInd] is "objective & Constraint specific penalty." - # if len(kwargs['type']) == 1: - # fitness.append(1*fit) - # else: - # if kwargs['type'][i] == 'min': - # fitness.append(fit) - # else: - # fitness.append(1*fit) - # fitness = xr.DataArray(np.array(fitness), - # dims=['chromosome'], - # coords={'chromosome': np.arange(len(data))}) - # if i == 0: - # fitnessSet = fitness.to_dataset(name = objVar[i]) - # else: - # fitnessSet[objVar[i]] = fitness return fitnessSet diff --git a/ravenframework/Optimizers/parentSelectors/parentSelectors.py b/ravenframework/Optimizers/parentSelectors/parentSelectors.py index f529d55247..2a38cc3bed 100644 --- a/ravenframework/Optimizers/parentSelectors/parentSelectors.py +++ b/ravenframework/Optimizers/parentSelectors/parentSelectors.py @@ -75,7 +75,7 @@ def rouletteWheel(population,**kwargs): selectionProb = shiftedFitness/np.sum(shiftedFitness) # Share of the pie (rouletteWheel) sumProb = selectionProb[counter] - while sumProb < roulettePointer : + while sumProb <= roulettePointer : counter += 1 sumProb += selectionProb[counter] selectedParent[i,:] = pop.values[counter,:] @@ -98,23 +98,45 @@ def tournamentSelection(population,**kwargs): """ nParents = kwargs['nParents'] + nObjVal = len(kwargs['objVal']) kSelect = kwargs['kSelection'] pop = population + popSize = population.values.shape[0] selectedParent = xr.DataArray(np.zeros((nParents,np.shape(pop)[1])), dims=['chromosome','Gene'], coords={'chromosome':np.arange(nParents), 'Gene': kwargs['variables']}) - fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist]) - for i in range(nParents): - matrixOperationRaw = np.zeros((kSelect,2)) - selectChromoIndexes = list(np.arange(kSelect)) - selectedChromo = randomUtils.randomChoice(selectChromoIndexes, size=kSelect, replace=False, engine=None) - matrixOperationRaw[:,0] = selectedChromo - matrixOperationRaw[:,1] = np.transpose(fitness[selectedChromo]) - tournamentWinnerIndex = int(matrixOperationRaw[np.argmax(matrixOperationRaw[:,1]),0]) - selectedParent[i,:] = pop.values[tournamentWinnerIndex,:] + if nObjVal == 1: # single-objective Case + fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist]) + for i in range(nParents): + matrixOperationRaw = np.zeros((kSelect,2)) + selectChromoIndexes = list(np.arange(kSelect)) + selectedChromo = randomUtils.randomChoice(selectChromoIndexes, size=kSelect, replace=False, engine=None) + matrixOperationRaw[:,0] = selectedChromo + matrixOperationRaw[:,1] = np.transpose(fitness[selectedChromo]) + tournamentWinnerIndex = 
int(matrixOperationRaw[np.argmax(matrixOperationRaw[:,1]),0]) + selectedParent[i,:] = pop.values[tournamentWinnerIndex,:] + + else: # multi-objective Case + # the key rank is used in multi-objective optimization where rank identifies which front the point belongs to. + rank = kwargs['rank'] + crowdDistance = kwargs['crowdDistance'] + for i in range(nParents): + matrixOperationRaw = np.zeros((kSelect,3)) + selectChromoIndexes = list(np.arange(kSelect)) + selectedChromo = randomUtils.randomChoice(selectChromoIndexes, size=kSelect, replace=False, engine=None) + matrixOperationRaw[:,0] = selectedChromo + matrixOperationRaw[:,1] = np.transpose(rank.data[selectedChromo]) + matrixOperationRaw[:,2] = np.transpose(crowdDistance.data[selectedChromo]) + minRankIndex = list(np.where(matrixOperationRaw[:,1] == matrixOperationRaw[:,1].min())[0]) + if len(minRankIndex) != 1: # More than one chrosome having same rank. + minRankNmaxCDIndex = list(np.where(matrixOperationRaw[minRankIndex,2] == matrixOperationRaw[minRankIndex,2].max())[0]) + else: + minRankNmaxCDIndex = minRankIndex + tournamentWinnerIndex = minRankNmaxCDIndex[0] + selectedParent[i,:] = pop.values[tournamentWinnerIndex,:] return selectedParent From 9d275684b75bb676542f2d2ec8b8ce7e9548010d Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Sun, 15 Oct 2023 00:34:12 -0600 Subject: [PATCH 54/84] simpleKnapsackTournament optOut file is regoldened. Final solution is still same to old gold file. --- .../PrintOptOut_1.csv | 142 +++++++++--------- 1 file changed, 71 insertions(+), 71 deletions(-) diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/unconstrained/simpleKnapsackTournament/PrintOptOut_1.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/unconstrained/simpleKnapsackTournament/PrintOptOut_1.csv index 86fac51f8a..a5bd67a05d 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/unconstrained/simpleKnapsackTournament/PrintOptOut_1.csv +++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/unconstrained/simpleKnapsackTournament/PrintOptOut_1.csv @@ -1,71 +1,71 @@ -proj1,proj2,proj3,proj4,proj5,proj6,proj7,proj8,proj9,proj10,planValue,validPlan,PointProbability,ProbabilityWeight-proj5,ProbabilityWeight-proj4,prefix,ProbabilityWeight-proj7,ProbabilityWeight-proj9,ProbabilityWeight-proj6,ProbabilityWeight-proj8,ProbabilityWeight,ProbabilityWeight-proj2,ProbabilityWeight-proj10,ProbabilityWeight-proj3,ProbabilityWeight-proj1,batchId -0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 -0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 -0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 -1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,3,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 -0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 -0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 -0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 -1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,3,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 -0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 -0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1 -0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,0.0,5,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,8,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,8,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,8,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,1.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,3,1.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,-1,1.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,-1,1.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,8,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,7,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,7,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 
-0.0,1.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 -0.0,1.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,7,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 -0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,8,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 -0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,8,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 -0.0,1.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,3,1.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5 +proj1,proj2,proj3,proj4,proj5,proj6,proj7,proj8,proj9,proj10,planValue,validPlan,ProbabilityWeight-proj4,ProbabilityWeight-proj6,PointProbability,ProbabilityWeight-proj8,ProbabilityWeight-proj1,prefix,ProbabilityWeight-proj3,ProbabilityWeight-proj2,ProbabilityWeight-proj5,ProbabilityWeight-proj9,ProbabilityWeight,batchId,ProbabilityWeight-proj7,ProbabilityWeight-proj10 +0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 +0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 
+0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 +1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,3,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 +0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 +0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 +1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,3,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 +0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,2,1.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 From 255b58f1200f6ee0f9dfec64320d71336efc5190 Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Sun, 28 Jan 2024 18:15:29 -0700 Subject: [PATCH 55/84] Comments from Mohammad are reflected. --- ravenframework/Optimizers/Optimizer.py | 2 +- ravenframework/Optimizers/fitness/fitness.py | 45 +-- .../continuous/unconstrained/ZDT1.xml | 2 +- .../unconstrained/ZDT1/opt_export_0.csv | 321 +++++++++--------- .../Multi_MinwoReplacement/opt_export_0.csv | 278 +++++++++++++-- 5 files changed, 423 insertions(+), 225 deletions(-) diff --git a/ravenframework/Optimizers/Optimizer.py b/ravenframework/Optimizers/Optimizer.py index a9b5600615..68687ced91 100644 --- a/ravenframework/Optimizers/Optimizer.py +++ b/ravenframework/Optimizers/Optimizer.py @@ -265,7 +265,7 @@ def handleInput(self, paramInput): if minMax is not None: self._minMax = minMax.value if len(self._minMax) != len(self._objectiveVar): - self.raiseAnError(IOError, 'The number of in -- and in - must be of the same length!') + self.raiseAnError(IOError, 'The length of in -- and in - must be of the same length!') if list(set(self._minMax)-set(['min','max'])) != []: self.raiseAnError(IOError, " under - must be a either 'min' and/or 'max'") diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index c76f6dfab3..d59a119833 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -59,10 +59,10 @@ def invLinear(rlz,**kwargs): the farthest from violating the constraint it is, The highest negative value it have the largest the violation is. @ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome. """ - a = [1.0] if kwargs['a'] == None else kwargs['a'] - b = [10.0] if kwargs['b'] == None else kwargs['b'] - penalty = 0.0 if kwargs['constraintFunction'].all() == None else kwargs['constraintFunction'].data - objVar = [kwargs['objVar']] if isinstance(kwargs['objVar'], str) == True else kwargs['objVar'] + a = [1.0] if kwargs['a'] == None else kwargs['a'] + b = [10.0] if kwargs['b'] == None else kwargs['b'] + penalty = 0.0 if kwargs['constraintFunction'].all() == None else kwargs['constraintFunction'].data + objVar = [kwargs['objVar']] if isinstance(kwargs['objVar'], str) == True else kwargs['objVar'] for j in range(len(objVar)): data = np.atleast_1d(rlz[objVar][objVar[j]].data) @@ -76,40 +76,6 @@ def invLinear(rlz,**kwargs): fitnessSet[objVar[j]] = fitness return fitnessSet -#NOTE hardConstraint method will be used later once constraintHandling is realized. Until then, it will be commented. @JunyungKim -# def hardConstraint(rlz,**kwargs): -# r""" -# Fitness method counting the number of constraints violated - -# @ In, rlz, xr.Dataset, containing the evaluation of a certain -# set of individuals (can be the initial population for the very first iteration, -# or a population of offsprings) -# @ In, kwargs, dict, dictionary of parameters for this rank_crowding method: -# objVar, string, the names of the objective variables -# @ Out, offSpringRank, xr.DataArray, the rank of the given objective corresponding to a specific chromosome. -# offSpringCD, xr.DataArray, the crowding distance of the given objective corresponding to a specific chromosome. 
-# """ -# if isinstance(kwargs['objVar'], str) == True: -# objVar = [kwargs['objVar']] -# else: -# objVar = kwargs['objVar'] -# g = kwargs['constraintFunction'] - -# for j in range(len(objVar)): -# fitness = np.zeros((len(g.data), 1)) -# for i in range(len(fitness)): -# fitness[i] = countConstViolation(g.data[i]) -# fitness = [-item for sublist in fitness.tolist() for item in sublist] -# fitness = xr.DataArray(fitness, -# dims=['NumOfConstraintViolated'], -# coords={'NumOfConstraintViolated':np.arange(np.shape(fitness)[0])}) -# if j == 0: -# fitnessSet = fitness.to_dataset(name = objVar[j]) -# else: -# fitnessSet[objVar[j]] = fitness - -# return fitnessSet - def feasibleFirst(rlz,**kwargs): r""" @@ -143,9 +109,8 @@ def feasibleFirst(rlz,**kwargs): 'constraintFunction', xr.Dataarray, containing all constraint functions (explicit and implicit) evaluations for the whole population @ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome. """ - optType = kwargs['type'] objVar = [kwargs['objVar']] if isinstance(kwargs['objVar'], str) == True else kwargs['objVar'] - a = 1.0 if kwargs['a'] == None else kwargs['a'] + a = 1.0 if kwargs['a'] == None else kwargs['a'] if kwargs['constraintNum'] == 0: pen = kwargs['b'] else: diff --git a/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml index d100a928b0..f9891bdd6a 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml @@ -114,7 +114,7 @@ trajID - x1,x2,x3,obj1,obj2,age,batchId,rank,CD,fitness,accepted + x1,x2,x3,obj1,obj2,age,batchId,rank,CD,FitnessEvaluation_obj1, FitnessEvaluation_obj2,accepted diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv index 25ec63951b..422ae2a2a6 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv +++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv @@ -1,160 +1,161 @@ -x1,x2,x3,obj1,obj2,age,batchId,rank,CD,fitness,accepted -0.902940987587,0.947612243227,0.840374259707,0.902940987587,3.96681957049,0.0,1.0,3.0,inf,0.0,first -0.227236453264,0.847510234417,0.362760231915,0.227236453264,3.60499993579,0.0,1.0,2.0,inf,0.0,first -0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,1.0,1.0,inf,0.0,first -0.633202111729,0.793545654927,0.564774226762,0.633202111729,3.28234279694,0.0,1.0,2.0,2.0,0.0,first -0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,1.0,1.0,1.32735741676,0.0,first -0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,1.0,1.0,inf,0.0,first -0.331692186261,0.571854743308,0.965348788995,0.331692186261,4.24730587019,0.0,1.0,3.0,inf,0.0,first -0.267873673297,0.166777967281,0.847808119107,0.267873673297,3.00298144409,0.0,1.0,1.0,0.749061564967,0.0,first -0.713407223745,0.641621648018,0.334449647072,0.713407223745,2.25417202135,0.0,1.0,2.0,inf,0.0,first -0.13264102096,0.630711117673,0.405532905694,0.13264102096,3.37050011696,0.0,1.0,1.0,0.672642583243,0.0,first -0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,2.0,1.0,inf,0.0,accepted 
-0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,2.0,1.0,inf,0.0,accepted -0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,2.0,1.0,1.17548106192,0.0,accepted -0.13264102096,0.641621648018,0.000778764719325,0.13264102096,2.3040905251,0.0,2.0,1.0,1.09190519404,0.0,accepted -0.713407223745,0.192211290866,0.431945021132,0.713407223745,1.44095222672,0.0,2.0,1.0,0.908094805963,0.0,accepted -0.713407223745,0.604534715322,0.183404509952,0.713407223745,1.81469798087,0.0,2.0,2.0,inf,0.0,accepted -0.13264102096,0.192211290866,0.4560699904,0.13264102096,2.31985816876,0.0,2.0,2.0,inf,0.0,accepted -0.713407223745,0.641621648018,0.334449647072,0.713407223745,2.25417202135,0.0,2.0,3.0,inf,0.0,accepted -0.13264102096,0.630711117673,0.405532905694,0.13264102096,3.37050011696,0.0,2.0,3.0,inf,0.0,accepted -0.524774661876,0.641621648018,0.39961784645,0.524774661876,2.65265663127,0.0,2.0,3.0,2.0,0.0,accepted -0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,3.0,1.0,inf,0.0,accepted -0.110044764846,0.567700327273,0.738899003886,0.110044764846,4.18400045796,0.0,3.0,1.0,inf,0.0,accepted -0.13264102096,0.192211290866,0.173364647239,0.13264102096,1.56936445005,0.0,3.0,1.0,1.17365382082,0.0,accepted -0.258779981001,0.00975325447734,0.39961784645,0.258779981001,1.46877733092,0.0,3.0,1.0,0.929582947246,0.0,accepted -0.713407223745,0.192211290866,0.431945021132,0.713407223745,1.44095222672,0.0,3.0,1.0,0.826346179184,0.0,accepted -0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,3.0,2.0,inf,0.0,accepted -0.713407223745,0.604534715322,0.183404509952,0.713407223745,1.81469798087,0.0,3.0,2.0,inf,0.0,accepted -0.13264102096,0.641621648018,0.000778764719325,0.13264102096,2.3040905251,0.0,3.0,2.0,1.2778379808,0.0,accepted -0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,3.0,2.0,1.1605567663,0.0,accepted -0.306377726911,0.192211290866,0.449754129036,0.306377726911,1.97909667942,0.0,3.0,3.0,inf,0.0,accepted -0.110044764846,0.567700327273,0.738899003886,0.110044764846,4.18400045796,0.0,4.0,1.0,inf,0.0,accepted -0.772244771889,0.00975325447734,0.39961784645,0.772244771889,0.916378249788,0.0,4.0,1.0,inf,0.0,accepted -0.13264102096,0.192211290866,0.173364647239,0.13264102096,1.56936445005,0.0,4.0,1.0,1.05555538382,0.0,accepted -0.258779981001,0.00975325447734,0.39961784645,0.258779981001,1.46877733092,0.0,4.0,1.0,0.916323733867,0.0,accepted -0.713407223745,0.192211290866,0.431945021132,0.713407223745,1.44095222672,0.0,4.0,1.0,0.813065271377,0.0,accepted -0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,4.0,1.0,0.249388538585,0.0,accepted -0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,4.0,2.0,inf,0.0,accepted -0.13264102096,0.00975325447734,0.37081825509,0.13264102096,1.60872372044,0.0,4.0,2.0,inf,0.0,accepted -0.13264102096,0.641621648018,0.000778764719325,0.13264102096,2.3040905251,0.0,4.0,3.0,inf,0.0,accepted -0.258779981001,0.192211290866,0.293488176375,0.258779981001,1.65969770087,0.0,4.0,3.0,inf,0.0,accepted -0.107891428309,0.192211290866,0.173364647239,0.107891428309,1.6211030056,4.0,5.0,1.0,inf,0.0,accepted -0.713407223745,0.00975325447734,0.228798159219,0.713407223745,0.609326925579,4.0,5.0,1.0,inf,0.0,accepted -0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,4.0,5.0,1.0,1.6002571283,0.0,accepted 
-0.258779981001,0.00975325447734,0.39961784645,0.258779981001,1.46877733092,4.0,5.0,1.0,1.00964323224,0.0,accepted -0.13264102096,0.192211290866,0.173364647239,0.13264102096,1.56936445005,4.0,5.0,1.0,0.399742871701,0.0,accepted -0.772244771889,0.00975325447734,0.39961784645,0.772244771889,0.916378249788,4.0,5.0,2.0,inf,0.0,accepted -0.110044764846,0.192211290866,0.624354044354,0.110044764846,2.83356210322,4.0,5.0,2.0,inf,0.0,accepted -0.13264102096,0.00975325447734,0.37081825509,0.13264102096,1.60872372044,4.0,5.0,2.0,1.67384162643,0.0,accepted -0.713407223745,0.161221285621,0.431945021132,0.713407223745,1.37133891227,4.0,5.0,2.0,1.10926623497,0.0,accepted -0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,4.0,5.0,2.0,0.326158373566,0.0,accepted -0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,1.0,6.0,1.0,inf,0.0,accepted -0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,1.0,6.0,1.0,inf,0.0,accepted -0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,1.0,6.0,1.0,1.10932398766,0.0,accepted -0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,1.0,6.0,1.0,0.890676012342,0.0,accepted -0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,1.0,6.0,1.0,0.712923687869,0.0,accepted -0.713407223745,0.00975325447734,0.228798159219,0.713407223745,0.609326925579,1.0,6.0,2.0,inf,0.0,accepted -0.0648922466358,0.00975325447734,0.37081825509,0.0648922466358,1.76891341897,1.0,6.0,2.0,inf,0.0,accepted -0.222107806295,0.00975325447734,0.228798159219,0.222107806295,1.09835350413,1.0,6.0,2.0,1.72344612754,0.0,accepted -0.13264102096,0.192211290866,0.173364647239,0.13264102096,1.56936445005,1.0,6.0,2.0,0.626926698592,0.0,accepted -0.107891428309,0.192211290866,0.173364647239,0.107891428309,1.6211030056,1.0,6.0,2.0,0.27655387246,0.0,accepted -0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,1.0,7.0,1.0,inf,0.0,accepted -0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,1.0,7.0,1.0,inf,0.0,accepted -0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,1.0,7.0,1.0,0.865064939691,0.0,accepted -0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,1.0,7.0,1.0,0.712923687869,0.0,accepted -0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,1.0,7.0,1.0,0.651756198699,0.0,accepted -0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,1.0,7.0,1.0,0.634832572855,0.0,accepted -0.0648922466358,0.00975325447734,0.242159936633,0.0648922466358,1.41819863445,1.0,7.0,1.0,0.422011372441,0.0,accepted -0.713407223745,0.00975325447734,0.228798159219,0.713407223745,0.609326925579,1.0,7.0,2.0,inf,0.0,accepted -0.00695213070301,0.00975325447734,0.367783134656,0.00695213070301,2.01084637477,1.0,7.0,2.0,inf,0.0,accepted -0.222107806295,0.00975325447734,0.228798159219,0.222107806295,1.09835350413,1.0,7.0,2.0,2.0,0.0,accepted -0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,2.0,8.0,1.0,inf,0.0,accepted -0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,2.0,8.0,1.0,inf,0.0,accepted -0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,2.0,8.0,1.0,0.865064939691,0.0,accepted -0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,2.0,8.0,1.0,0.712923687869,0.0,accepted 
-0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,2.0,8.0,1.0,0.651756198699,0.0,accepted -0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,2.0,8.0,1.0,0.634832572855,0.0,accepted -0.0648922466358,0.00975325447734,0.242159936633,0.0648922466358,1.41819863445,2.0,8.0,1.0,0.422011372441,0.0,accepted -0.713407223745,0.00975325447734,0.228798159219,0.713407223745,0.609326925579,2.0,8.0,2.0,inf,0.0,accepted -0.00695213070301,0.00975325447734,0.367783134656,0.00695213070301,2.01084637477,2.0,8.0,2.0,inf,0.0,accepted -0.222107806295,0.00975325447734,0.228798159219,0.222107806295,1.09835350413,2.0,8.0,2.0,2.0,0.0,accepted -0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,0.0,9.0,1.0,inf,0.0,accepted -0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,0.0,9.0,1.0,inf,0.0,accepted -0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,0.0,9.0,1.0,0.651756198699,0.0,accepted -0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,0.0,9.0,1.0,0.513796266194,0.0,accepted -0.178822707194,0.00975325447734,0.173364647239,0.178822707194,1.02298916991,0.0,9.0,1.0,0.468664639901,0.0,accepted -0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,0.0,9.0,1.0,0.454750192604,0.0,accepted -0.0648922466358,0.00975325447734,0.237249083872,0.0648922466358,1.40488522668,0.0,9.0,1.0,0.422011372441,0.0,accepted -0.649632900872,0.00975325447734,0.242159936633,0.649632900872,0.68775727376,0.0,9.0,1.0,0.410314747086,0.0,accepted -0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,0.0,9.0,1.0,0.353145324752,0.0,accepted -0.568308599426,0.00975325447734,0.242159936633,0.568308599426,0.756839229015,0.0,9.0,1.0,0.228270418055,0.0,accepted -0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,1.0,10.0,1.0,inf,0.0,accepted -0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,1.0,10.0,1.0,inf,0.0,accepted -0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,1.0,10.0,1.0,0.651756198699,0.0,accepted -0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,1.0,10.0,1.0,0.513796266194,0.0,accepted -0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,1.0,10.0,1.0,0.454750192604,0.0,accepted -0.0648922466358,0.00975325447734,0.237249083872,0.0648922466358,1.40488522668,1.0,10.0,1.0,0.422011372441,0.0,accepted -0.649632900872,0.00975325447734,0.242159936633,0.649632900872,0.68775727376,1.0,10.0,1.0,0.410314747086,0.0,accepted -0.178822707194,0.00975325447734,0.173364647239,0.178822707194,1.02298916991,1.0,10.0,1.0,0.468664639901,0.0,accepted -0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,1.0,10.0,1.0,0.353145324752,0.0,accepted -0.568308599426,0.00975325447734,0.242159936633,0.568308599426,0.756839229015,1.0,10.0,1.0,0.228270418055,0.0,accepted -0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,2.0,11.0,1.0,inf,0.0,accepted -0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,2.0,11.0,1.0,inf,0.0,accepted -0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,2.0,11.0,1.0,0.650175533333,0.0,accepted -0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,2.0,11.0,1.0,0.634536882707,0.0,accepted 
-0.0648922466358,0.00975325447734,0.237249083872,0.0648922466358,1.40488522668,2.0,11.0,1.0,0.420037013968,0.0,accepted -0.649632900872,0.00975325447734,0.242159936633,0.649632900872,0.68775727376,2.0,11.0,1.0,0.463954261861,0.0,accepted -0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,2.0,11.0,1.0,0.357366573279,0.0,accepted -0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,2.0,11.0,1.0,0.351855878369,0.0,accepted -0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,2.0,11.0,1.0,0.31630181039,0.0,accepted -0.178822707194,0.00975325447734,0.173364647239,0.178822707194,1.02298916991,2.0,11.0,1.0,0.303090139359,0.0,accepted -0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,6.0,12.0,1.0,inf,0.0,accepted -0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,6.0,12.0,1.0,inf,0.0,accepted -0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,6.0,12.0,1.0,0.650175533333,0.0,accepted -0.649632900872,0.00518486043559,0.184333673023,0.649632900872,0.559107570583,6.0,12.0,1.0,0.444060435829,0.0,accepted -0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,6.0,12.0,1.0,0.426680098555,0.0,accepted -0.0648922466358,0.00975325447734,0.237249083872,0.0648922466358,1.40488522668,6.0,12.0,1.0,0.420037013968,0.0,accepted -0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,6.0,12.0,1.0,0.465833190838,0.0,accepted -0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,6.0,12.0,1.0,0.546479400168,0.0,accepted -0.540635119784,0.00518486043559,0.228798159219,0.540635119784,0.742713199876,6.0,12.0,1.0,0.344191262312,0.0,accepted -0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,6.0,12.0,1.0,0.31630181039,0.0,accepted -0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,7.0,13.0,1.0,inf,0.0,accepted -0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,7.0,13.0,1.0,inf,0.0,accepted -0.314355981377,0.00518486043559,0.0460026422623,0.314355981377,0.551375432252,7.0,13.0,1.0,1.11412979519,0.0,accepted -0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,7.0,13.0,1.0,0.818090041033,0.0,accepted -0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,7.0,13.0,1.0,0.527591261357,0.0,accepted -0.0480589254405,0.00975325447734,0.173364647239,0.0480589254405,1.27647960561,7.0,13.0,1.0,0.368715073082,0.0,accepted -0.0171611045993,0.00975325447734,0.173364647239,0.0171611045993,1.38629352915,7.0,13.0,1.0,0.358278943448,0.0,accepted -0.649632900872,0.00518486043559,0.184333673023,0.649632900872,0.559107570583,7.0,13.0,2.0,inf,0.0,accepted -0.00695213070301,0.00975325447734,0.34187967245,0.00695213070301,1.93537503877,7.0,13.0,2.0,inf,0.0,accepted -0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,7.0,13.0,2.0,2.0,0.0,accepted -0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,1.0,14.0,1.0,inf,0.0,accepted -0.649632900872,0.00518486043559,0.0944429603625,0.649632900872,0.380298990737,1.0,14.0,1.0,inf,0.0,accepted -0.314355981377,0.00518486043559,0.0460026422623,0.314355981377,0.551375432252,1.0,14.0,1.0,1.149052421,0.0,accepted -0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,1.0,14.0,1.0,0.776808582325,0.0,accepted 
-0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,1.0,14.0,1.0,0.46393693418,0.0,accepted -0.0171611045993,0.00975325447734,0.173364647239,0.0171611045993,1.38629352915,1.0,14.0,1.0,0.333067034374,0.0,accepted -0.0693613020865,0.00975325447734,0.184333673023,0.0693613020865,1.2509789265,1.0,14.0,1.0,0.228915255524,0.0,accepted -0.0480589254405,0.00975325447734,0.173364647239,0.0480589254405,1.27647960561,1.0,14.0,1.0,0.191580577061,0.0,accepted -0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,1.0,14.0,2.0,inf,0.0,accepted -0.00695213070301,0.00975325447734,0.34187967245,0.00695213070301,1.93537503877,1.0,14.0,2.0,inf,0.0,accepted -0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,4.0,15.0,1.0,inf,0.0,accepted -0.902552906634,0.00518486043559,0.0460026422623,0.902552906634,0.13319434186,4.0,15.0,1.0,inf,0.0,accepted -0.649632900872,0.00518486043559,0.0944429603625,0.649632900872,0.380298990737,4.0,15.0,1.0,0.94061235925,0.0,accepted -0.314355981377,0.00518486043559,0.0460026422623,0.314355981377,0.551375432252,4.0,15.0,1.0,0.879443568797,0.0,accepted -0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,4.0,15.0,1.0,0.618089346206,0.0,accepted -0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,4.0,15.0,1.0,0.35946352349,0.0,accepted -0.0171611045993,0.00975325447734,0.173364647239,0.0171611045993,1.38629352915,4.0,15.0,1.0,0.269867635023,0.0,accepted -0.0693613020865,0.00975325447734,0.184333673023,0.0693613020865,1.2509789265,4.0,15.0,1.0,0.171430659521,0.0,accepted -0.0480589254405,0.00975325447734,0.173364647239,0.0480589254405,1.27647960561,4.0,15.0,1.0,0.150132989794,0.0,accepted -0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,4.0,15.0,2.0,inf,0.0,accepted -0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,4.0,15.0,1.0,inf,0.0,final -0.902552906634,0.00518486043559,0.0460026422623,0.902552906634,0.13319434186,4.0,15.0,1.0,inf,0.0,final -0.649632900872,0.00518486043559,0.0944429603625,0.649632900872,0.380298990737,4.0,15.0,1.0,0.94061235925,0.0,final -0.314355981377,0.00518486043559,0.0460026422623,0.314355981377,0.551375432252,4.0,15.0,1.0,0.879443568797,0.0,final -0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,4.0,15.0,1.0,0.618089346206,0.0,final -0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,4.0,15.0,1.0,0.35946352349,0.0,final -0.0171611045993,0.00975325447734,0.173364647239,0.0171611045993,1.38629352915,4.0,15.0,1.0,0.269867635023,0.0,final -0.0693613020865,0.00975325447734,0.184333673023,0.0693613020865,1.2509789265,4.0,15.0,1.0,0.171430659521,0.0,final -0.0480589254405,0.00975325447734,0.173364647239,0.0480589254405,1.27647960561,4.0,15.0,1.0,0.150132989794,0.0,final +x1,x2,x3,obj1,obj2,age,batchId,rank,CD,FitnessEvaluation_obj1,FitnessEvaluation_obj2,accepted +0.902940987587,0.947612243227,0.840374259707,0.902940987587,3.96681957049,0.0,1.0,3.0,inf,0.902940987587,3.96681957049,first +0.227236453264,0.847510234417,0.362760231915,0.227236453264,3.60499993579,0.0,1.0,2.0,inf,0.227236453264,3.60499993579,first +0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,1.0,1.0,inf,0.766431005617,1.3169883176,first +0.633202111729,0.793545654927,0.564774226762,0.633202111729,3.28234279694,0.0,1.0,2.0,2.0,0.633202111729,3.28234279694,first 
+0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,1.0,1.0,1.32735741676,0.306377726911,1.93224686343,first +0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,1.0,1.0,inf,0.110044764846,4.28628616584,first +0.331692186261,0.571854743308,0.965348788995,0.331692186261,4.24730587019,0.0,1.0,3.0,inf,0.331692186261,4.24730587019,first +0.267873673297,0.166777967281,0.847808119107,0.267873673297,3.00298144409,0.0,1.0,1.0,0.749061564967,0.267873673297,3.00298144409,first +0.713407223745,0.641621648018,0.334449647072,0.713407223745,2.25417202135,0.0,1.0,2.0,inf,0.713407223745,2.25417202135,first +0.13264102096,0.630711117673,0.405532905694,0.13264102096,3.37050011696,0.0,1.0,1.0,0.672642583243,0.13264102096,3.37050011696,first +0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,2.0,1.0,inf,0.766431005617,1.3169883176,accepted +0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,2.0,1.0,inf,0.110044764846,4.28628616584,accepted +0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,0.0,2.0,1.0,1.65715637093,0.227236453264,1.41712807233,accepted +0.13264102096,0.630711117673,0.405532905694,0.13264102096,3.37050011696,0.0,2.0,1.0,1.14481568274,0.13264102096,3.37050011696,accepted +0.227236453264,0.192211290866,0.291229140081,0.227236453264,1.70412941407,0.0,2.0,2.0,inf,0.227236453264,1.70412941407,accepted +0.227236453264,0.304242241034,0.362760231915,0.227236453264,2.17521187389,0.0,2.0,3.0,inf,0.227236453264,2.17521187389,accepted +0.766431005617,0.399860977754,0.39961784645,0.766431005617,1.78453837074,0.0,2.0,3.0,inf,0.766431005617,1.78453837074,accepted +0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,2.0,3.0,2.0,0.306377726911,1.93224686343,accepted +0.713407223745,0.641621648018,0.334449647072,0.713407223745,2.25417202135,0.0,2.0,4.0,inf,0.713407223745,2.25417202135,accepted +0.227236453264,0.192211290866,0.785175960228,0.227236453264,2.98689433584,0.0,2.0,4.0,inf,0.227236453264,2.98689433584,accepted +0.110044764846,0.207941663733,0.738899003886,0.110044764846,3.19042251763,0.0,3.0,1.0,inf,0.110044764846,3.19042251763,accepted +0.766431005617,0.184854460225,0.39961784645,0.766431005617,1.300728344,0.0,3.0,1.0,inf,0.766431005617,1.300728344,accepted +0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,0.0,3.0,1.0,1.2940861679,0.227236453264,1.41712807233,accepted +0.182236086852,0.192211290866,0.39961784645,0.182236086852,2.0642954793,0.0,3.0,1.0,1.11694361992,0.182236086852,2.0642954793,accepted +0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,3.0,2.0,inf,0.766431005617,1.3169883176,accepted +0.110044764846,0.192211290866,0.926658862253,0.110044764846,3.66420727224,0.0,3.0,2.0,inf,0.110044764846,3.66420727224,accepted +0.227236453264,0.192211290866,0.291229140081,0.227236453264,1.70412941407,0.0,3.0,2.0,1.84044490677,0.227236453264,1.70412941407,accepted +0.13264102096,0.630711117673,0.405532905694,0.13264102096,3.37050011696,0.0,3.0,2.0,1.01360466361,0.13264102096,3.37050011696,accepted +0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,3.0,3.0,inf,0.110044764846,4.28628616584,accepted +0.766431005617,0.192211290866,0.570443976105,0.766431005617,1.70051534965,0.0,3.0,3.0,inf,0.766431005617,1.70051534965,accepted +0.766431005617,0.184854460225,0.39961784645,0.766431005617,1.300728344,0.0,4.0,1.0,inf,0.766431005617,1.300728344,accepted 
+0.110044764846,0.207941663733,0.37081825509,0.110044764846,2.18754187519,0.0,4.0,1.0,inf,0.110044764846,2.18754187519,accepted +0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,0.0,4.0,1.0,1.75104033403,0.227236453264,1.41712807233,accepted +0.182236086852,0.192211290866,0.39961784645,0.182236086852,2.0642954793,0.0,4.0,1.0,1.04728459672,0.182236086852,2.0642954793,accepted +0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,4.0,2.0,inf,0.766431005617,1.3169883176,accepted +0.110044764846,0.115869054598,0.738899003886,0.110044764846,2.93801908476,0.0,4.0,2.0,inf,0.110044764846,2.93801908476,accepted +0.227236453264,0.192211290866,0.291229140081,0.227236453264,1.70412941407,0.0,4.0,2.0,2.0,0.227236453264,1.70412941407,accepted +0.110044764846,0.207941663733,0.738899003886,0.110044764846,3.19042251763,0.0,4.0,3.0,inf,0.110044764846,3.19042251763,accepted +0.766431005617,0.192211290866,0.570443976105,0.766431005617,1.70051534965,0.0,4.0,3.0,inf,0.766431005617,1.70051534965,accepted +0.330898024452,0.207941663733,0.738899003886,0.330898024452,2.71321497121,0.0,4.0,3.0,2.0,0.330898024452,2.71321497121,accepted +0.110044764846,0.184854460225,0.139331453512,0.110044764846,1.50665039766,0.0,5.0,1.0,inf,0.110044764846,1.50665039766,accepted +0.766431005617,0.184854460225,0.0769799126026,0.766431005617,0.61568905179,0.0,5.0,1.0,inf,0.766431005617,0.61568905179,accepted +0.694784936191,0.184854460225,0.39961784645,0.694784936191,1.37029270323,0.0,5.0,1.0,1.72098090572,0.694784936191,1.37029270323,accepted +0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,0.0,5.0,1.0,1.04389327594,0.227236453264,1.41712807233,accepted +0.110044764846,0.207941663733,0.37081825509,0.110044764846,2.18754187519,0.0,5.0,2.0,inf,0.110044764846,2.18754187519,accepted +0.766431005617,0.184854460225,0.203061229597,0.766431005617,0.875972693903,0.0,5.0,2.0,inf,0.766431005617,0.875972693903,accepted +0.227236453264,0.192211290866,0.291229140081,0.227236453264,1.70412941407,0.0,5.0,2.0,1.79604835097,0.227236453264,1.70412941407,accepted +0.182236086852,0.192211290866,0.39961784645,0.182236086852,2.0642954793,0.0,5.0,2.0,0.547116393146,0.182236086852,2.0642954793,accepted +0.110044764846,0.115869054598,0.738899003886,0.110044764846,2.93801908476,0.0,5.0,3.0,inf,0.110044764846,2.93801908476,accepted +0.766431005617,0.207941663733,0.218440438439,0.766431005617,0.957477514551,0.0,5.0,3.0,inf,0.766431005617,0.957477514551,accepted +0.766431005617,0.184854460225,0.0769799126026,0.766431005617,0.61568905179,0.0,6.0,1.0,inf,0.766431005617,0.61568905179,accepted +0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,0.0,6.0,1.0,inf,0.110044764846,1.2371480565,accepted +0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,0.0,6.0,1.0,2.0,0.337615172224,1.00909282563,accepted +0.110044764846,0.184854460225,0.139331453512,0.110044764846,1.50665039766,0.0,6.0,2.0,inf,0.110044764846,1.50665039766,accepted +0.766431005617,0.184854460225,0.203061229597,0.766431005617,0.875972693903,0.0,6.0,2.0,inf,0.766431005617,0.875972693903,accepted +0.694784936191,0.184854460225,0.39961784645,0.694784936191,1.37029270323,0.0,6.0,2.0,1.67951302037,0.694784936191,1.37029270323,accepted +0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,0.0,6.0,2.0,0.930869456237,0.227236453264,1.41712807233,accepted 
+0.148086928797,0.184854460225,0.139331453512,0.148086928797,1.43208570884,0.0,6.0,2.0,0.32048697963,0.148086928797,1.43208570884,accepted +0.766431005617,0.207941663733,0.218440438439,0.766431005617,0.957477514551,0.0,6.0,3.0,inf,0.766431005617,0.957477514551,accepted +0.110044764846,0.184854460225,0.168291045858,0.110044764846,1.58337958114,0.0,6.0,3.0,inf,0.110044764846,1.58337958114,accepted +0.766431005617,0.184854460225,0.0769799126026,0.766431005617,0.61568905179,2.0,7.0,1.0,inf,0.766431005617,0.61568905179,accepted +0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,2.0,7.0,1.0,inf,0.110044764846,1.2371480565,accepted +0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,2.0,7.0,1.0,1.47967201149,0.337615172224,1.00909282563,accepted +0.535774680445,0.184854460225,0.0368869491007,0.535774680445,0.720668788305,2.0,7.0,1.0,1.28633056549,0.535774680445,0.720668788305,accepted +0.110044764846,0.184854460225,0.139331453512,0.110044764846,1.50665039766,2.0,7.0,2.0,inf,0.110044764846,1.50665039766,accepted +0.766431005617,0.220241048192,0.0769799126026,0.766431005617,0.687574395089,2.0,7.0,2.0,inf,0.766431005617,0.687574395089,accepted +0.694784936191,0.184854460225,0.39961784645,0.694784936191,1.37029270323,2.0,7.0,2.0,1.71216252914,0.694784936191,1.37029270323,accepted +0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,2.0,7.0,2.0,0.908333052367,0.227236453264,1.41712807233,accepted +0.148086928797,0.184854460225,0.139331453512,0.148086928797,1.43208570884,2.0,7.0,2.0,0.287837470856,0.148086928797,1.43208570884,accepted +0.766431005617,0.184854460225,0.203061229597,0.766431005617,0.875972693903,2.0,7.0,3.0,inf,0.766431005617,0.875972693903,accepted +0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,6.0,8.0,1.0,inf,0.110044764846,1.2371480565,accepted +0.766431005617,0.00919704931071,0.0769799126026,0.766431005617,0.276402028431,6.0,8.0,1.0,inf,0.766431005617,0.276402028431,accepted +0.535774680445,0.184854460225,0.0368869491007,0.535774680445,0.720668788305,6.0,8.0,1.0,1.41592495639,0.535774680445,0.720668788305,accepted +0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,6.0,8.0,1.0,0.765262284562,0.337615172224,1.00909282563,accepted +0.278871351918,0.184854460225,0.0769799126026,0.278871351918,1.07986463561,6.0,8.0,1.0,0.584075043611,0.278871351918,1.07986463561,accepted +0.110044764846,0.184854460225,0.139331453512,0.110044764846,1.50665039766,6.0,8.0,2.0,inf,0.110044764846,1.50665039766,accepted +0.77987554478,0.184854460225,0.0769799126026,0.77987554478,0.605473365792,6.0,8.0,2.0,inf,0.77987554478,0.605473365792,accepted +0.694784936191,0.184854460225,0.39961784645,0.694784936191,1.37029270323,6.0,8.0,2.0,1.69429602798,0.694784936191,1.37029270323,accepted +0.766431005617,0.184854460225,0.0769799126026,0.766431005617,0.61568905179,6.0,8.0,2.0,0.97572232943,0.766431005617,0.61568905179,accepted +0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,6.0,8.0,2.0,1.02427767057,0.227236453264,1.41712807233,accepted +0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,1.0,9.0,1.0,inf,0.110044764846,1.2371480565,accepted +0.766431005617,0.00919704931071,0.0769799126026,0.766431005617,0.276402028431,1.0,9.0,1.0,inf,0.766431005617,0.276402028431,accepted +0.57200419986,0.184854460225,0.0368869491007,0.57200419986,0.689255349318,1.0,9.0,1.0,0.813821875556,0.57200419986,0.689255349318,accepted 
+0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,1.0,9.0,1.0,0.624852683938,0.337615172224,1.00909282563,accepted +0.486742150617,0.184854460225,0.0368869491007,0.486742150617,0.764927153681,1.0,9.0,1.0,0.602103080833,0.486742150617,0.764927153681,accepted +0.250461821735,0.184854460225,0.0368869491007,0.250461821735,1.01941071485,1.0,9.0,1.0,0.584075043611,0.250461821735,1.01941071485,accepted +0.535774680445,0.184854460225,0.0368869491007,0.535774680445,0.720668788305,1.0,9.0,1.0,0.208659743602,0.535774680445,0.720668788305,accepted +0.110044764846,0.184854460225,0.139331453512,0.110044764846,1.50665039766,1.0,9.0,2.0,inf,0.110044764846,1.50665039766,accepted +0.864167563818,0.184854460225,0.0368869491007,0.864167563818,0.465627310549,1.0,9.0,2.0,inf,0.864167563818,0.465627310549,accepted +0.278871351918,0.184854460225,0.0769799126026,0.278871351918,1.07986463561,1.0,9.0,2.0,2.0,0.278871351918,1.07986463561,accepted +0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,5.0,10.0,1.0,inf,0.110044764846,1.2371480565,accepted +0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,5.0,10.0,1.0,inf,0.740768615329,0.220002925316,accepted +0.57200419986,0.184854460225,0.0368869491007,0.57200419986,0.689255349318,5.0,10.0,1.0,0.817240341286,0.57200419986,0.689255349318,accepted +0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,5.0,10.0,1.0,0.624811669954,0.337615172224,1.00909282563,accepted +0.486742150617,0.184854460225,0.0368869491007,0.486742150617,0.764927153681,5.0,10.0,1.0,0.597740245314,0.486742150617,0.764927153681,accepted +0.250461821735,0.184854460225,0.0368869491007,0.250461821735,1.01941071485,5.0,10.0,1.0,0.483412783551,0.250461821735,1.01941071485,accepted +0.140084013608,0.184854460225,0.0368869491007,0.140084013608,1.18224255812,5.0,10.0,1.0,0.436695556704,0.140084013608,1.18224255812,accepted +0.535774680445,0.184854460225,0.0368869491007,0.535774680445,0.720668788305,5.0,10.0,1.0,0.209577537556,0.535774680445,0.720668788305,accepted +0.766431005617,0.00919704931071,0.0769799126026,0.766431005617,0.276402028431,5.0,10.0,2.0,inf,0.766431005617,0.276402028431,accepted +0.110044764846,0.184854460225,0.139331453512,0.110044764846,1.50665039766,5.0,10.0,2.0,inf,0.110044764846,1.50665039766,accepted +0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,0.0,11.0,1.0,inf,0.110044764846,1.2371480565,accepted +0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,0.0,11.0,1.0,inf,0.740768615329,0.220002925316,accepted +0.465598022674,0.00919704931071,0.0368869491007,0.465598022674,0.410263067925,0.0,11.0,1.0,1.4149805866,0.465598022674,0.410263067925,accepted +0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,0.0,11.0,1.0,0.939973901275,0.337615172224,1.00909282563,accepted +0.250461821735,0.184854460225,0.0368869491007,0.250461821735,1.01941071485,0.0,11.0,1.0,0.483412783551,0.250461821735,1.01941071485,accepted +0.140084013608,0.184854460225,0.0368869491007,0.140084013608,1.18224255812,0.0,11.0,1.0,0.436695556704,0.140084013608,1.18224255812,accepted +0.766431005617,0.00919704931071,0.0769799126026,0.766431005617,0.276402028431,0.0,11.0,2.0,inf,0.766431005617,0.276402028431,accepted +0.110044764846,0.00919704931071,0.250251364021,0.110044764846,1.33596804376,0.0,11.0,2.0,inf,0.110044764846,1.33596804376,accepted 
+0.486742150617,0.184854460225,0.0368869491007,0.486742150617,0.764927153681,0.0,11.0,2.0,1.86602707654,0.486742150617,0.764927153681,accepted +0.740768615329,0.0939819368287,0.0368869491007,0.740768615329,0.376929885588,0.0,11.0,2.0,0.887165606871,0.740768615329,0.376929885588,accepted +0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,2.0,12.0,1.0,inf,0.740768615329,0.220002925316,accepted +0.110044764846,0.0938732896684,0.0368869491007,0.110044764846,1.0008561335,2.0,12.0,1.0,inf,0.110044764846,1.0008561335,accepted +0.465598022674,0.00919704931071,0.0368869491007,0.465598022674,0.410263067925,2.0,12.0,1.0,2.0,0.465598022674,0.410263067925,accepted +0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,2.0,12.0,2.0,inf,0.110044764846,1.2371480565,accepted +0.766431005617,0.00919704931071,0.0769799126026,0.766431005617,0.276402028431,2.0,12.0,2.0,inf,0.766431005617,0.276402028431,accepted +0.486742150617,0.184854460225,0.0368869491007,0.486742150617,0.764927153681,2.0,12.0,2.0,0.848182252541,0.486742150617,0.764927153681,accepted +0.638270586878,0.184854460225,0.0368869491007,0.638270586878,0.634271442795,2.0,12.0,2.0,0.790857585549,0.638270586878,0.634271442795,accepted +0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,2.0,12.0,2.0,0.962497323004,0.337615172224,1.00909282563,accepted +0.740768615329,0.0939819368287,0.0368869491007,0.740768615329,0.376929885588,2.0,12.0,2.0,0.567742703848,0.740768615329,0.376929885588,accepted +0.140084013608,0.184854460225,0.0368869491007,0.140084013608,1.18224255812,2.0,12.0,2.0,0.584075043611,0.140084013608,1.18224255812,accepted +0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,7.0,13.0,1.0,inf,0.740768615329,0.220002925316,accepted +0.0480589254405,0.00919704931071,0.0368869491007,0.0480589254405,0.904364911855,7.0,13.0,1.0,inf,0.0480589254405,0.904364911855,accepted +0.43385165125,0.00919704931071,0.0368869491007,0.43385165125,0.435519821735,7.0,13.0,1.0,1.32475105339,0.43385165125,0.435519821735,accepted +0.465598022674,0.00919704931071,0.0368869491007,0.465598022674,0.410263067925,7.0,13.0,1.0,0.757983752596,0.465598022674,0.410263067925,accepted +0.110044764846,0.0938732896684,0.0368869491007,0.110044764846,1.0008561335,7.0,13.0,2.0,inf,0.110044764846,1.0008561335,accepted +0.766431005617,0.00919704931071,0.0769799126026,0.766431005617,0.276402028431,7.0,13.0,2.0,inf,0.766431005617,0.276402028431,accepted +0.486742150617,0.184854460225,0.0368869491007,0.486742150617,0.764927153681,7.0,13.0,2.0,1.31076353304,0.486742150617,0.764927153681,accepted +0.638270586878,0.184854460225,0.0368869491007,0.638270586878,0.634271442795,7.0,13.0,2.0,0.9225795106,0.638270586878,0.634271442795,accepted +0.740768615329,0.0939819368287,0.0368869491007,0.740768615329,0.376929885588,7.0,13.0,2.0,0.689236466957,0.740768615329,0.376929885588,accepted +0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,7.0,13.0,3.0,inf,0.110044764846,1.2371480565,accepted +0.0480589254405,0.00919704931071,0.0368869491007,0.0480589254405,0.904364911855,2.0,14.0,1.0,inf,0.0480589254405,0.904364911855,accepted +0.844875311909,0.00919704931071,0.0368869491007,0.844875311909,0.157598636751,2.0,14.0,1.0,inf,0.844875311909,0.157598636751,accepted +0.281854773006,0.00919704931071,0.0368869491007,0.281854773006,0.571840697907,2.0,14.0,1.0,1.11200143643,0.281854773006,0.571840697907,accepted 
+0.43385165125,0.00919704931071,0.0368869491007,0.43385165125,0.435519821735,2.0,14.0,1.0,0.446966472614,0.43385165125,0.435519821735,accepted +0.628942842742,0.00919704931071,0.0368869491007,0.628942842742,0.292145946539,2.0,14.0,1.0,0.414011590831,0.628942842742,0.292145946539,accepted +0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,2.0,14.0,1.0,0.366643659721,0.740768615329,0.220002925316,accepted +0.547971882752,0.00919704931071,0.0368869491007,0.547971882752,0.348486150901,2.0,14.0,1.0,0.363168285924,0.547971882752,0.348486150901,accepted +0.465598022674,0.00919704931071,0.0368869491007,0.465598022674,0.410263067925,2.0,14.0,1.0,0.259767640834,0.465598022674,0.410263067925,accepted +0.803480928019,0.00919704931071,0.0368869491007,0.803480928019,0.181923720294,2.0,14.0,1.0,0.214219331903,0.803480928019,0.181923720294,accepted +0.110044764846,0.0938732896684,0.0368869491007,0.110044764846,1.0008561335,2.0,14.0,2.0,inf,0.110044764846,1.0008561335,accepted +0.0480589254405,0.00919704931071,0.0368869491007,0.0480589254405,0.904364911855,4.0,15.0,1.0,inf,0.0480589254405,0.904364911855,accepted +0.956800884557,0.00919704931071,0.0368869491007,0.956800884557,0.094661790274,4.0,15.0,1.0,inf,0.956800884557,0.094661790274,accepted +0.281854773006,0.00919704931071,0.0368869491007,0.281854773006,0.571840697907,4.0,15.0,1.0,0.704629371176,0.281854773006,0.571840697907,accepted +0.43385165125,0.00919704931071,0.0368869491007,0.43385165125,0.435519821735,4.0,15.0,1.0,0.36506593516,0.43385165125,0.435519821735,accepted +0.2976351707,0.00919704931071,0.0368869491007,0.2976351707,0.556200599822,4.0,15.0,1.0,0.335619852656,0.2976351707,0.556200599822,accepted +0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,4.0,15.0,1.0,0.403785617585,0.740768615329,0.220002925316,accepted +0.547971882752,0.00919704931071,0.0368869491007,0.547971882752,0.348486150901,4.0,15.0,1.0,0.325625370568,0.547971882752,0.348486150901,accepted +0.628942842742,0.00919704931071,0.0368869491007,0.628942842742,0.292145946539,4.0,15.0,1.0,0.370837266347,0.628942842742,0.292145946539,accepted +0.844875311909,0.00919704931071,0.0368869491007,0.844875311909,0.157598636751,4.0,15.0,1.0,0.392525624748,0.844875311909,0.157598636751,accepted +0.465598022674,0.00919704931071,0.0368869491007,0.465598022674,0.410263067925,4.0,15.0,1.0,0.233068830687,0.465598022674,0.410263067925,accepted +0.0480589254405,0.00919704931071,0.0368869491007,0.0480589254405,0.904364911855,4.0,15.0,1.0,inf,0.0480589254405,0.904364911855,final +0.956800884557,0.00919704931071,0.0368869491007,0.956800884557,0.094661790274,4.0,15.0,1.0,inf,0.956800884557,0.094661790274,final +0.281854773006,0.00919704931071,0.0368869491007,0.281854773006,0.571840697907,4.0,15.0,1.0,0.704629371176,0.281854773006,0.571840697907,final +0.43385165125,0.00919704931071,0.0368869491007,0.43385165125,0.435519821735,4.0,15.0,1.0,0.36506593516,0.43385165125,0.435519821735,final +0.2976351707,0.00919704931071,0.0368869491007,0.2976351707,0.556200599822,4.0,15.0,1.0,0.335619852656,0.2976351707,0.556200599822,final +0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,4.0,15.0,1.0,0.403785617585,0.740768615329,0.220002925316,final +0.547971882752,0.00919704931071,0.0368869491007,0.547971882752,0.348486150901,4.0,15.0,1.0,0.325625370568,0.547971882752,0.348486150901,final +0.628942842742,0.00919704931071,0.0368869491007,0.628942842742,0.292145946539,4.0,15.0,1.0,0.370837266347,0.628942842742,0.292145946539,final 
+0.844875311909,0.00919704931071,0.0368869491007,0.844875311909,0.157598636751,4.0,15.0,1.0,0.392525624748,0.844875311909,0.157598636751,final +0.465598022674,0.00919704931071,0.0368869491007,0.465598022674,0.410263067925,4.0,15.0,1.0,0.233068830687,0.465598022674,0.410263067925,final diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv index dc39c524eb..74d8849c58 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv +++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv @@ -1,23 +1,255 @@ -x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,ConstraintEvaluation_expConstr3,ConstraintEvaluation_impConstr3,fitness,accepted -4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,0.0,1.0,2.0,inf,-2.0,1.0,1.0,first -7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,0.0,1.0,2.0,inf,3.0,11.0,0.0,first -4.0,3.0,6.0,7.0,2.0,5.0,96.0,10.0,0.0,1.0,1.0,inf,-3.0,4.0,1.0,first -7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,0.0,1.0,1.0,inf,2.0,14.0,0.0,first -4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,0.0,1.0,3.0,inf,1.0,10.0,0.0,first -4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,0.0,1.0,4.0,inf,3.0,5.0,0.0,first -4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,0.0,1.0,5.0,inf,3.0,5.0,0.0,first -6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,0.0,1.0,2.0,2.0,3.0,3.0,0.0,first -4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,0.0,1.0,1.0,2.0,1.0,6.0,0.0,first -2.0,4.0,6.0,5.0,3.0,7.0,105.0,10.0,0.0,1.0,3.0,inf,-1.0,-5.0,2.0,first -7.0,3.0,5.0,4.0,6.0,2.0,86.0,13.0,1.0,2.0,1.0,inf,1.0,14.0,0.0,accepted -7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,1.0,2.0,2.0,inf,2.0,14.0,0.0,accepted -6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,1.0,2.0,1.0,inf,3.0,3.0,0.0,accepted -6.0,4.0,7.0,2.0,3.0,5.0,88.0,14.0,1.0,2.0,2.0,inf,1.0,12.0,0.0,accepted -6.0,4.0,3.0,7.0,5.0,2.0,88.0,14.0,1.0,2.0,3.0,inf,0.0,12.0,0.0,accepted -7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,1.0,2.0,4.0,inf,3.0,11.0,0.0,accepted -4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,1.0,2.0,4.0,inf,1.0,6.0,0.0,accepted -4.0,5.0,7.0,2.0,3.0,6.0,94.0,14.0,1.0,2.0,5.0,inf,1.0,6.0,0.0,accepted -7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,1.0,2.0,5.0,inf,3.0,11.0,0.0,accepted -4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,1.0,2.0,6.0,inf,1.0,10.0,0.0,accepted -7.0,3.0,5.0,4.0,6.0,2.0,86.0,13.0,1.0,2.0,1.0,inf,1.0,14.0,0.0,final -6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,1.0,2.0,1.0,inf,3.0,3.0,0.0,final +x1,x2,x3,x4,x5,x6,obj1,obj2,obj3,age,batchId,rank,CD,FitnessEvaluation_obj1,FitnessEvaluation_obj2,ConstraintEvaluation_expConstr3,ConstraintEvaluation_impConstr3,accepted +4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,1.0,14.0,inf,210.0,193.0,-2.0,1.0,first +7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,23.0,0.0,1.0,3.0,1.6,89.0,-15.0,3.0,11.0,first +4.0,3.0,6.0,7.0,2.0,5.0,96.0,10.0,46.0,0.0,1.0,18.0,inf,260.0,293.0,-3.0,4.0,first +7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,29.0,0.0,1.0,3.0,inf,86.0,-15.0,2.0,14.0,first +4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,29.0,0.0,1.0,4.0,inf,90.0,-16.0,1.0,10.0,first +4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,26.0,0.0,1.0,4.0,0.899122807018,95.0,-16.0,3.0,5.0,first +4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,26.0,0.0,1.0,3.0,inf,95.0,-18.0,3.0,5.0,first +4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,33.0,0.0,1.0,5.0,1.59523809524,94.0,-14.0,1.0,6.0,first +2.0,4.0,6.0,5.0,3.0,7.0,105.0,10.0,38.0,0.0,1.0,24.0,inf,410.0,593.0,-1.0,-5.0,first +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,1.0,1.0,inf,85.0,-19.0,5.0,15.0,first 
+6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,23.0,0.0,1.0,4.0,inf,97.0,-12.0,3.0,3.0,first +6.0,2.0,7.0,5.0,3.0,4.0,90.0,10.0,41.0,0.0,1.0,15.0,inf,210.0,193.0,-2.0,10.0,first +3.0,6.0,4.0,7.0,5.0,2.0,92.0,15.0,40.0,0.0,1.0,8.0,inf,160.0,93.0,-1.0,8.0,first +2.0,3.0,6.0,7.0,4.0,5.0,104.0,8.0,46.0,0.0,1.0,26.0,inf,460.0,693.0,-3.0,-4.0,first +7.0,3.0,5.0,4.0,2.0,6.0,90.0,13.0,31.0,0.0,1.0,5.0,inf,90.0,-13.0,1.0,10.0,first +5.0,3.0,2.0,6.0,4.0,7.0,103.0,11.0,30.0,0.0,1.0,22.0,inf,260.0,293.0,2.0,-3.0,first +6.0,3.0,4.0,7.0,2.0,5.0,92.0,12.0,40.0,0.0,1.0,9.0,inf,160.0,93.0,-1.0,8.0,first +2.0,5.0,4.0,6.0,7.0,3.0,101.0,12.0,36.0,0.0,1.0,13.0,inf,160.0,93.0,0.0,-1.0,first +4.0,6.0,2.0,7.0,5.0,3.0,93.0,16.0,34.0,0.0,1.0,5.0,inf,93.0,-16.0,1.0,7.0,first +5.0,6.0,3.0,2.0,7.0,4.0,93.0,17.0,17.0,0.0,1.0,2.0,inf,93.0,-17.0,5.0,7.0,first +3.0,6.0,7.0,4.0,5.0,2.0,89.0,15.0,37.0,0.0,1.0,10.0,inf,160.0,93.0,-1.0,11.0,first +3.0,6.0,4.0,2.0,5.0,7.0,102.0,15.0,20.0,0.0,1.0,17.0,inf,210.0,193.0,4.0,-2.0,first +6.0,2.0,5.0,3.0,4.0,7.0,99.0,10.0,27.0,0.0,1.0,5.0,inf,99.0,-10.0,2.0,1.0,first +4.0,3.0,2.0,6.0,5.0,7.0,107.0,10.0,30.0,0.0,1.0,27.0,inf,460.0,693.0,2.0,-7.0,first +7.0,2.0,6.0,5.0,3.0,4.0,88.0,11.0,38.0,0.0,1.0,11.0,inf,160.0,93.0,-1.0,12.0,first +5.0,3.0,6.0,7.0,2.0,4.0,91.0,11.0,46.0,0.0,1.0,20.0,inf,260.0,293.0,-3.0,9.0,first +4.0,2.0,3.0,5.0,6.0,7.0,109.0,8.0,29.0,0.0,1.0,29.0,inf,560.0,893.0,2.0,-9.0,first +5.0,3.0,6.0,7.0,4.0,2.0,89.0,11.0,46.0,0.0,1.0,21.0,inf,260.0,293.0,-3.0,11.0,first +2.0,3.0,5.0,6.0,4.0,7.0,109.0,8.0,39.0,0.0,1.0,30.0,inf,610.0,993.0,-1.0,-9.0,first +3.0,2.0,7.0,4.0,6.0,5.0,104.0,7.0,37.0,0.0,1.0,23.0,inf,360.0,493.0,-1.0,-4.0,first +5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,1.0,1.0,0.907894736842,83.0,-19.0,0.0,17.0,first +3.0,5.0,6.0,4.0,7.0,2.0,94.0,13.0,34.0,0.0,1.0,6.0,inf,94.0,-13.0,0.0,6.0,first +7.0,6.0,2.0,4.0,3.0,5.0,86.0,19.0,22.0,0.0,1.0,2.0,inf,86.0,-19.0,4.0,14.0,first +7.0,2.0,5.0,4.0,6.0,3.0,90.0,11.0,31.0,0.0,1.0,6.0,inf,90.0,-11.0,1.0,10.0,first +3.0,4.0,2.0,6.0,7.0,5.0,106.0,11.0,30.0,0.0,1.0,25.0,inf,410.0,593.0,2.0,-6.0,first +4.0,6.0,3.0,2.0,7.0,5.0,98.0,16.0,17.0,0.0,1.0,4.0,inf,98.0,-16.0,5.0,2.0,first +4.0,6.0,7.0,3.0,2.0,5.0,89.0,16.0,33.0,0.0,1.0,4.0,1.45175438596,89.0,-16.0,0.0,11.0,first +5.0,6.0,2.0,3.0,7.0,4.0,94.0,17.0,18.0,0.0,1.0,3.0,1.43333333333,94.0,-17.0,5.0,6.0,first +7.0,2.0,5.0,4.0,3.0,6.0,93.0,11.0,31.0,0.0,1.0,7.0,inf,93.0,-11.0,1.0,7.0,first +7.0,3.0,4.0,6.0,5.0,2.0,86.0,13.0,36.0,0.0,1.0,4.0,inf,86.0,-13.0,0.0,14.0,first +5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,1.0,2.0,inf,83.0,-19.0,0.0,17.0,first +5.0,2.0,7.0,4.0,6.0,3.0,94.0,9.0,37.0,0.0,1.0,12.0,inf,160.0,93.0,-1.0,6.0,first +3.0,2.0,5.0,6.0,4.0,7.0,108.0,7.0,39.0,0.0,1.0,28.0,inf,560.0,893.0,-1.0,-8.0,first +7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,0.0,1.0,1.0,inf,81.0,-17.0,0.0,19.0,first +2.0,4.0,7.0,5.0,6.0,3.0,99.0,10.0,41.0,0.0,1.0,16.0,inf,210.0,193.0,-2.0,1.0,first +4.0,6.0,7.0,2.0,5.0,3.0,88.0,16.0,29.0,0.0,1.0,3.0,inf,88.0,-16.0,1.0,12.0,first +5.0,6.0,3.0,2.0,4.0,7.0,96.0,17.0,17.0,0.0,1.0,3.0,inf,96.0,-17.0,5.0,4.0,first +5.0,7.0,3.0,6.0,4.0,2.0,84.0,19.0,33.0,0.0,1.0,1.0,2.5,84.0,-19.0,1.0,16.0,first +4.0,3.0,7.0,6.0,2.0,5.0,95.0,10.0,45.0,0.0,1.0,19.0,inf,260.0,293.0,-3.0,5.0,first +2.0,4.0,3.0,5.0,7.0,6.0,110.0,10.0,29.0,0.0,1.0,31.0,inf,610.0,993.0,2.0,-10.0,first +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,2.0,1.0,inf,85.0,-19.0,5.0,15.0,accepted +7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,0.0,2.0,1.0,inf,81.0,-17.0,0.0,19.0,accepted 
+5.0,7.0,3.0,6.0,4.0,2.0,84.0,19.0,33.0,0.0,2.0,1.0,1.5,84.0,-19.0,1.0,16.0,accepted +5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,2.0,1.0,inf,83.0,-19.0,0.0,17.0,accepted +5.0,6.0,3.0,2.0,7.0,4.0,93.0,17.0,17.0,0.0,2.0,2.0,inf,93.0,-17.0,5.0,7.0,accepted +7.0,6.0,2.0,4.0,3.0,5.0,86.0,19.0,22.0,0.0,2.0,2.0,inf,86.0,-19.0,4.0,14.0,accepted +5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,2.0,2.0,inf,83.0,-19.0,0.0,17.0,accepted +7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,29.0,0.0,2.0,3.0,0.638111888112,86.0,-15.0,2.0,14.0,accepted +4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,26.0,0.0,2.0,3.0,inf,95.0,-18.0,3.0,5.0,accepted +4.0,6.0,7.0,2.0,5.0,3.0,88.0,16.0,29.0,0.0,2.0,3.0,1.31118881119,88.0,-16.0,1.0,12.0,accepted +5.0,6.0,3.0,2.0,4.0,7.0,96.0,17.0,17.0,0.0,2.0,3.0,inf,96.0,-17.0,5.0,4.0,accepted +7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,23.0,0.0,2.0,3.0,1.10314685315,89.0,-15.0,3.0,11.0,accepted +5.0,6.0,2.0,3.0,7.0,4.0,94.0,17.0,18.0,0.0,2.0,3.0,1.02622377622,94.0,-17.0,5.0,6.0,accepted +4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,29.0,0.0,2.0,4.0,inf,90.0,-16.0,1.0,10.0,accepted +4.0,6.0,3.0,2.0,7.0,5.0,98.0,16.0,17.0,0.0,2.0,4.0,inf,98.0,-16.0,5.0,2.0,accepted +7.0,3.0,4.0,6.0,5.0,2.0,86.0,13.0,36.0,0.0,2.0,4.0,inf,86.0,-13.0,0.0,14.0,accepted +7.0,3.0,5.0,2.0,6.0,4.0,90.0,13.0,23.0,0.0,2.0,4.0,inf,90.0,-13.0,3.0,10.0,accepted +4.0,6.0,7.0,3.0,2.0,5.0,89.0,16.0,33.0,0.0,2.0,4.0,0.460526315789,89.0,-16.0,0.0,11.0,accepted +6.0,4.0,7.0,3.0,2.0,5.0,87.0,14.0,33.0,0.0,2.0,4.0,1.07456140351,87.0,-14.0,0.0,13.0,accepted +6.0,4.0,5.0,2.0,3.0,7.0,94.0,14.0,23.0,0.0,2.0,4.0,0.907894736842,94.0,-14.0,3.0,6.0,accepted +4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,26.0,0.0,2.0,4.0,1.31578947368,95.0,-16.0,3.0,5.0,accepted +7.0,3.0,5.0,4.0,2.0,6.0,90.0,13.0,31.0,0.0,2.0,5.0,2.0,90.0,-13.0,1.0,10.0,accepted +4.0,6.0,2.0,7.0,5.0,3.0,93.0,16.0,34.0,0.0,2.0,5.0,inf,93.0,-16.0,1.0,7.0,accepted +6.0,4.0,5.0,2.0,3.0,7.0,94.0,14.0,23.0,0.0,2.0,5.0,inf,94.0,-14.0,3.0,6.0,accepted +4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,33.0,0.0,2.0,6.0,1.6,94.0,-14.0,1.0,6.0,accepted +7.0,2.0,5.0,4.0,6.0,3.0,90.0,11.0,31.0,0.0,2.0,6.0,inf,90.0,-11.0,1.0,10.0,accepted +6.0,3.0,5.0,2.0,7.0,4.0,94.0,12.0,23.0,0.0,2.0,6.0,inf,94.0,-12.0,3.0,6.0,accepted +3.0,5.0,6.0,4.0,7.0,2.0,94.0,13.0,34.0,0.0,2.0,7.0,inf,94.0,-13.0,0.0,6.0,accepted +7.0,2.0,5.0,4.0,3.0,6.0,93.0,11.0,31.0,0.0,2.0,7.0,inf,93.0,-11.0,1.0,7.0,accepted +6.0,3.0,5.0,2.0,7.0,4.0,94.0,12.0,23.0,0.0,2.0,7.0,inf,94.0,-12.0,3.0,6.0,accepted +6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,23.0,0.0,2.0,9.0,inf,97.0,-12.0,3.0,3.0,accepted +5.0,3.0,7.0,2.0,4.0,6.0,96.0,11.0,29.0,0.0,2.0,8.0,2.075,96.0,-11.0,1.0,4.0,accepted +6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,23.0,0.0,2.0,8.0,inf,97.0,-12.0,3.0,3.0,accepted +6.0,2.0,5.0,3.0,4.0,7.0,99.0,10.0,27.0,0.0,2.0,10.0,inf,99.0,-10.0,2.0,1.0,accepted +5.0,3.0,2.0,7.0,6.0,4.0,99.0,11.0,34.0,0.0,2.0,10.0,1.72916666667,99.0,-11.0,1.0,1.0,accepted +3.0,6.0,4.0,7.0,5.0,2.0,92.0,15.0,40.0,0.0,2.0,6.0,inf,160.0,93.0,-1.0,8.0,accepted +6.0,3.0,4.0,7.0,2.0,5.0,92.0,12.0,40.0,0.0,2.0,7.0,1.25,160.0,93.0,-1.0,8.0,accepted +3.0,6.0,7.0,4.0,5.0,2.0,89.0,15.0,37.0,0.0,2.0,5.0,1.0,160.0,93.0,-1.0,11.0,accepted +7.0,2.0,6.0,5.0,3.0,4.0,88.0,11.0,38.0,0.0,2.0,5.0,inf,160.0,93.0,-1.0,12.0,accepted +5.0,2.0,7.0,4.0,6.0,3.0,94.0,9.0,37.0,0.0,2.0,8.0,inf,160.0,93.0,-1.0,6.0,accepted +2.0,5.0,4.0,6.0,7.0,3.0,101.0,12.0,36.0,0.0,2.0,10.0,inf,160.0,93.0,0.0,-1.0,accepted +6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,6.0,inf,199.0,190.0,-2.0,11.0,accepted 
+4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,2.0,12.0,inf,199.0,190.0,-2.0,1.0,accepted +6.0,4.0,5.0,7.0,2.0,3.0,85.0,14.0,43.0,0.0,2.0,3.0,inf,199.0,190.0,-2.0,15.0,accepted +6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,7.0,inf,199.0,190.0,-2.0,11.0,accepted +4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,2.0,13.0,inf,199.0,190.0,-2.0,1.0,accepted +6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,8.0,inf,199.0,190.0,-2.0,11.0,accepted +6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,9.0,inf,199.0,190.0,-2.0,11.0,accepted +4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,2.0,11.0,inf,199.0,190.0,-2.0,1.0,accepted +6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,10.0,inf,199.0,190.0,-2.0,11.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,3.0,1.0,inf,85.0,-19.0,5.0,15.0,accepted +7.0,6.0,3.0,5.0,4.0,2.0,80.0,19.0,29.0,1.0,3.0,1.0,inf,80.0,-19.0,2.0,20.0,accepted +6.0,7.0,4.0,2.0,5.0,3.0,83.0,20.0,20.0,1.0,3.0,1.0,inf,83.0,-20.0,4.0,17.0,accepted +7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,1.0,3.0,2.0,inf,81.0,-17.0,0.0,19.0,accepted +6.0,7.0,4.0,3.0,2.0,5.0,84.0,20.0,24.0,1.0,3.0,2.0,inf,84.0,-20.0,3.0,16.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,3.0,2.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,5.0,2.0,4.0,83.0,20.0,29.0,1.0,3.0,2.0,1.7149122807,83.0,-20.0,2.0,17.0,accepted +7.0,6.0,4.0,2.0,3.0,5.0,84.0,19.0,20.0,1.0,3.0,2.0,1.2850877193,84.0,-19.0,4.0,16.0,accepted +5.0,7.0,3.0,6.0,4.0,2.0,84.0,19.0,33.0,1.0,3.0,3.0,inf,84.0,-19.0,1.0,16.0,accepted +7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,1.0,3.0,3.0,inf,81.0,-17.0,0.0,19.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,3.0,3.0,inf,85.0,-19.0,5.0,15.0,accepted +5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,1.0,3.0,3.0,inf,83.0,-19.0,0.0,17.0,accepted +5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,1.0,3.0,4.0,inf,83.0,-19.0,0.0,17.0,accepted +7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,1.0,3.0,4.0,inf,81.0,-17.0,0.0,19.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,3.0,4.0,inf,85.0,-19.0,5.0,15.0,accepted +7.0,6.0,3.0,2.0,4.0,5.0,86.0,19.0,17.0,1.0,3.0,5.0,inf,86.0,-19.0,5.0,14.0,accepted +5.0,6.0,3.0,2.0,7.0,4.0,93.0,17.0,17.0,1.0,3.0,6.0,inf,93.0,-17.0,5.0,7.0,accepted +7.0,6.0,2.0,4.0,3.0,5.0,86.0,19.0,22.0,1.0,3.0,6.0,inf,86.0,-19.0,4.0,14.0,accepted +7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,29.0,1.0,3.0,7.0,inf,86.0,-15.0,2.0,14.0,accepted +4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,26.0,1.0,3.0,7.0,inf,95.0,-18.0,3.0,5.0,accepted +5.0,6.0,3.0,2.0,4.0,7.0,96.0,17.0,17.0,1.0,3.0,7.0,inf,96.0,-17.0,5.0,4.0,accepted +7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,23.0,1.0,3.0,7.0,inf,89.0,-15.0,3.0,11.0,accepted +6.0,5.0,3.0,7.0,2.0,4.0,87.0,16.0,37.0,1.0,3.0,7.0,inf,87.0,-16.0,0.0,13.0,accepted +5.0,6.0,2.0,3.0,7.0,4.0,94.0,17.0,18.0,1.0,3.0,7.0,1.23333333333,94.0,-17.0,5.0,6.0,accepted +4.0,6.0,7.0,2.0,5.0,3.0,88.0,16.0,29.0,1.0,3.0,7.0,0.933333333333,88.0,-16.0,1.0,12.0,accepted +4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,29.0,1.0,3.0,8.0,inf,90.0,-16.0,1.0,10.0,accepted +4.0,6.0,3.0,2.0,7.0,5.0,98.0,16.0,17.0,1.0,3.0,8.0,inf,98.0,-16.0,5.0,2.0,accepted +7.0,3.0,4.0,6.0,5.0,2.0,86.0,13.0,36.0,1.0,3.0,8.0,inf,86.0,-13.0,0.0,14.0,accepted +7.0,3.0,5.0,2.0,6.0,4.0,90.0,13.0,23.0,1.0,3.0,8.0,inf,90.0,-13.0,3.0,10.0,accepted +4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,26.0,1.0,3.0,8.0,0.649122807018,95.0,-16.0,3.0,5.0,accepted +6.0,4.0,7.0,3.0,2.0,5.0,87.0,14.0,33.0,1.0,3.0,8.0,1.12719298246,87.0,-14.0,0.0,13.0,accepted +6.0,4.0,5.0,2.0,3.0,7.0,94.0,14.0,23.0,1.0,3.0,8.0,0.907894736842,94.0,-14.0,3.0,6.0,accepted 
+4.0,6.0,7.0,3.0,2.0,5.0,89.0,16.0,33.0,1.0,3.0,8.0,1.07456140351,89.0,-16.0,0.0,11.0,accepted +7.0,3.0,5.0,4.0,2.0,6.0,90.0,13.0,31.0,1.0,3.0,9.0,inf,90.0,-13.0,1.0,10.0,accepted +4.0,6.0,2.0,7.0,5.0,3.0,93.0,16.0,34.0,1.0,3.0,9.0,inf,93.0,-16.0,1.0,7.0,accepted +6.0,4.0,5.0,2.0,3.0,7.0,94.0,14.0,23.0,1.0,3.0,9.0,inf,94.0,-14.0,3.0,6.0,accepted +4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,33.0,1.0,3.0,10.0,inf,94.0,-14.0,1.0,6.0,accepted +7.0,2.0,5.0,4.0,6.0,3.0,90.0,11.0,31.0,1.0,3.0,10.0,inf,90.0,-11.0,1.0,10.0,accepted +6.0,3.0,5.0,2.0,7.0,4.0,94.0,12.0,23.0,1.0,3.0,10.0,inf,94.0,-12.0,3.0,6.0,accepted +7.0,2.0,5.0,4.0,3.0,6.0,93.0,11.0,31.0,1.0,3.0,11.0,inf,93.0,-11.0,1.0,7.0,accepted +6.0,3.0,5.0,2.0,7.0,4.0,94.0,12.0,23.0,1.0,3.0,11.0,inf,94.0,-12.0,3.0,6.0,accepted +2.0,6.0,3.0,7.0,5.0,4.0,100.0,14.0,37.0,1.0,3.0,11.0,inf,100.0,-14.0,0.0,0.0,accepted +3.0,5.0,6.0,4.0,7.0,2.0,94.0,13.0,34.0,1.0,3.0,11.0,1.95238095238,94.0,-13.0,0.0,6.0,accepted +6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,23.0,1.0,3.0,12.0,inf,97.0,-12.0,3.0,3.0,accepted +5.0,3.0,7.0,2.0,4.0,6.0,96.0,11.0,29.0,1.0,3.0,12.0,inf,96.0,-11.0,1.0,4.0,accepted +2.0,6.0,3.0,7.0,5.0,4.0,100.0,14.0,37.0,1.0,3.0,12.0,inf,100.0,-14.0,0.0,0.0,accepted +6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,23.0,1.0,3.0,13.0,inf,97.0,-12.0,3.0,3.0,accepted +2.0,6.0,3.0,7.0,5.0,4.0,100.0,14.0,37.0,1.0,3.0,13.0,inf,100.0,-14.0,0.0,0.0,accepted +6.0,2.0,5.0,3.0,4.0,7.0,99.0,10.0,27.0,1.0,3.0,14.0,inf,99.0,-10.0,2.0,1.0,accepted +5.0,3.0,2.0,7.0,6.0,4.0,99.0,11.0,34.0,1.0,3.0,14.0,inf,99.0,-11.0,1.0,1.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,1.0,inf,85.0,-19.0,5.0,15.0,accepted +7.0,6.0,3.0,5.0,4.0,2.0,80.0,19.0,29.0,1.0,4.0,1.0,inf,80.0,-19.0,2.0,20.0,accepted +6.0,7.0,4.0,2.0,5.0,3.0,83.0,20.0,20.0,1.0,4.0,1.0,inf,83.0,-20.0,4.0,17.0,accepted +7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,1.0,4.0,2.0,inf,81.0,-17.0,0.0,19.0,accepted +6.0,7.0,4.0,3.0,2.0,5.0,84.0,20.0,24.0,1.0,4.0,2.0,inf,84.0,-20.0,3.0,16.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,2.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,5.0,2.0,4.0,83.0,20.0,29.0,1.0,4.0,2.0,1.7149122807,83.0,-20.0,2.0,17.0,accepted +7.0,6.0,4.0,2.0,3.0,5.0,84.0,19.0,20.0,1.0,4.0,2.0,1.2850877193,84.0,-19.0,4.0,16.0,accepted +5.0,7.0,3.0,6.0,4.0,2.0,84.0,19.0,33.0,1.0,4.0,3.0,inf,84.0,-19.0,1.0,16.0,accepted +7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,1.0,4.0,3.0,inf,81.0,-17.0,0.0,19.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,3.0,inf,85.0,-19.0,5.0,15.0,accepted +5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,1.0,4.0,3.0,inf,83.0,-19.0,0.0,17.0,accepted +5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,1.0,4.0,4.0,inf,83.0,-19.0,0.0,17.0,accepted +7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,1.0,4.0,4.0,inf,81.0,-17.0,0.0,19.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,4.0,inf,85.0,-19.0,5.0,15.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,5.0,inf,85.0,-19.0,5.0,15.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,6.0,inf,85.0,-19.0,5.0,15.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,7.0,inf,85.0,-19.0,5.0,15.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,8.0,inf,85.0,-19.0,5.0,15.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,9.0,inf,85.0,-19.0,5.0,15.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,10.0,inf,85.0,-19.0,5.0,15.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,11.0,inf,85.0,-19.0,5.0,15.0,accepted +7.0,6.0,3.0,2.0,4.0,5.0,86.0,19.0,17.0,1.0,4.0,12.0,inf,86.0,-19.0,5.0,14.0,accepted 
+5.0,6.0,3.0,2.0,7.0,4.0,93.0,17.0,17.0,1.0,4.0,13.0,inf,93.0,-17.0,5.0,7.0,accepted +7.0,6.0,2.0,4.0,3.0,5.0,86.0,19.0,22.0,1.0,4.0,13.0,inf,86.0,-19.0,4.0,14.0,accepted +7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,29.0,1.0,4.0,14.0,inf,86.0,-15.0,2.0,14.0,accepted +4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,26.0,1.0,4.0,14.0,inf,95.0,-18.0,3.0,5.0,accepted +5.0,6.0,3.0,2.0,4.0,7.0,96.0,17.0,17.0,1.0,4.0,14.0,inf,96.0,-17.0,5.0,4.0,accepted +6.0,5.0,3.0,7.0,2.0,4.0,87.0,16.0,37.0,1.0,4.0,14.0,inf,87.0,-16.0,0.0,13.0,accepted +7.0,3.0,6.0,2.0,5.0,4.0,88.0,13.0,26.0,1.0,4.0,14.0,inf,88.0,-13.0,2.0,12.0,accepted +7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,23.0,1.0,4.0,14.0,1.4,89.0,-15.0,3.0,11.0,accepted +5.0,6.0,2.0,3.0,7.0,4.0,94.0,17.0,18.0,1.0,4.0,14.0,1.1,94.0,-17.0,5.0,6.0,accepted +4.0,6.0,7.0,2.0,5.0,3.0,88.0,16.0,29.0,1.0,4.0,14.0,0.7,88.0,-16.0,1.0,12.0,accepted +4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,29.0,1.0,4.0,15.0,inf,90.0,-16.0,1.0,10.0,accepted +4.0,6.0,3.0,2.0,7.0,5.0,98.0,16.0,17.0,1.0,4.0,15.0,inf,98.0,-16.0,5.0,2.0,accepted +7.0,3.0,4.0,6.0,5.0,2.0,86.0,13.0,36.0,1.0,4.0,15.0,inf,86.0,-13.0,0.0,14.0,accepted +7.0,3.0,6.0,2.0,5.0,4.0,88.0,13.0,26.0,1.0,4.0,15.0,0.324561403509,88.0,-13.0,2.0,12.0,accepted +6.0,4.0,7.0,3.0,2.0,5.0,87.0,14.0,33.0,1.0,4.0,15.0,1.04385964912,87.0,-14.0,0.0,13.0,accepted +4.0,6.0,7.0,3.0,2.0,5.0,89.0,16.0,33.0,1.0,4.0,15.0,0.324561403509,89.0,-16.0,0.0,11.0,accepted +6.0,4.0,5.0,2.0,3.0,7.0,94.0,14.0,23.0,1.0,4.0,15.0,1.06578947368,94.0,-14.0,3.0,6.0,accepted +7.0,3.0,5.0,2.0,6.0,4.0,90.0,13.0,23.0,1.0,4.0,15.0,inf,90.0,-13.0,3.0,10.0,accepted +4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,26.0,1.0,4.0,15.0,1.15789473684,95.0,-16.0,3.0,5.0,accepted +4.0,6.0,2.0,7.0,5.0,3.0,93.0,16.0,34.0,1.0,4.0,16.0,inf,93.0,-16.0,1.0,7.0,accepted +6.0,4.0,5.0,2.0,3.0,7.0,94.0,14.0,23.0,1.0,4.0,16.0,inf,94.0,-14.0,3.0,6.0,accepted +7.0,3.0,6.0,2.0,5.0,4.0,88.0,13.0,26.0,1.0,4.0,16.0,inf,88.0,-13.0,2.0,12.0,accepted +4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,33.0,1.0,4.0,17.0,inf,94.0,-14.0,1.0,6.0,accepted +6.0,3.0,5.0,2.0,7.0,4.0,94.0,12.0,23.0,1.0,4.0,17.0,inf,94.0,-12.0,3.0,6.0,accepted +7.0,3.0,6.0,2.0,5.0,4.0,88.0,13.0,26.0,1.0,4.0,17.0,inf,88.0,-13.0,2.0,12.0,accepted +6.0,3.0,5.0,2.0,7.0,4.0,94.0,12.0,23.0,1.0,4.0,18.0,inf,94.0,-12.0,3.0,6.0,accepted +2.0,6.0,3.0,7.0,5.0,4.0,100.0,14.0,37.0,1.0,4.0,18.0,inf,100.0,-14.0,0.0,0.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,1.0,inf,85.0,-19.0,5.0,15.0,accepted +7.0,6.0,3.0,5.0,4.0,2.0,80.0,19.0,29.0,0.0,5.0,1.0,inf,80.0,-19.0,2.0,20.0,accepted +6.0,7.0,4.0,2.0,5.0,3.0,83.0,20.0,20.0,0.0,5.0,1.0,inf,83.0,-20.0,4.0,17.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,1.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,0.0,5.0,2.0,inf,81.0,-17.0,0.0,19.0,accepted +6.0,7.0,4.0,3.0,2.0,5.0,84.0,20.0,24.0,0.0,5.0,2.0,inf,84.0,-20.0,3.0,16.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,2.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,2.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,4.0,2.0,3.0,5.0,84.0,19.0,20.0,0.0,5.0,2.0,1.2350877193,84.0,-19.0,4.0,16.0,accepted +6.0,7.0,3.0,5.0,2.0,4.0,83.0,20.0,29.0,0.0,5.0,2.0,1.5649122807,83.0,-20.0,2.0,17.0,accepted +7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,0.0,5.0,3.0,inf,81.0,-17.0,0.0,19.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,3.0,inf,85.0,-19.0,5.0,15.0,accepted +5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,5.0,3.0,inf,83.0,-19.0,0.0,17.0,accepted 
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,3.0,inf,86.0,-20.0,5.0,14.0,accepted +5.0,7.0,3.0,6.0,4.0,2.0,84.0,19.0,33.0,0.0,5.0,3.0,2.06666666667,84.0,-19.0,1.0,16.0,accepted +7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,0.0,5.0,4.0,inf,81.0,-17.0,0.0,19.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,4.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,4.0,inf,86.0,-20.0,5.0,14.0,accepted +5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,5.0,4.0,inf,83.0,-19.0,0.0,17.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,5.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,5.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,6.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,6.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,7.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,7.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,8.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,8.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,9.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,9.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,10.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,10.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,11.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,11.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,12.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,12.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,13.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,13.0,inf,86.0,-20.0,5.0,14.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,14.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,14.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,15.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,15.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,16.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,16.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,17.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,17.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,18.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,18.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,20.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,19.0,inf,85.0,-19.0,5.0,15.0,accepted +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,19.0,inf,86.0,-20.0,5.0,14.0,accepted +7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,1.0,inf,85.0,-19.0,5.0,15.0,final +7.0,6.0,3.0,5.0,4.0,2.0,80.0,19.0,29.0,0.0,5.0,1.0,inf,80.0,-19.0,2.0,20.0,final +6.0,7.0,4.0,2.0,5.0,3.0,83.0,20.0,20.0,0.0,5.0,1.0,inf,83.0,-20.0,4.0,17.0,final +6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,1.0,inf,86.0,-20.0,5.0,14.0,final From f1ad2b3ba1a1566eaa92cce5ca56f0da446ffd54 Mon 
Sep 17 00:00:00 2001 From: mohammad-abdo Date: Mon, 29 Jan 2024 10:43:48 -0700 Subject: [PATCH 56/84] Minor fixes to the fitness though a list of objective and penalty weights a, b --- ravenframework/Optimizers/GeneticAlgorithm.py | 10 +++++++--- ravenframework/Optimizers/fitness/fitness.py | 17 ++++++++--------- .../continuous/unconstrained/ZDT1.xml | 2 +- .../constrained/MinwoRepMultiObjective.xml | 4 ++-- 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index b450d21611..b0724f891b 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -456,14 +456,18 @@ def handleInput(self, paramInput): if self._fitnessType == 'feasibleFirst': if self._numOfConst != 0 and fitnessNode.findFirst('b') is not None: self._penaltyCoeff = fitnessNode.findFirst('b').value + self._objCoeff = fitnessNode.findFirst('a').value elif self._numOfConst == 0 and fitnessNode.findFirst('b') is not None: self.raiseAnError(IOError, f'The number of constraints used are 0 but there are penalty coefficieints') elif self._numOfConst != 0 and fitnessNode.findFirst('b') is None: - self._penaltyCoeff = list(np.repeat(1, self._numOfConst * len(self._objectiveVar))) #NOTE if penaltyCoeff is not provided, then assume they are all 1. + self._penaltyCoeff = [1] * self._numOfConst * len(self._objectiveVar) #list(np.repeat(1, self._numOfConst * len(self._objectiveVar))) #NOTE if penaltyCoeff is not provided, then assume they are all 1. + self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else [1] * len(self._objectiveVar) #list(np.repeat( else: - self._penaltyCoeff = list(np.repeat(0, len(self._objectiveVar))) + self._penaltyCoeff = [0] * len(self._objectiveVar) #list(np.repeat(0, len(self._objectiveVar))) + self._objCoeff = [1] * len(self._objectiveVar) else: self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None + self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None self._fitnessInstance = fitnessReturnInstance(self,name = self._fitnessType) self._repairInstance = repairReturnInstance(self,name='replacementRepair') # currently only replacement repair is implemented. @@ -716,7 +720,7 @@ def _useRealization(self, info, rlz): # 0 @ n-1: Survivor Selection from previous iteration (children+parents merging from previous generation) # 0.1 @ n-1: fitnessCalculation(rlz): Perform fitness calculation for newly obtained children (rlz) - + objInd = 1 if len(self._objectiveVar) == 1 else 2 constraintFuncs: dict = {1: GeneticAlgorithm.singleConstraint, 2: GeneticAlgorithm.multiConstraint} const = constraintFuncs.get(objInd, GeneticAlgorithm.singleConstraint) diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index c76f6dfab3..cb7800a015 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -59,10 +59,10 @@ def invLinear(rlz,**kwargs): the farthest from violating the constraint it is, The highest negative value it have the largest the violation is. @ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome. 
""" - a = [1.0] if kwargs['a'] == None else kwargs['a'] - b = [10.0] if kwargs['b'] == None else kwargs['b'] - penalty = 0.0 if kwargs['constraintFunction'].all() == None else kwargs['constraintFunction'].data - objVar = [kwargs['objVar']] if isinstance(kwargs['objVar'], str) == True else kwargs['objVar'] + a = [1.0] if kwargs['a'] == None else kwargs['a'] + b = [10.0] if kwargs['b'] == None else kwargs['b'] + penalty = 0.0 if kwargs['constraintFunction'].all() == None else kwargs['constraintFunction'].data + objVar = [kwargs['objVar']] if isinstance(kwargs['objVar'], str) == True else kwargs['objVar'] for j in range(len(objVar)): data = np.atleast_1d(rlz[objVar][objVar[j]].data) @@ -143,9 +143,8 @@ def feasibleFirst(rlz,**kwargs): 'constraintFunction', xr.Dataarray, containing all constraint functions (explicit and implicit) evaluations for the whole population @ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome. """ - optType = kwargs['type'] objVar = [kwargs['objVar']] if isinstance(kwargs['objVar'], str) == True else kwargs['objVar'] - a = 1.0 if kwargs['a'] == None else kwargs['a'] + a = kwargs['a'] if kwargs['constraintNum'] == 0: pen = kwargs['b'] else: @@ -160,11 +159,11 @@ def feasibleFirst(rlz,**kwargs): fitness = [] for ind in range(data.size): if kwargs['constraintNum'] == 0 or np.all(g.data[ind, :]>=0): - fit=(a*data[ind]) + fit=(a[i]*data[ind]) else: - fit = a*worstObj + fit = a[i]*worstObj for constInd,_ in enumerate(g['Constraint'].data): - fit = a*fit + objPen[objVar[i]][constInd]*(max(0,-1*g.data[ind, constInd])) #NOTE: objPen[objVar[i]][constInd] is "objective & Constraint specific penalty." + fit = a[i]*fit + objPen[objVar[i]][constInd]*(max(0,-1*g.data[ind, constInd])) #NOTE: objPen[objVar[i]][constInd] is "objective & Constraint specific penalty." 
if len(kwargs['type']) == 1: fitness.append(-1*fit) else: diff --git a/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml index d100a928b0..7db423e7b7 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml @@ -114,7 +114,7 @@ trajID - x1,x2,x3,obj1,obj2,age,batchId,rank,CD,fitness,accepted + x1,x2,x3,obj1,obj2,age,batchId,rank,CD,FitnessEvaluation_obj1,FitnessEvaluation_obj2,accepted diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml index 1483ffa8f5..1674fbf387 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml @@ -60,7 +60,7 @@ - 5 + 20 42 every min, max, min @@ -77,7 +77,7 @@ - 50, 50, 100, 100, 100, 150 + rankNcrowdingBased From d23ef44f65120744c3a58ce19990ec84a38d4674 Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Mon, 29 Jan 2024 14:02:19 -0700 Subject: [PATCH 57/84] test file for multi-objective optimization changed: the number of iterations 20 -> 5 --- .../discrete/constrained/MinwoRepMultiObjective.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml index 1674fbf387..2a525734e3 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml @@ -60,7 +60,7 @@ - 20 + 5 42 every min, max, min From f564f296e100af7a3c2f3a113be48345e3f46998 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Mon, 4 Mar 2024 16:23:53 -0700 Subject: [PATCH 58/84] devel is merged with enabling MinMaxList_vf_desk. 
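[Editor's note - illustrative sketch, not part of the patch series] PATCH 56/84 above makes the fitness accept list-valued weights: one objective coefficient a[j] per objective, and one penalty coefficient b[j*nConst + k] per objective-constraint pair, with defaults filled in by GeneticAlgorithm.handleInput. The helper below loosely mirrors that defaulting logic under simplified inputs; the name resolve_ga_weights and the plain-Python exceptions are assumptions for readability, not RAVEN's API.

def resolve_ga_weights(n_obj, n_const, a=None, b=None):
    # Hypothetical helper mirroring the feasibleFirst coefficient defaulting in the
    # handleInput hunk of PATCH 56/84 (illustrative names and error types only).
    if n_const != 0 and b is not None:
        return a, b                                   # both supplied (the patch assumes a comes with b)
    if n_const == 0 and b is not None:
        raise ValueError('penalty coefficients were given, but no constraints are defined')
    if n_const != 0 and b is None:
        return (a if a is not None else [1.0] * n_obj,    # missing objective weights default to 1
                [1.0] * (n_const * n_obj))                # missing penalties default to 1
    return [1.0] * n_obj, [0.0] * n_obj                   # unconstrained: unit weights, zero penalties

# Example: two objectives, three constraints, nothing supplied by the user:
#   resolve_ga_weights(2, 3)  ->  ([1.0, 1.0], [1.0] * 6)
# b[j*3 + k] then penalizes objective j for violating constraint k in the
# feasibleFirst fitness shown in the fitness.py hunk above.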
--- ravenframework/Optimizers/GeneticAlgorithm.py | 80 +++++++++---------- .../constrained/MinwoRepMultiObjective.xml | 4 +- .../Optimizers/GeneticAlgorithms/tests | 10 --- 3 files changed, 39 insertions(+), 55 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 7e320754ac..ab9e3720da 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -76,7 +76,6 @@ def __init__(self): self._requiredPersistence = 0 # consecutive persistence required to mark convergence self.needDenormalized() # the default in all optimizers is to normalize the data which is not the case here self.batchId = 0 -<<<<<<< HEAD self.population = None # panda Dataset container containing the population at the beginning of each generation iteration self.popAge = None # population age self.fitness = None # population fitness @@ -85,6 +84,7 @@ def __init__(self): self.crowdingDistance = None # population crowding distance (for Multi-objective optimization only) self.ahdp = np.NaN # p-Average Hausdorff Distance between populations self.ahd = np.NaN # Hausdorff Distance between populations + self.hdsm = np.NaN # Hausdorff Distance Similarity metric between populations self.bestPoint = None # the best solution (chromosome) found among population in a specific batchId self.bestFitness = None # fitness value of the best solution found self.bestObjective = None # objective value of the best solution found @@ -118,38 +118,38 @@ def __init__(self): self._fitnessInstance = None # instance of fitness self._repairInstance = None # instance of repair self._canHandleMultiObjective = True # boolean indicator whether optimization is a sinlge-objective problem or a multi-objective problem -======= - self.population = None # panda Dataset container containing the population at the beginning of each generation iteration - self.popAge = None # population age - self.fitness = None # population fitness - self.ahdp = np.NaN # p-Average Hausdorff Distance between populations - self.ahd = np.NaN # Hausdorff Distance between populations - self.hdsm = np.NaN # Hausdorff Distance Similarity metric between populations - self.bestPoint = None - self.bestFitness = None - self.bestObjective = None - self.objectiveVal = None - self._populationSize = None - self._parentSelectionType = None - self._parentSelectionInstance = None - self._nParents = None - self._nChildren = None - self._crossoverType = None - self._crossoverPoints = None - self._crossoverProb = None - self._crossoverInstance = None - self._mutationType = None - self._mutationLocs = None - self._mutationProb = None - self._mutationInstance = None - self._survivorSelectionType = None - self._survivorSelectionInstance = None - self._fitnessType = None - self._objCoeff = None - self._penaltyCoeff = None - self._fitnessInstance = None - self._repairInstance = None ->>>>>>> origin/devel +# ======= +# self.population = None # panda Dataset container containing the population at the beginning of each generation iteration +# self.popAge = None # population age +# self.fitness = None # population fitness +# self.ahdp = np.NaN # p-Average Hausdorff Distance between populations +# self.ahd = np.NaN # Hausdorff Distance between populations +# self.hdsm = np.NaN # Hausdorff Distance Similarity metric between populations +# self.bestPoint = None +# self.bestFitness = None +# self.bestObjective = None +# self.objectiveVal = None +# self._populationSize = None +# 
self._parentSelectionType = None +# self._parentSelectionInstance = None +# self._nParents = None +# self._nChildren = None +# self._crossoverType = None +# self._crossoverPoints = None +# self._crossoverProb = None +# self._crossoverInstance = None +# self._mutationType = None +# self._mutationLocs = None +# self._mutationProb = None +# self._mutationInstance = None +# self._survivorSelectionType = None +# self._survivorSelectionInstance = None +# self._fitnessType = None +# self._objCoeff = None +# self._penaltyCoeff = None +# self._fitnessInstance = None +# self._repairInstance = None +# >>>>>>> origin/devel ########################## # Initialization Methods # @@ -943,7 +943,6 @@ def _resolveNewGeneration(self, traj, rlz, objectiveVal, fitness, g, info): # NOTE: the solution export needs to be updated BEFORE we run rejectOptPoint or extend the opt # point history. if self._writeSteps == 'every': -<<<<<<< HEAD for i in range(rlz.sizes['RAVEN_sample_ID']): varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys()) rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) @@ -952,10 +951,6 @@ def _resolveNewGeneration(self, traj, rlz, objectiveVal, fitness, g, info): for ind, consName in enumerate(g['Constraint'].values): rlzDict['ConstraintEvaluation_'+consName] = g[i,ind] self._updateSolutionExport(traj, rlzDict, acceptable, None) -======= - self._solutionExportUtilityUpdate(traj, rlz, fitness, g, acceptable) - ->>>>>>> origin/devel # decide what to do next if acceptable in ['accepted', 'first']: # record history @@ -1478,12 +1473,11 @@ def _addToSolutionExport(self, traj, rlz, acceptable): # 'fitness': rlz['fitness'], 'AHDp': self.ahdp, 'AHD': self.ahd, -<<<<<<< HEAD 'rank': 0 if ((type(self._objectiveVar) == list and len(self._objectiveVar) == 1) or type(self._objectiveVar) == str) else rlz['rank'], - 'CD': 0 if ((type(self._objectiveVar) == list and len(self._objectiveVar) == 1) or type(self._objectiveVar) == str) else rlz['CD']} -======= - 'HDSM': self.hdsm} ->>>>>>> origin/devel + 'CD': 0 if ((type(self._objectiveVar) == list and len(self._objectiveVar) == 1) or type(self._objectiveVar) == str) else rlz['CD'], + 'HDSM': self.hdsm + } + for var, val in self.constants.items(): toAdd[var] = val diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml index 2a525734e3..740f63510c 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml @@ -9,9 +9,9 @@ - Multi_MinwoReplacement_wo_constraints_min_max_min_50_20 + Multi_MinwoReplacement/ optimize,print - 4 + 1 diff --git a/tests/framework/Optimizers/GeneticAlgorithms/tests b/tests/framework/Optimizers/GeneticAlgorithms/tests index 27364f32fb..1b8df1ba6a 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/tests +++ b/tests/framework/Optimizers/GeneticAlgorithms/tests @@ -374,8 +374,6 @@ rel_err = 0.001 [../] [../] - -<<<<<<< HEAD [./NSGA-II_MinwoRepMultiObjective] type = 'RavenFramework' input = 'discrete/constrained/MinwoRepMultiObjective.xml' @@ -392,14 +390,6 @@ [./csv] type = OrderedCSV output = 'continuous/unconstrained/ZDT1/opt_export_0.csv' -======= - [./GAwithEnsembleModelHDSMconvergence] - type = 'RavenFramework' - input = 
'continuous/unconstrained/test_ensemble_withGA_HDSM.xml' - [./csv] - type = OrderedCSV - output = 'continuous/unconstrained/ensemble_withGA_HDSM/opt_export.csv' ->>>>>>> origin/devel rel_err = 0.001 [../] [../] From 02a961ed397be317b0838b8f09bc5e2f0c9be84f Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Tue, 5 Mar 2024 20:51:03 -0700 Subject: [PATCH 59/84] Modifications are done: All unit tests and GeneticAlgorithms-related regression tests are passed. simpleKnapsackTournament has differ, but final results were identical. It is regolded. Multi_MinwoReplacement is also regoldened. --- ravenframework/Optimizers/GeneticAlgorithm.py | 38 +---- ravenframework/Optimizers/fitness/fitness.py | 8 +- .../parentSelectors/parentSelectors.py | 8 +- .../survivorSelectors/survivorSelectors.py | 7 +- .../Multi_MinwoReplacement/opt_export_0.csv | 80 +++++----- .../PrintOptOut_1.csv | 142 +++++++++--------- .../Optimizers/GeneticAlgorithms/tests | 30 ++-- .../MultiSumwConst/MinwoRepMultiObjective.xml | 2 +- .../unit_tests/Optimizers/testFitnessBased.py | 17 ++- .../Optimizers/testRankSelection.py | 5 +- .../Optimizers/testRouletteWheel.py | 2 + .../Optimizers/testTournamentSelection.py | 7 +- 12 files changed, 172 insertions(+), 174 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index ab9e3720da..2b70fd20b9 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -118,38 +118,6 @@ def __init__(self): self._fitnessInstance = None # instance of fitness self._repairInstance = None # instance of repair self._canHandleMultiObjective = True # boolean indicator whether optimization is a sinlge-objective problem or a multi-objective problem -# ======= -# self.population = None # panda Dataset container containing the population at the beginning of each generation iteration -# self.popAge = None # population age -# self.fitness = None # population fitness -# self.ahdp = np.NaN # p-Average Hausdorff Distance between populations -# self.ahd = np.NaN # Hausdorff Distance between populations -# self.hdsm = np.NaN # Hausdorff Distance Similarity metric between populations -# self.bestPoint = None -# self.bestFitness = None -# self.bestObjective = None -# self.objectiveVal = None -# self._populationSize = None -# self._parentSelectionType = None -# self._parentSelectionInstance = None -# self._nParents = None -# self._nChildren = None -# self._crossoverType = None -# self._crossoverPoints = None -# self._crossoverProb = None -# self._crossoverInstance = None -# self._mutationType = None -# self._mutationLocs = None -# self._mutationProb = None -# self._mutationInstance = None -# self._survivorSelectionType = None -# self._survivorSelectionInstance = None -# self._fitnessType = None -# self._objCoeff = None -# self._penaltyCoeff = None -# self._fitnessInstance = None -# self._repairInstance = None -# >>>>>>> origin/devel ########################## # Initialization Methods # @@ -497,7 +465,7 @@ def handleInput(self, paramInput): if self._fitnessType == 'feasibleFirst': if self._numOfConst != 0 and fitnessNode.findFirst('b') is not None: self._penaltyCoeff = fitnessNode.findFirst('b').value - self._objCoeff = fitnessNode.findFirst('a').value + self._objCoeff = fitnessNode.findFirst('a').value elif self._numOfConst == 0 and fitnessNode.findFirst('b') is not None: self.raiseAnError(IOError, f'The number of constraints used are 0 but there are penalty coefficieints') elif self._numOfConst != 0 and 
fitnessNode.findFirst('b') is None: @@ -761,8 +729,8 @@ def _useRealization(self, info, rlz): # 0 @ n-1: Survivor Selection from previous iteration (children+parents merging from previous generation) # 0.1 @ n-1: fitnessCalculation(rlz): Perform fitness calculation for newly obtained children (rlz) - - objInd = 1 if len(self._objectiveVar) == 1 else 2 + + objInd = int(len(self._objectiveVar)>1) + 1 #if len(self._objectiveVar) == 1 else 2 constraintFuncs: dict = {1: GeneticAlgorithm.singleConstraint, 2: GeneticAlgorithm.multiConstraint} const = constraintFuncs.get(objInd, GeneticAlgorithm.singleConstraint) traj, g, objectiveVal, offSprings, offSpringFitness = const(self, info, rlz) diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index f813dd34fe..4ba2cf1c4b 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -65,10 +65,10 @@ def invLinear(rlz,**kwargs): objVar = [kwargs['objVar']] if isinstance(kwargs['objVar'], str) == True else kwargs['objVar'] for j in range(len(objVar)): data = np.atleast_1d(rlz[objVar][objVar[j]].data) - fitness = -a[0] * (rlz[objVar][objVar[j]].data).reshape(-1,1) - b[0] * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1) + fitness = -a[j] * (rlz[objVar][objVar[j]].data).reshape(-1,1) - b[j] * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1) fitness = xr.DataArray(np.squeeze(fitness), - dims=['chromosome'], - coords={'chromosome': np.arange(len(data))}) + dims=['chromosome'], + coords={'chromosome': np.arange(len(data))}) if j == 0: fitnessSet = fitness.to_dataset(name = objVar[j]) else: @@ -109,7 +109,7 @@ def feasibleFirst(rlz,**kwargs): @ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome. """ objVar = [kwargs['objVar']] if isinstance(kwargs['objVar'], str) == True else kwargs['objVar'] - a = 1.0 if kwargs['a'] == None else kwargs['a'] + a = [1.0]*len(objVar) if kwargs['a'] == None else kwargs['a'] if kwargs['constraintNum'] == 0: pen = kwargs['b'] else: diff --git a/ravenframework/Optimizers/parentSelectors/parentSelectors.py b/ravenframework/Optimizers/parentSelectors/parentSelectors.py index 2a38cc3bed..34fc2c2725 100644 --- a/ravenframework/Optimizers/parentSelectors/parentSelectors.py +++ b/ravenframework/Optimizers/parentSelectors/parentSelectors.py @@ -48,6 +48,7 @@ def rouletteWheel(population,**kwargs): # Arguments pop = population fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist]) + # fitness = kwargs['fitness'].data nParents= kwargs['nParents'] # if nparents = population size then do nothing (whole population are parents) if nParents == pop.shape[0]: @@ -112,8 +113,8 @@ def tournamentSelection(population,**kwargs): fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist]) for i in range(nParents): matrixOperationRaw = np.zeros((kSelect,2)) - selectChromoIndexes = list(np.arange(kSelect)) - selectedChromo = randomUtils.randomChoice(selectChromoIndexes, size=kSelect, replace=False, engine=None) + selectChromoIndexes = list(np.arange(len(pop))) #NOTE: JYK - selectChromoIndexes should cover all chromosomes in population. + selectedChromo = randomUtils.randomChoice(selectChromoIndexes, size=kSelect, replace=False, engine=None) #NOTE: JYK - randomly select several indices with size of kSelect. 
matrixOperationRaw[:,0] = selectedChromo matrixOperationRaw[:,1] = np.transpose(fitness[selectedChromo]) tournamentWinnerIndex = int(matrixOperationRaw[np.argmax(matrixOperationRaw[:,1]),0]) @@ -156,7 +157,7 @@ def rankSelection(population,**kwargs): index = np.arange(0,pop.shape[0]) rank = np.arange(0,pop.shape[0]) - data = np.vstack((fitness,index)) + data = np.vstack((np.array(fitness.variables['test_RankSelection']),index)) dataOrderedByDecreasingFitness = data[:,(-data[0]).argsort()] dataOrderedByDecreasingFitness[0,:] = rank dataOrderedByIncreasingPos = dataOrderedByDecreasingFitness[:,dataOrderedByDecreasingFitness[1].argsort()] @@ -166,6 +167,7 @@ def rankSelection(population,**kwargs): dims=['chromosome'], coords={'chromosome': np.arange(np.shape(orderedRank)[0])}) + rank = rank.to_dataset(name = 'test_RankSelection') selectedParent = rouletteWheel(population, fitness=rank , nParents=kwargs['nParents'],variables=kwargs['variables']) return selectedParent diff --git a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py index 9702a91192..d2f2c227dd 100644 --- a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py +++ b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py @@ -129,7 +129,8 @@ def fitnessBased(newRlz,**kwargs): newFitness = xr.DataArray(newFitness, dims=['chromosome'], coords={'chromosome':np.arange(np.shape(newFitness)[0])}) - newFitness = newFitness.to_dataset(name = list(kwargs['fitness'].keys())[0]) + # newFitness = newFitness.to_dataset(name = list(kwargs['fitness'].keys())[0]) + newFitness = newFitness.to_dataset(name = list(kwargs['variables'])[0]) #return newPopulationArray,newFitness,newAge return newPopulationArray,newFitness,newAge,kwargs['popObjectiveVal'] @@ -162,7 +163,7 @@ def rankNcrowdingBased(offsprings, **kwargs): popFitArray = [] offFit = kwargs['offFit'] offFitArray = [] - for i in list(popFit.keys()): #NOTE popFit.keys() and offFit.keys() must be same. + for i in list(popFit.keys()): #NOTE popFit.keys() and offFit.keys() must be same. 
popFitArray.append(popFit[i].data.tolist()) offFitArray.append(offFit[i].data.tolist()) @@ -189,7 +190,7 @@ def rankNcrowdingBased(offsprings, **kwargs): newAge = list(map(lambda x:x+1, popAge)) newPopulationMerged = np.concatenate([population,offSprings]) newAge.extend([0]*len(offSprings)) - + sortedRank,sortedCD,sortedAge,sortedPopulation,sortedFit,sortedObjectives,sortedConstV = \ zip(*[(x,y,z,i,j,k,a) for x,y,z,i,j,k,a in \ sorted(zip(newPopRank.data, newPopCD.data, newAge, newPopulationMerged.tolist(), newFitMerged_pair, newObjectivesMerged_pair, newConstVMerged),reverse=False,key=lambda x: (x[0], -x[1]))]) diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv index 74d8849c58..ff6c528364 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv +++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv @@ -1,54 +1,54 @@ x1,x2,x3,x4,x5,x6,obj1,obj2,obj3,age,batchId,rank,CD,FitnessEvaluation_obj1,FitnessEvaluation_obj2,ConstraintEvaluation_expConstr3,ConstraintEvaluation_impConstr3,accepted -4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,1.0,14.0,inf,210.0,193.0,-2.0,1.0,first +4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,1.0,14.0,inf,112.0,-5.0,-2.0,1.0,first 7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,23.0,0.0,1.0,3.0,1.6,89.0,-15.0,3.0,11.0,first -4.0,3.0,6.0,7.0,2.0,5.0,96.0,10.0,46.0,0.0,1.0,18.0,inf,260.0,293.0,-3.0,4.0,first +4.0,3.0,6.0,7.0,2.0,5.0,96.0,10.0,46.0,0.0,1.0,18.0,inf,113.0,-4.0,-3.0,4.0,first 7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,29.0,0.0,1.0,3.0,inf,86.0,-15.0,2.0,14.0,first 4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,29.0,0.0,1.0,4.0,inf,90.0,-16.0,1.0,10.0,first 4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,26.0,0.0,1.0,4.0,0.899122807018,95.0,-16.0,3.0,5.0,first 4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,26.0,0.0,1.0,3.0,inf,95.0,-18.0,3.0,5.0,first +6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,23.0,0.0,1.0,4.0,inf,97.0,-12.0,3.0,3.0,first 4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,33.0,0.0,1.0,5.0,1.59523809524,94.0,-14.0,1.0,6.0,first -2.0,4.0,6.0,5.0,3.0,7.0,105.0,10.0,38.0,0.0,1.0,24.0,inf,410.0,593.0,-1.0,-5.0,first +2.0,4.0,6.0,5.0,3.0,7.0,105.0,10.0,38.0,0.0,1.0,25.0,inf,116.0,-1.0,-1.0,-5.0,first 7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,1.0,1.0,inf,85.0,-19.0,5.0,15.0,first -6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,23.0,0.0,1.0,4.0,inf,97.0,-12.0,3.0,3.0,first -6.0,2.0,7.0,5.0,3.0,4.0,90.0,10.0,41.0,0.0,1.0,15.0,inf,210.0,193.0,-2.0,10.0,first -3.0,6.0,4.0,7.0,5.0,2.0,92.0,15.0,40.0,0.0,1.0,8.0,inf,160.0,93.0,-1.0,8.0,first -2.0,3.0,6.0,7.0,4.0,5.0,104.0,8.0,46.0,0.0,1.0,26.0,inf,460.0,693.0,-3.0,-4.0,first +6.0,2.0,7.0,5.0,3.0,4.0,90.0,10.0,41.0,0.0,1.0,15.0,inf,112.0,-5.0,-2.0,10.0,first +3.0,6.0,4.0,7.0,5.0,2.0,92.0,15.0,40.0,0.0,1.0,8.0,inf,111.0,-6.0,-1.0,8.0,first +2.0,3.0,6.0,7.0,4.0,5.0,104.0,8.0,46.0,0.0,1.0,26.0,inf,117.0,0.0,-3.0,-4.0,first 7.0,3.0,5.0,4.0,2.0,6.0,90.0,13.0,31.0,0.0,1.0,5.0,inf,90.0,-13.0,1.0,10.0,first -5.0,3.0,2.0,6.0,4.0,7.0,103.0,11.0,30.0,0.0,1.0,22.0,inf,260.0,293.0,2.0,-3.0,first -6.0,3.0,4.0,7.0,2.0,5.0,92.0,12.0,40.0,0.0,1.0,9.0,inf,160.0,93.0,-1.0,8.0,first -2.0,5.0,4.0,6.0,7.0,3.0,101.0,12.0,36.0,0.0,1.0,13.0,inf,160.0,93.0,0.0,-1.0,first +5.0,3.0,2.0,6.0,4.0,7.0,103.0,11.0,30.0,0.0,1.0,19.0,inf,113.0,-4.0,2.0,-3.0,first 
+6.0,3.0,4.0,7.0,2.0,5.0,92.0,12.0,40.0,0.0,1.0,9.0,inf,111.0,-6.0,-1.0,8.0,first +2.0,5.0,4.0,6.0,7.0,3.0,101.0,12.0,36.0,0.0,1.0,10.0,inf,111.0,-6.0,0.0,-1.0,first 4.0,6.0,2.0,7.0,5.0,3.0,93.0,16.0,34.0,0.0,1.0,5.0,inf,93.0,-16.0,1.0,7.0,first 5.0,6.0,3.0,2.0,7.0,4.0,93.0,17.0,17.0,0.0,1.0,2.0,inf,93.0,-17.0,5.0,7.0,first -3.0,6.0,7.0,4.0,5.0,2.0,89.0,15.0,37.0,0.0,1.0,10.0,inf,160.0,93.0,-1.0,11.0,first -3.0,6.0,4.0,2.0,5.0,7.0,102.0,15.0,20.0,0.0,1.0,17.0,inf,210.0,193.0,4.0,-2.0,first +3.0,6.0,7.0,4.0,5.0,2.0,89.0,15.0,37.0,0.0,1.0,11.0,inf,111.0,-6.0,-1.0,11.0,first +3.0,6.0,4.0,2.0,5.0,7.0,102.0,15.0,20.0,0.0,1.0,16.0,inf,112.0,-5.0,4.0,-2.0,first 6.0,2.0,5.0,3.0,4.0,7.0,99.0,10.0,27.0,0.0,1.0,5.0,inf,99.0,-10.0,2.0,1.0,first -4.0,3.0,2.0,6.0,5.0,7.0,107.0,10.0,30.0,0.0,1.0,27.0,inf,460.0,693.0,2.0,-7.0,first -7.0,2.0,6.0,5.0,3.0,4.0,88.0,11.0,38.0,0.0,1.0,11.0,inf,160.0,93.0,-1.0,12.0,first -5.0,3.0,6.0,7.0,2.0,4.0,91.0,11.0,46.0,0.0,1.0,20.0,inf,260.0,293.0,-3.0,9.0,first -4.0,2.0,3.0,5.0,6.0,7.0,109.0,8.0,29.0,0.0,1.0,29.0,inf,560.0,893.0,2.0,-9.0,first -5.0,3.0,6.0,7.0,4.0,2.0,89.0,11.0,46.0,0.0,1.0,21.0,inf,260.0,293.0,-3.0,11.0,first -2.0,3.0,5.0,6.0,4.0,7.0,109.0,8.0,39.0,0.0,1.0,30.0,inf,610.0,993.0,-1.0,-9.0,first -3.0,2.0,7.0,4.0,6.0,5.0,104.0,7.0,37.0,0.0,1.0,23.0,inf,360.0,493.0,-1.0,-4.0,first +4.0,3.0,2.0,6.0,5.0,7.0,107.0,10.0,30.0,0.0,1.0,27.0,inf,117.0,0.0,2.0,-7.0,first +7.0,2.0,6.0,5.0,3.0,4.0,88.0,11.0,38.0,0.0,1.0,12.0,inf,111.0,-6.0,-1.0,12.0,first +5.0,3.0,6.0,7.0,2.0,4.0,91.0,11.0,46.0,0.0,1.0,21.0,inf,113.0,-4.0,-3.0,9.0,first +4.0,2.0,3.0,5.0,6.0,7.0,109.0,8.0,29.0,0.0,1.0,29.0,inf,119.0,2.0,2.0,-9.0,first +5.0,3.0,6.0,7.0,4.0,2.0,89.0,11.0,46.0,0.0,1.0,22.0,inf,113.0,-4.0,-3.0,11.0,first +2.0,3.0,5.0,6.0,4.0,7.0,109.0,8.0,39.0,0.0,1.0,31.0,inf,120.0,3.0,-1.0,-9.0,first +3.0,2.0,7.0,4.0,6.0,5.0,104.0,7.0,37.0,0.0,1.0,23.0,inf,115.0,-2.0,-1.0,-4.0,first 5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,1.0,1.0,0.907894736842,83.0,-19.0,0.0,17.0,first 3.0,5.0,6.0,4.0,7.0,2.0,94.0,13.0,34.0,0.0,1.0,6.0,inf,94.0,-13.0,0.0,6.0,first 7.0,6.0,2.0,4.0,3.0,5.0,86.0,19.0,22.0,0.0,1.0,2.0,inf,86.0,-19.0,4.0,14.0,first 7.0,2.0,5.0,4.0,6.0,3.0,90.0,11.0,31.0,0.0,1.0,6.0,inf,90.0,-11.0,1.0,10.0,first -3.0,4.0,2.0,6.0,7.0,5.0,106.0,11.0,30.0,0.0,1.0,25.0,inf,410.0,593.0,2.0,-6.0,first +3.0,4.0,2.0,6.0,7.0,5.0,106.0,11.0,30.0,0.0,1.0,24.0,inf,116.0,-1.0,2.0,-6.0,first 4.0,6.0,3.0,2.0,7.0,5.0,98.0,16.0,17.0,0.0,1.0,4.0,inf,98.0,-16.0,5.0,2.0,first 4.0,6.0,7.0,3.0,2.0,5.0,89.0,16.0,33.0,0.0,1.0,4.0,1.45175438596,89.0,-16.0,0.0,11.0,first 5.0,6.0,2.0,3.0,7.0,4.0,94.0,17.0,18.0,0.0,1.0,3.0,1.43333333333,94.0,-17.0,5.0,6.0,first 7.0,2.0,5.0,4.0,3.0,6.0,93.0,11.0,31.0,0.0,1.0,7.0,inf,93.0,-11.0,1.0,7.0,first 7.0,3.0,4.0,6.0,5.0,2.0,86.0,13.0,36.0,0.0,1.0,4.0,inf,86.0,-13.0,0.0,14.0,first 5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,1.0,2.0,inf,83.0,-19.0,0.0,17.0,first -5.0,2.0,7.0,4.0,6.0,3.0,94.0,9.0,37.0,0.0,1.0,12.0,inf,160.0,93.0,-1.0,6.0,first -3.0,2.0,5.0,6.0,4.0,7.0,108.0,7.0,39.0,0.0,1.0,28.0,inf,560.0,893.0,-1.0,-8.0,first +5.0,2.0,7.0,4.0,6.0,3.0,94.0,9.0,37.0,0.0,1.0,13.0,inf,111.0,-6.0,-1.0,6.0,first +3.0,2.0,5.0,6.0,4.0,7.0,108.0,7.0,39.0,0.0,1.0,28.0,inf,119.0,2.0,-1.0,-8.0,first 7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,0.0,1.0,1.0,inf,81.0,-17.0,0.0,19.0,first -2.0,4.0,7.0,5.0,6.0,3.0,99.0,10.0,41.0,0.0,1.0,16.0,inf,210.0,193.0,-2.0,1.0,first +2.0,4.0,7.0,5.0,6.0,3.0,99.0,10.0,41.0,0.0,1.0,17.0,inf,112.0,-5.0,-2.0,1.0,first 
4.0,6.0,7.0,2.0,5.0,3.0,88.0,16.0,29.0,0.0,1.0,3.0,inf,88.0,-16.0,1.0,12.0,first 5.0,6.0,3.0,2.0,4.0,7.0,96.0,17.0,17.0,0.0,1.0,3.0,inf,96.0,-17.0,5.0,4.0,first 5.0,7.0,3.0,6.0,4.0,2.0,84.0,19.0,33.0,0.0,1.0,1.0,2.5,84.0,-19.0,1.0,16.0,first -4.0,3.0,7.0,6.0,2.0,5.0,95.0,10.0,45.0,0.0,1.0,19.0,inf,260.0,293.0,-3.0,5.0,first -2.0,4.0,3.0,5.0,7.0,6.0,110.0,10.0,29.0,0.0,1.0,31.0,inf,610.0,993.0,2.0,-10.0,first +4.0,3.0,7.0,6.0,2.0,5.0,95.0,10.0,45.0,0.0,1.0,20.0,inf,113.0,-4.0,-3.0,5.0,first +2.0,4.0,3.0,5.0,7.0,6.0,110.0,10.0,29.0,0.0,1.0,30.0,inf,120.0,3.0,2.0,-10.0,first 7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,2.0,1.0,inf,85.0,-19.0,5.0,15.0,accepted 7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,0.0,2.0,1.0,inf,81.0,-17.0,0.0,19.0,accepted 5.0,7.0,3.0,6.0,4.0,2.0,84.0,19.0,33.0,0.0,2.0,1.0,1.5,84.0,-19.0,1.0,16.0,accepted @@ -84,21 +84,21 @@ x1,x2,x3,x4,x5,x6,obj1,obj2,obj3,age,batchId,rank,CD,FitnessEvaluation_obj1,Fitn 6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,23.0,0.0,2.0,8.0,inf,97.0,-12.0,3.0,3.0,accepted 6.0,2.0,5.0,3.0,4.0,7.0,99.0,10.0,27.0,0.0,2.0,10.0,inf,99.0,-10.0,2.0,1.0,accepted 5.0,3.0,2.0,7.0,6.0,4.0,99.0,11.0,34.0,0.0,2.0,10.0,1.72916666667,99.0,-11.0,1.0,1.0,accepted -3.0,6.0,4.0,7.0,5.0,2.0,92.0,15.0,40.0,0.0,2.0,6.0,inf,160.0,93.0,-1.0,8.0,accepted -6.0,3.0,4.0,7.0,2.0,5.0,92.0,12.0,40.0,0.0,2.0,7.0,1.25,160.0,93.0,-1.0,8.0,accepted -3.0,6.0,7.0,4.0,5.0,2.0,89.0,15.0,37.0,0.0,2.0,5.0,1.0,160.0,93.0,-1.0,11.0,accepted -7.0,2.0,6.0,5.0,3.0,4.0,88.0,11.0,38.0,0.0,2.0,5.0,inf,160.0,93.0,-1.0,12.0,accepted -5.0,2.0,7.0,4.0,6.0,3.0,94.0,9.0,37.0,0.0,2.0,8.0,inf,160.0,93.0,-1.0,6.0,accepted -2.0,5.0,4.0,6.0,7.0,3.0,101.0,12.0,36.0,0.0,2.0,10.0,inf,160.0,93.0,0.0,-1.0,accepted -6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,6.0,inf,199.0,190.0,-2.0,11.0,accepted -4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,2.0,12.0,inf,199.0,190.0,-2.0,1.0,accepted -6.0,4.0,5.0,7.0,2.0,3.0,85.0,14.0,43.0,0.0,2.0,3.0,inf,199.0,190.0,-2.0,15.0,accepted -6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,7.0,inf,199.0,190.0,-2.0,11.0,accepted -4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,2.0,13.0,inf,199.0,190.0,-2.0,1.0,accepted -6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,8.0,inf,199.0,190.0,-2.0,11.0,accepted -6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,9.0,inf,199.0,190.0,-2.0,11.0,accepted -4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,2.0,11.0,inf,199.0,190.0,-2.0,1.0,accepted -6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,10.0,inf,199.0,190.0,-2.0,11.0,accepted +3.0,6.0,4.0,7.0,5.0,2.0,92.0,15.0,40.0,0.0,2.0,6.0,inf,111.0,-6.0,-1.0,8.0,accepted +6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,6.0,inf,101.0,-8.0,-2.0,11.0,accepted +6.0,3.0,4.0,7.0,2.0,5.0,92.0,12.0,40.0,0.0,2.0,7.0,1.25,111.0,-6.0,-1.0,8.0,accepted +4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,2.0,13.0,inf,101.0,-8.0,-2.0,1.0,accepted +2.0,5.0,4.0,6.0,7.0,3.0,101.0,12.0,36.0,0.0,2.0,10.0,inf,111.0,-6.0,0.0,-1.0,accepted +6.0,4.0,5.0,7.0,2.0,3.0,85.0,14.0,43.0,0.0,2.0,3.0,inf,101.0,-8.0,-2.0,15.0,accepted +3.0,6.0,7.0,4.0,5.0,2.0,89.0,15.0,37.0,0.0,2.0,5.0,1.0,111.0,-6.0,-1.0,11.0,accepted +6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,7.0,inf,101.0,-8.0,-2.0,11.0,accepted +7.0,2.0,6.0,5.0,3.0,4.0,88.0,11.0,38.0,0.0,2.0,5.0,inf,111.0,-6.0,-1.0,12.0,accepted +4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,2.0,12.0,inf,101.0,-8.0,-2.0,1.0,accepted +5.0,2.0,7.0,4.0,6.0,3.0,94.0,9.0,37.0,0.0,2.0,8.0,inf,111.0,-6.0,-1.0,6.0,accepted +6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,8.0,inf,101.0,-8.0,-2.0,11.0,accepted 
+6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,9.0,inf,101.0,-8.0,-2.0,11.0,accepted +4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,2.0,11.0,inf,101.0,-8.0,-2.0,1.0,accepted +6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,10.0,inf,101.0,-8.0,-2.0,11.0,accepted 7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,3.0,1.0,inf,85.0,-19.0,5.0,15.0,accepted 7.0,6.0,3.0,5.0,4.0,2.0,80.0,19.0,29.0,1.0,3.0,1.0,inf,80.0,-19.0,2.0,20.0,accepted 6.0,7.0,4.0,2.0,5.0,3.0,83.0,20.0,20.0,1.0,3.0,1.0,inf,83.0,-20.0,4.0,17.0,accepted diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/unconstrained/simpleKnapsackTournament/PrintOptOut_1.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/unconstrained/simpleKnapsackTournament/PrintOptOut_1.csv index a5bd67a05d..d37b1dec07 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/unconstrained/simpleKnapsackTournament/PrintOptOut_1.csv +++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/unconstrained/simpleKnapsackTournament/PrintOptOut_1.csv @@ -1,71 +1,71 @@ -proj1,proj2,proj3,proj4,proj5,proj6,proj7,proj8,proj9,proj10,planValue,validPlan,ProbabilityWeight-proj4,ProbabilityWeight-proj6,PointProbability,ProbabilityWeight-proj8,ProbabilityWeight-proj1,prefix,ProbabilityWeight-proj3,ProbabilityWeight-proj2,ProbabilityWeight-proj5,ProbabilityWeight-proj9,ProbabilityWeight,batchId,ProbabilityWeight-proj7,ProbabilityWeight-proj10 -0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,3,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,3,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,1,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 
-0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 -0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,2,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,3,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,2,1.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,4,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 -0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,5,0.5,0.5 +proj1,proj2,proj3,proj4,proj5,proj6,proj7,proj8,proj9,proj10,planValue,validPlan,ProbabilityWeight-proj7,ProbabilityWeight-proj4,PointProbability,ProbabilityWeight,ProbabilityWeight-proj6,ProbabilityWeight-proj9,ProbabilityWeight-proj2,ProbabilityWeight-proj1,ProbabilityWeight-proj8,batchId,ProbabilityWeight-proj5,ProbabilityWeight-proj10,ProbabilityWeight-proj3,prefix +0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 +0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 +0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 +1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,3,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 +0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 +0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 +0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 +1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,3,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 +0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 
+0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 +0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,1.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,3,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,3,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,-2,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,1.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,3,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,8,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 +0.0,1.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,2,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 +0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 +0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5 diff --git a/tests/framework/Optimizers/GeneticAlgorithms/tests b/tests/framework/Optimizers/GeneticAlgorithms/tests index 1b8df1ba6a..83345c7836 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/tests +++ b/tests/framework/Optimizers/GeneticAlgorithms/tests @@ -377,20 +377,30 @@ [./NSGA-II_MinwoRepMultiObjective] type = 'RavenFramework' input = 'discrete/constrained/MinwoRepMultiObjective.xml' - [./csv] - type = OrderedCSV - output = 'discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv' - rel_err = 0.001 - [../] + # [./csv] + # type = UnorderedCSV + UnorderedCsv = 'discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv' + rel_err = 0.001 + # [../] [../] [./NSGA-II_ZDT1] type = 'RavenFramework' input = 'continuous/unconstrained/ZDT1.xml' - [./csv] - type = OrderedCSV - output = 'continuous/unconstrained/ZDT1/opt_export_0.csv' - rel_err = 0.001 - 
[../] + # [./csv] + # type = OrderedCSV + UnorderedCsv = 'continuous/unconstrained/ZDT1/opt_export_0.csv' + rel_err = 0.001 + # [../] + [../] + + [./NSGA-II_Beale] + type = 'RavenFramework' + input = 'continuous/unconstrained/MultiObjectiveBeale-Bealeflipped.xml' + # [./csv] + # type = OrderedCSV + UnorderedCsv = 'continuous/unconstrained/Multi_beale_bealeFlipped/opt_export_0.csv' + rel_err = 0.001 + # [../] [../] [] diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml index 700d248103..d961e60adf 100644 --- a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml +++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml @@ -11,7 +11,7 @@ Multi_MinwoReplacement_Figure_720 optimize,print - 2 + 1 diff --git a/tests/framework/unit_tests/Optimizers/testFitnessBased.py b/tests/framework/unit_tests/Optimizers/testFitnessBased.py index 0084aa028b..5fa79c77d0 100644 --- a/tests/framework/unit_tests/Optimizers/testFitnessBased.py +++ b/tests/framework/unit_tests/Optimizers/testFitnessBased.py @@ -92,9 +92,14 @@ def formatSample(vars): popFitness = xr.DataArray(popFitness, dims=['chromosome'], coords={'chromosome': np.arange(np.shape(popFitness)[0])}) +popFitnessSet = popFitness.to_dataset(name = "test_popFitness") popAge = [3,1,7,1] offSprings = [[2,3,4,5,6,1],[1,3,5,2,4,6],[1,2,4,3,6,5]] offSpringsFitness = [1.1,2.0,3.2] +offSpringsFitness = xr.DataArray(offSpringsFitness, + dims=['chromosome'], + coords={'chromosome': np.arange(np.shape(offSpringsFitness)[0])}) +offSpringsFitnessSet = offSpringsFitness.to_dataset(name = "test_offFitness") rlz =[] for i in range(np.shape(offSprings)[0]): d = {} @@ -104,10 +109,11 @@ def formatSample(vars): d[var] = {'dims':() ,'data': val} rlz.append(xr.Dataset.from_dict(d)) rlz = xr.concat(rlz,dim='data') -newPop2,newFit2,newAge2,popFitness2 = fitnessBased(rlz, age=popAge, variables=optVars, population=population, fitness=popFitness, offSpringsFitness=offSpringsFitness, popObjectiveVal=popFitness) +newPop2,newFit2,newAge2,popFitness2 = fitnessBased(rlz, age=popAge, variables=optVars, population=population, fitness=popFitnessSet, offSpringsFitness=offSpringsFitnessSet, popObjectiveVal=popFitness) +print('*'*39) print('Fitness Based Selection') -print('*'*19) -print('new population: {}, \n new Fitness {}, \n new age'.format(newPop2,newFit2,newAge2)) +print('*'*39) +print('1. New population:\n {}, \n2. New Fitness:\n {}, \n3. 
New age:\n'.format(newPop2.data,newFit2.to_dataarray(dim = 'variable', name = None)[0],newAge2)) print('Note that the second and forth chromosome had the same age, but for the age based mechanism it omitted the one with the lowest fitness') expectedPop = xr.DataArray([[6,5,4,3,2,1], [1,2,3,4,5,6], @@ -119,13 +125,16 @@ def formatSample(vars): expectedFit = xr.DataArray([9.5,7.2,3.2,2.0], dims=['chromosome'], coords={'chromosome':np.arange(np.shape(population)[0])}) + +expectedFit = expectedFit.to_dataset(name = 'x1') + expectedAge = [8,4,0,0] ## TESTING # Test survivor population checkSameDataArrays('Check survived population data array',newPop2,expectedPop) # Test survivor fitnesses -checkSameDataArrays('Check fitness for survived population data array',newFit2,expectedFit) +checkSameDataArrays('Check fitness for survived population data array',newFit2, expectedFit) # Test survivor Ages checkSameListOfInt('Check fitness for survived individuals',np.array(newAge2),np.array(expectedAge)) # diff --git a/tests/framework/unit_tests/Optimizers/testRankSelection.py b/tests/framework/unit_tests/Optimizers/testRankSelection.py index d3ecaf807e..c2ea374381 100644 --- a/tests/framework/unit_tests/Optimizers/testRankSelection.py +++ b/tests/framework/unit_tests/Optimizers/testRankSelection.py @@ -99,9 +99,12 @@ def formatSample(vars): dims=['chromosome'], coords={'chromosome': np.arange(np.shape(popFitness)[0])}) nParents = 2 +popFitness = popFitness.to_dataset(name = 'test_RankSelection') + parents = rankSelection(population, variables=optVars, fitness=popFitness, nParents=nParents) +print('*'*39) print('Rank based Parent Selection') -print('*'*19) +print('*'*39) print('selected parents are: {}'.format(parents)) expectedParents = xr.DataArray([[3,5,6,2,1,4], [1,2,3,4,5,6]], diff --git a/tests/framework/unit_tests/Optimizers/testRouletteWheel.py b/tests/framework/unit_tests/Optimizers/testRouletteWheel.py index eb5e659800..6292af6996 100644 --- a/tests/framework/unit_tests/Optimizers/testRouletteWheel.py +++ b/tests/framework/unit_tests/Optimizers/testRouletteWheel.py @@ -67,6 +67,8 @@ def checkSameDataArrays(comment, resultedDA, expectedDA, update=True): popFitness = xr.DataArray(popFitness, dims=['chromosome'], coords={'chromosome': np.arange(np.shape(popFitness)[0])}) +popFitness = popFitness.to_dataset(name = 'test_RouletteWheel') + nParents = 2 parents = rouletteWheel(population, variables=optVars, fitness=popFitness, nParents=nParents) print('Roulette Wheel Parent Selection') diff --git a/tests/framework/unit_tests/Optimizers/testTournamentSelection.py b/tests/framework/unit_tests/Optimizers/testTournamentSelection.py index edddaa8db4..b0131f60e2 100644 --- a/tests/framework/unit_tests/Optimizers/testTournamentSelection.py +++ b/tests/framework/unit_tests/Optimizers/testTournamentSelection.py @@ -94,9 +94,12 @@ def formatSample(vars): popFitness = xr.DataArray(popFitness, dims=['chromosome'], coords={'chromosome': np.arange(np.shape(popFitness)[0])}) +popFitness = popFitness.to_dataset(name = 'test_TournamentSelection') nParents = 2 -parents = tournamentSelection(population, variables=optVars, fitness=popFitness, nParents=nParents) -print('Roulette Wheel Parent Selection') +objVal = [10] +kSelection = 2 +parents = tournamentSelection(population, variables=optVars, fitness=popFitness, nParents=nParents, objVal=objVal, kSelection=kSelection) +print('Parent Selection with TournamentSelection algorithm') print('*'*19) print('selected parents are: {}'.format(parents)) expectedParents = 
xr.DataArray([[1,2,3,4,5,6], From ed460f909c31409de642f0adfe2dbdd46b210959 Mon Sep 17 00:00:00 2001 From: JunyungKim Date: Tue, 5 Mar 2024 21:31:04 -0700 Subject: [PATCH 60/84] SimulateData.py is now identical with the one from devel branch. --- .../SIMULATE3/SimulateData.py | 44 +------------------ 1 file changed, 2 insertions(+), 42 deletions(-) diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py index 18044a736b..817a7d5381 100644 --- a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py +++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py @@ -43,8 +43,6 @@ def __init__(self,filen): self.data["PinPowerPeaking"] = self.pinPeaking() self.data["exposure"] = self.burnupEOC() self.data["assembly_power"] = self.assemblyPeakingFactors() - self.data["fuel_type"] = self.fa_type() -# self.data["pin_peaking"] = self.pinPeaking() # this is a dummy variable for demonstration with MOF # check if something has been found if all(v is None for v in self.data.values()): @@ -213,7 +211,7 @@ def EOCEFPD(self): if not list_: return ValueError("No values returned. Check Simulate File executed correctly") else: - outputDict = {'info_ids':['MaxEFPD'], 'values': [list_[-1]]} + outputDict = {'info_ids':['MaxEFPD'], 'values': [list_[-1]] } return outputDict @@ -488,45 +486,6 @@ def burnupEOC(self): return outputDict - def fa_type(self): - ''' - Extracts the fuel type and calculates the fuel cost based on the amount and enrichment of each fuel type. - ''' - #fuel_type = [] - FAlist = [] - for line in self.lines: - if "'FUE.TYP'" in line: - p1 = line.index(",") - p2 = line.index("/") - search_space = line[p1:p2] - search_space = search_space.replace(",","") - tmp= search_space.split() - for ii in tmp: - FAlist.append(float(ii)) - FAtype = list(set(FAlist)) - FAlist_A = FAlist[0] - FAlist_B = FAlist[1:9] + FAlist[9:73:9] - FAlist_C = FAlist[10:18] + FAlist[19:27] + FAlist[28:36] + FAlist[37:45] + FAlist[46:54] + FAlist[55:63] + FAlist[64:72] + FAlist[73:81] - FAcount_A = [float(fa == FAlist_A) for fa in FAtype] - FAcount_B = [float(FAlist_B.count(fa)*2) for fa in FAtype] - FAcount_C = [float(FAlist_C.count(fa)*4) for fa in FAtype] - FAcount = [FAcount_A[j] + FAcount_B[j] + FAcount_C[j] for j in range(len(FAtype))] - print(FAcount) - #stop - #Considering that: FA type 0 is empty, type 1 reflector, type 2 2% enrichment, types 3 and 4 2.5% enrichment, and types 5 and 6 3.2% enrichment. The cost of burnable is not being considered - if len(FAcount) == 7: - fuel_cost = (FAcount[0] + FAcount[1])*0 + FAcount[2]*2.69520839 + (FAcount[3] + FAcount[4])*3.24678409 + (FAcount[5] + FAcount[6])*4.03739539 - else: - fuel_cost = (FAcount[0] + FAcount[1])*0 + FAcount[2]*2.69520839 + (FAcount[3] + FAcount[4])*3.24678409 + (FAcount[5])*4.03739539 - print(fuel_cost) - #fuel_type.append(float(search_space)) - #stop - if not fuel_cost: - return ValueError("No values returned. Check Simulate File executed correctly") - else: - outputDict = {'info_ids':['fuel_cost'], 'values': [fuel_cost]} - return outputDict - def writeCSV(self, fileout): """ Print Data into CSV format @@ -546,3 +505,4 @@ def writeCSV(self, fileout): index=index+1 numpy.savetxt(fileObject, outputMatrix.T, delimiter=',', header=','.join(headers), comments='') fileObject.close() + From f339bf368cea415c5362a4c09c9b299ebf38dcdd Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Wed, 6 Mar 2024 15:54:12 -0700 Subject: [PATCH 61/84] GeneticAlgorithm.py is updated. 
new file beale_flipped2.py is added. MinwoRepMultiObjective.xml is updated to have more than 2 intrinsic constraints.

---
 ravenframework/Optimizers/GeneticAlgorithm.py |  4 +--
 .../optimizing/beale_flipped2.py              | 27 +++++++++++++++++++
 .../constrained/MinwoRepMultiObjective.xml    |  6 ++---
 3 files changed, 32 insertions(+), 5 deletions(-)
 create mode 100644 tests/framework/AnalyticModels/optimizing/beale_flipped2.py

diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py
index 2b70fd20b9..d85e350a6c 100644
--- a/ravenframework/Optimizers/GeneticAlgorithm.py
+++ b/ravenframework/Optimizers/GeneticAlgorithm.py
@@ -445,8 +445,8 @@ def handleInput(self, paramInput):
     # TODO: @mandd, please explore the possibility to convert the logistic fitness into a constrained optimization fitness.
     if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','logistic', 'feasibleFirst']:
       self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear, logistic, and feasibleFirst as a fitness, whereas provided fitness is {self._fitnessType}')
-    self._expConstr = self.assemblerObjects['Constraint'][0] if 'Constraint' in self.assemblerObjects else None
-    self._impConstr = self.assemblerObjects['ImplicitConstraint'][0] if 'ImplicitConstraint' in self.assemblerObjects else None
+    self._expConstr = self.assemblerObjects['Constraint'] if 'Constraint' in self.assemblerObjects else None
+    self._impConstr = self.assemblerObjects['ImplicitConstraint'] if 'ImplicitConstraint' in self.assemblerObjects else None
     if self._expConstr != None and self._impConstr != None:
       self._numOfConst = len([ele for ele in self._expConstr if ele != 'Functions' if ele !='External']) + len([ele for ele in self._impConstr if ele != 'Functions' if ele !='External'])
     elif self._expConstr == None and self._impConstr != None:
diff --git a/tests/framework/AnalyticModels/optimizing/beale_flipped2.py b/tests/framework/AnalyticModels/optimizing/beale_flipped2.py
new file mode 100644
index 0000000000..eb327f45af
--- /dev/null
+++ b/tests/framework/AnalyticModels/optimizing/beale_flipped2.py
@@ -0,0 +1,27 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# from https://en.wikipedia.org/wiki/Test_functions_for_optimization +# +# takes input parameters x,y +# returns value in "ans" +# optimal maximum at f(3,0.5) = 0 +# parameter range is -4.5 <= x,y <= 4.5 + +def evaluate(x,y): + beale = (1.5 - x + x*y)**2 + (2.25 - x + x*y*y)**2 + (2.625 - x + x*y*y*y)**2 + return -1.0*beale + +def run(self,Inputs): + self.obj1 = -1* evaluate(self.x,self.y) + self.obj2 = evaluate(self.x,self.y) \ No newline at end of file diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml index 740f63510c..1f67af2daf 100644 --- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml +++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml @@ -41,9 +41,9 @@ x1,x2,x3,x4,x5,x6 - + x1,x2,x3,x4,x5,x6,obj1 @@ -106,7 +106,7 @@ optOut MC_samp expConstr3 - + impConstr2 impConstr3 From 261799a2746f8f796ae7783a0fe7bbd68d374ed3 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Wed, 20 Mar 2024 08:55:04 -0600 Subject: [PATCH 62/84] Issues that RAVEN could not catch error when non-rankNCrowdingBased survivorSelector is used for multi-objective optimization have been solved. --- ravenframework/Optimizers/GeneticAlgorithm.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index d85e350a6c..6f78553b71 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -432,6 +432,8 @@ def handleInput(self, paramInput): self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support ageBased, fitnessBased, and rankNcrowdingBased as a survivorSelector, whereas provided survivorSelector is {self._survivorSelectionType}') if len(self._objectiveVar) == 1 and self._survivorSelectionType == 'rankNcrowdingBased': self.raiseAnError(IOError, f'(rankNcrowdingBased) in only supports when the number of objective in is bigger than two. ') + if len(self._objectiveVar) > 1 and self._survivorSelectionType != 'rankNcrowdingBased': + self.raiseAnError(IOError, f'The only option supported in for Multi-objective Optimization is (rankNcrowdingBased).') #################################################################################### # fitness node # From 677b47420eff1bd24adb740a463c437cc66b0734 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 2 Apr 2024 13:34:19 -0600 Subject: [PATCH 63/84] RAVEN Manual related changes only are made. 
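For context on the analytic model introduced above, a minimal standalone check of the flipped Beale objectives defined in beale_flipped2.py (a sketch in plain Python outside RAVEN's run(self, Inputs) API; the helper name beale is illustrative and not part of the file):

# Sanity check at the documented optimum (3, 0.5); assumes the same polynomial as beale_flipped2.py.
def beale(x, y):
  return (1.5 - x + x*y)**2 + (2.25 - x + x*y*y)**2 + (2.625 - x + x*y*y*y)**2

obj1 = beale(3.0, 0.5)    # mirrors self.obj1 = -1 * evaluate(x, y)
obj2 = -beale(3.0, 0.5)   # mirrors self.obj2 = evaluate(x, y)
assert abs(obj1) < 1e-12 and abs(obj2) < 1e-12   # both objectives are 0.0 at (3, 0.5)

By construction the two objectives are exact negatives of each other, which is the trade-off that MinwoRepMultiObjective.xml exercises.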
--- .../generated/generateOptimizerDoc.py | 14 +- ravenframework/Optimizers/GeneticAlgorithm.py | 174 +++++++++--------- ravenframework/Optimizers/Optimizer.py | 4 +- .../Optimizers/mutators/mutators.py | 2 +- .../survivorSelectors/survivorSelectors.py | 2 +- 5 files changed, 97 insertions(+), 99 deletions(-) diff --git a/doc/user_manual/generated/generateOptimizerDoc.py b/doc/user_manual/generated/generateOptimizerDoc.py index 946522370c..b44e867b29 100644 --- a/doc/user_manual/generated/generateOptimizerDoc.py +++ b/doc/user_manual/generated/generateOptimizerDoc.py @@ -152,7 +152,7 @@ def insertSolnExport(tex, obj): - 20 + 10 rouletteWheel @@ -177,32 +177,32 @@ def insertSolnExport(tex, obj): uniform_dist_woRepl_1 - 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20 + 1,2,3,4,5,6,7,8,9,10 uniform_dist_woRepl_1 - 2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,1 + 2,3,4,5,6,7,8,9,10,1 uniform_dist_woRepl_1 - 3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,1,2 + 3,4,5,6,7,8,9,10,1,2 uniform_dist_woRepl_1 - 4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,1,2,3 + 4,5,6,7,8,9,10,1,2,3 uniform_dist_woRepl_1 - 5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,1,2,3,4 + 5,6,7,8,9,10,1,2,3,4 uniform_dist_woRepl_1 - 6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,1,2,3,4,5 + 6,7,8,9,10,1,2,3,4,5 ans diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 6f78553b71..b0f550803c 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -52,7 +52,7 @@ class GeneticAlgorithm(RavenSampled): """ convergenceOptions = {'objective': r""" provides the desired value for the convergence criterion of the objective function ($\epsilon^{obj}$). In essence this is solving the inverse problem of finding the design variable - at a given objective value, i.e., convergence is reached when: $$ Objective = \epsilon^{obj}$$. + at a given objective value, i.e., convergence is reached when: $$ Objective = \epsilon^{obj}$$ \default{1e-6}, if no criteria specified""", 'AHDp': r""" provides the desired value for the Average Hausdorff Distance between populations""", 'AHD': r""" provides the desired value for the Hausdorff Distance between populations""", @@ -131,55 +131,34 @@ def getInputSpecification(cls): @ Out, specs, InputData.ParameterInput, class to use for specifying input of cls. """ specs = super(GeneticAlgorithm, cls).getInputSpecification() - specs.description = r"""The \xmlNode{GeneticAlgorithm} optimizer is a metaheuristic approach - to perform a global search in large design spaces. The methodology rose - from the process of natural selection, and like others in the large class - of the evolutionary algorithms, it utilizes genetic operations such as - selection, crossover, and mutations to avoid being stuck in local minima - and hence facilitates finding the global minima. More information can - be found in: - Holland, John H. "Genetic algorithms." Scientific American 267.1 (1992): 66-73.""" + specs.description = r"""The \xmlNode{GeneticAlgorithm} is a metaheuristic optimization technique inspired by the principles + of natural selection and genetics. Introduced by John Holland in the 1960s, GA mimics the process of + biological evolution to solve complex optimization and search problems. They operate by maintaining a population of + potential solutions represented as as arrays of fixed length variables (genes), and each such array is called a chromosome. 
+ These solutions undergo iterative refinement through processes such as mutation, crossover, and survivor selection. Mutation involves randomly altering certain genes within + individual solutions, introducing diversity into the population and enabling exploration of new regions in the solution space. + Crossover, on the other hand, mimics genetic recombination by exchanging genetic material between two parent solutions to create + offspring with combined traits. Survivor selection determines which solutions will advance to the next generation based on + their fitness—how well they perform in solving the problem at hand. Solutions with higher fitness scores are more likely to + survive and reproduce, passing their genetic material to subsequent generations. This iterative process continues + until a stopping criterion is met, typically when a satisfactory solution is found or after a predetermined number of generations. + More information can be found in:\\\\ + + Holland, John H. "Genetic algorithms." Scientific American 267.1 (1992): 66-73.\\\\ + + Non-dominated Sorting Genetic Algorithm II (NSGA-II) is a variant of GAs designed for multiobjective optimization problems. + NSGA-II extends traditional GAs by incorporating a ranking-based approach and crowding distance estimation to maintain a diverse set of + non-dominated (Pareto-optimal) solutions. This enables NSGA-II to efficiently explore trade-offs between conflicting objectives, + providing decision-makers with a comprehensive view of the problem's solution space. More information about NSGA-II can be found in:\\\\ + + Deb, Kalyanmoy, et al. "A fast and elitist multiobjective genetic algorithm: NSGA-II." IEEE transactions on evolutionary computation 6.2 (2002): 182-197.\\\\ + + GA in RAVEN supports for both single and multi-objective optimization problem.""" # GA Params GAparams = InputData.parameterInputFactory('GAparams', strictMode=True, printPriority=108, - descr=r""" Genetic Algorithm Parameters:\begin{itemize} - \item populationSize. - \item parentSelectors: - \begin{itemize} - \item rouletteWheel. - \item tournamentSelection. - \item rankSelection. - \end{itemize} - \item Reproduction: - \begin{itemize} - \item crossover: - \begin{itemize} - \item onePointCrossover. - \item twoPointsCrossover. - \item uniformCrossover - \end{itemize} - \item mutators: - \begin{itemize} - \item swapMutator. - \item scrambleMutator. - \item inversionMutator. - \item bitFlipMutator. - \item randomMutator. - \end{itemize} - \end{itemize} - \item survivorSelectors: - \begin{itemize} - \item ageBased. - \item fitnessBased. - \end{itemize} - #NOTE An indicator saying whather GA will handle constraint hardly or softly will be upgraded later @JunyungKim - # \item constraintHandling: - # \begin{itemize} - # \item hard. - # \item soft. - # \end{itemize} - \end{itemize}""") + descr=r""" """) # Population Size populationSize = InputData.parameterInputFactory('populationSize', strictMode=True, contentType=InputTypes.IntegerType, @@ -199,23 +178,27 @@ def getInputSpecification(cls): parentSelection = InputData.parameterInputFactory('parentSelection', strictMode=True, contentType=InputTypes.StringType, printPriority=108, - descr=r"""A node containing the criterion based on which the parents are selected. This can be a - fitness proportional selection such as: - a. \textbf{\textit{rouletteWheel}}, - b. \textbf{\textit{tournamentSelection}}, - c. 
\textbf{\textit{rankSelection}} - for all methods nParents is computed such that the population size is kept constant. - $nChildren = 2 \times {nParents \choose 2} = nParents \times (nParents-1) = popSize$ - solving for nParents we get: - $nParents = ceil(\frac{1 + \sqrt{1+4*popSize}}{2})$ - This will result in a popSize a little larger than the initial one, these excessive children will be later thrawn away and only the first popSize child will be kept""") + descr=r"""A node containing the criterion based on which the parents are selected. This can be a fitness proportional selection for all methods. + The number of parents (i.e., nParents) is computed such that the population size is kept constant. \\\\ + $nParents = ceil(\frac{1 + \sqrt{1+4*popSize}}{2})$. \\\\ + The number of children (i.e., nChildren) is computed by \\\\ + $nChildren = 2 \times {nParents \choose 2} = nParents \times (nParents-1) = popSize$ \\\\ + This will result in a popSize a little larger than the initial one, and the excessive children will be later thrawn away and only the first popSize child will be kept. \\\\ + You can choose three options for parentSelection: + \begin{itemize} + \item \textit{rouletteWheel} - It assigns probabilities to chromosomes based on their fitness, + allowing for selection proportionate to their likelihood of being chosen for reproduction. + \item \textit{tournamentSelection} - Chromosomes are randomly chosen from the population to compete in a tournament, + and the fittest individual among them is selected for reproduction. + \item \textit{rankSelection} - Chromosomes with higher fitness values are selected. + \end{itemize} + """) GAparams.addSub(parentSelection) # Reproduction reproduction = InputData.parameterInputFactory('reproduction', strictMode=True, printPriority=108, - descr=r"""a node containing the reproduction methods. - This accepts subnodes that specifies the types of crossover and mutation.""") + descr=r"""a node containing the reproduction methods. This accepts subnodes that specifies the types of crossover and mutation. """) # 0. k-selectionNumber of Parents kSelection = InputData.parameterInputFactory('kSelection', strictMode=True, contentType=InputTypes.IntegerType, @@ -226,12 +209,14 @@ def getInputSpecification(cls): crossover = InputData.parameterInputFactory('crossover', strictMode=True, contentType=InputTypes.StringType, printPriority=108, - descr=r"""a subnode containing the implemented crossover mechanisms. - This includes: a. onePointCrossover, - b. twoPointsCrossover, - c. uniformCrossover.""") + descr=r"""a subnode containing the implemented crossover mechanisms. You can choose one of the crossover options listed below: + \begin{itemize} + \item \textit{onePointCrossover} - It selects a random crossover point along the chromosome of parent individuals and swapping the genetic material beyond that point to create offspring. + \item \textit{twoPointsCrossover} - It selects two random crossover points along the chromosome of parent individuals and swapping the genetic material beyond that point to create offspring. + \item \textit{uniformCrossover} - It randomly selects genes from two parent chromosomes with equal probability, creating offspring by exchanging genes at corresponding positions. + \end{itemize}""") crossover.addParam("type", InputTypes.StringType, True, - descr="type of crossover operation to be used (e.g., OnePoint, MultiPoint, or Uniform)") + descr="type of crossover operation to be used. 
See the list of options above.") crossoverPoint = InputData.parameterInputFactory('points', strictMode=True, contentType=InputTypes.IntegerListType, printPriority=108, @@ -247,14 +232,16 @@ def getInputSpecification(cls): mutation = InputData.parameterInputFactory('mutation', strictMode=True, contentType=InputTypes.StringType, printPriority=108, - descr=r"""a subnode containing the implemented mutation mechanisms. - This includes: a. bitFlipMutation, - b. swapMutation, - c. scrambleMutation, - d. inversionMutation, or - e. randomMutator.""") + descr=r"""a subnode containing the implemented mutation mechanisms. You can choose one of the mutation options listed below: + \begin{itemize} + \item \textit{swapMutator} - It randomly selects two genes within an chromosome and swaps their positions. + \item \textit{scrambleMutator} - It randomly selects a subset of genes within an chromosome and shuffles their positions. + \item \textit{inversionMutator} - It selects a contiguous subset of genes within an chromosome and reverses their order. + \item \textit{bitFlipMutator} - It randomly selects genes within an chromosome and flips their values. + \item \textit{randomMutator} - It randomly selects a gene within an chromosome and mutates the gene. + \end{itemize} """) mutation.addParam("type", InputTypes.StringType, True, - descr="type of mutation operation to be used (e.g., bit, swap, or scramble)") + descr="type of mutation operation to be used. See the list of options above.") mutationLocs = InputData.parameterInputFactory('locs', strictMode=True, contentType=InputTypes.IntegerListType, printPriority=108, @@ -272,27 +259,38 @@ def getInputSpecification(cls): survivorSelection = InputData.parameterInputFactory('survivorSelection', strictMode=True, contentType=InputTypes.StringType, printPriority=108, - descr=r"""a subnode containing the implemented survivor selection mechanisms. - This includes: a. ageBased, or - b. fitnessBased.""") + descr=r"""a subnode containing the implemented survivor selection mechanisms. You can choose one of the survivor selection options listed below: + \begin{itemize} + \item \textit{fitnessBased} - Individuals with higher fitness scores are more likely to be selected to survive and + proceed to the next generation. It suppoort only single-objective optimization problem. + \item \textit{ageBased} - Individuals are selected for survival based on their age or generation, with older individuals being prioritized + for retention. It suppoort only single-objective optimization problem. + \item \textit{rankNcrowdingBased} - Individuals with low rank and crowding distance are more likely to be selected to survive and + proceed to the next generation. It suppoort only multi-objective optimization problem. + \end{itemize}""") GAparams.addSub(survivorSelection) # Fitness fitness = InputData.parameterInputFactory('fitness', strictMode=True, contentType=InputTypes.StringType, printPriority=108, - descr=r"""a subnode containing the implemented fitness functions. - This includes: a. invLinear: $fitness = -a \times obj - b \times \sum_{j=1}^{nConstraint} max(0,-penalty\_j) $. - - b. logistic: $fitness = \frac{1}{1+e^{a\times(obj-b)}}$. - - c. feasibleFirst: $fitness = \left\{\begin{matrix} -obj & g_j(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right.$ - - d. 
hardConstraint: $fitness = the number of constraints violated.$ - - """) + descr=r"""a subnode containing the implemented fitness functions.You can choose one of the fitness options listed below: + \begin{itemize} + \item \textit{invLinear} - It assigns fitness values inversely proportional to the individual's objective function values, + prioritizing solutions with lower objective function values (i.e., minimization) for selection and reproduction. It suppoort only single-objective optimization problem.\\\\ + $fitness = -a \times obj_j - b \times \sum_{j=1}^{nConstraint} max(0,-penalty_j) $\\ + where j represents an index of objects + \\ + + \item \textit{logistic} - It applies a logistic function to transform raw objective function values into fitness scores. It suppoort only single-objective optimization problem.\\\\ + $fitness = \frac{1}{1+e^{a\times(obj-b)}}$\\ + \item \textit{feasibleFirst} It prioritizes solutions that meet constraints by assigning higher fitness scores to feasible solutions, + + encouraging the evolution of individuals that satisfy the problem's constraints. It suppoort single-and multi-objective optimization problem.\\\\ + $fitness = \left\{\begin{matrix} -obj & g_j(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right$\\ + \end{itemize} """) fitness.addParam("type", InputTypes.StringType, True, - descr=r"""[invLin, logistic, feasibleFirst, hardConstraint]""") + descr=r"""[invLin, logistic, feasibleFirst]""") objCoeff = InputData.parameterInputFactory('a', strictMode=True, contentType=InputTypes.FloatListType, printPriority=108, @@ -337,8 +335,8 @@ def getSolutionExportVariableNames(cls): new = {} # new = {'': 'the size of step taken in the normalized input space to arrive at each optimal point'} new['conv_{CONV}'] = 'status of each given convergence criteria' - new['rank'] = 'rank' - new['CD'] = 'crowding distance' + new['rank'] = 'It refers to the sorting of solutions into non-dominated fronts based on their Pareto dominance relationships' + new['CD'] = 'It measures the density of solutions within each front to guide the selection of diverse individuals for the next generation' new['fitness'] = 'fitness of the current chromosome' new['age'] = 'age of current chromosome' new['batchId'] = 'Id of the batch to whom the chromosome belongs' @@ -436,7 +434,7 @@ def handleInput(self, paramInput): self.raiseAnError(IOError, f'The only option supported in for Multi-objective Optimization is (rankNcrowdingBased).') #################################################################################### - # fitness node # + # fitness / constraint node # #################################################################################### fitnessNode = gaParamsNode.findFirst('fitness') self._fitnessType = fitnessNode.parameterValues['type'] @@ -968,7 +966,7 @@ def _resolveNewGenerationMulti(self, traj, rlz, info): print("### self.population.shape is {}".format(self.population.shape)) for i in range(rlz.sizes['RAVEN_sample_ID']): varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys()) - # rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) + rlzDict1 = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) rlzDict = dict((var,self.population.data[i][j]) for j, var in enumerate(self.population.Gene.data)) rlzDict['batchId'] = rlz['batchId'].data[i] for j in range(len(self._objectiveVar)): diff 
--git a/ravenframework/Optimizers/Optimizer.py b/ravenframework/Optimizers/Optimizer.py index d80e0c5f32..8f7f7ecfdd 100644 --- a/ravenframework/Optimizers/Optimizer.py +++ b/ravenframework/Optimizers/Optimizer.py @@ -80,8 +80,8 @@ def getInputSpecification(cls): # objective variable specs.addSub(InputData.parameterInputFactory('objective', contentType=InputTypes.StringListType, strictMode=True, printPriority=90, # more important than - descr=r"""Name of the objective variable (or ``objective function'') that should be optimized - (minimized or maximized).""")) + descr=r"""Name of the objective variable(s) (or ``objective function'') that should be optimized + (minimized or maximized). It can be a single string or a list of strings if it is a multi-objective problem. """)) # modify Sampler variable nodes variable = specs.getSub('variable') diff --git a/ravenframework/Optimizers/mutators/mutators.py b/ravenframework/Optimizers/mutators/mutators.py index 910708cb9f..eb4f1e2b71 100644 --- a/ravenframework/Optimizers/mutators/mutators.py +++ b/ravenframework/Optimizers/mutators/mutators.py @@ -21,7 +21,7 @@ 5. randomMutator Created June,16,2020 - @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi + @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi, Junyung Kim """ import numpy as np import xarray as xr diff --git a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py index d2f2c227dd..061a965a08 100644 --- a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py +++ b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py @@ -248,5 +248,5 @@ def returnInstance(cls, name): @ Out, __crossovers[name], instance of class """ if name not in __survivorSelectors: - cls.raiseAnError (IOError, "{} MECHANISM NOT IMPLEMENTED!!!!!".format(name)) + cls.raiseAnError (IOError, "{} is not an valid option for survivor selector. Please review the spelling of the survivor selector. ".format(name)) return __survivorSelectors[name] From cf676604b206d6c1c317905ffa868b7845a5620d Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 2 Apr 2024 14:52:31 -0600 Subject: [PATCH 64/84] Minor changes are made. Functionally identical, just for readibility enhancement. 
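As a numeric illustration of the parent/offspring sizing formulas quoted in the manual text above (arithmetic only; gaSizing is an illustrative helper, not a RAVEN function):

import math

def gaSizing(popSize):
  # smallest nParents with nParents*(nParents-1) >= popSize, from the quadratic formula in the manual
  nParents = math.ceil((1 + math.sqrt(1 + 4*popSize)) / 2)
  nChildren = nParents*(nParents - 1)   # = 2 * C(nParents, 2)
  return nParents, nChildren

print(gaSizing(10))   # (4, 12): 12 children are generated and only the first 10 are kept
print(gaSizing(15))   # (5, 20): sizing for the populationSize of 15 used in MinwoRepMultiObjective.xml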
--- ravenframework/Optimizers/GeneticAlgorithm.py | 45 +++++++++---------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index b0f550803c..83cfbda24e 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -640,7 +640,11 @@ def multiConstraint(self, info, rlz): return traj, g, objectiveVal, offSprings, offSpringFitness def singleObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g): - if self.counter > 1: + if self.counter == 1: + self.population = offSprings + self.fitness = offSpringFitness + self.objectiveVal = rlz[self._objectiveVar[0]].data + else: self.population, self.fitness,\ self.popAge,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge, variables=list(self.toBeSampled), @@ -649,31 +653,12 @@ def singleObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, newRlz=rlz, offSpringsFitness=offSpringFitness, popObjectiveVal=self.objectiveVal) - else: - self.population = offSprings - self.fitness = offSpringFitness - self.objectiveVal = rlz[self._objectiveVar[0]].data def multiObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g): - if self.counter > 1: - self.population,self.rank, \ - self.popAge,self.crowdingDistance, \ - self.objectiveVal,self.fitness, \ - self.constraintsV = self._survivorSelectionInstance(age=self.popAge, - variables=list(self.toBeSampled), - population=self.population, - offsprings=rlz, - popObjectiveVal=self.objectiveVal, - offObjectiveVal=objectiveVal, - popFit = self.fitness, - offFit = offSpringFitness, - popConstV = self.constraintsV, - offConstV = g) - else: + if self.counter == 1: self.population = offSprings self.fitness = offSpringFitness self.constraintsV = g - # offspringObjsVals for Rank and CD calculation offObjVal = [] for i in range(len(self._objectiveVar)): @@ -696,6 +681,20 @@ def multiObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, self.objectiveVal = [] for i in range(len(self._objectiveVar)): self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + else: + self.population,self.rank, \ + self.popAge,self.crowdingDistance, \ + self.objectiveVal,self.fitness, \ + self.constraintsV = self._survivorSelectionInstance(age=self.popAge, + variables=list(self.toBeSampled), + population=self.population, + offsprings=rlz, + popObjectiveVal=self.objectiveVal, + offObjectiveVal=objectiveVal, + popFit = self.fitness, + offFit = offSpringFitness, + popConstV = self.constraintsV, + offConstV = g) self._collectOptPointMulti(self.population, self.rank, @@ -966,7 +965,7 @@ def _resolveNewGenerationMulti(self, traj, rlz, info): print("### self.population.shape is {}".format(self.population.shape)) for i in range(rlz.sizes['RAVEN_sample_ID']): varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys()) - rlzDict1 = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) + # rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) rlzDict = dict((var,self.population.data[i][j]) for j, var in enumerate(self.population.Gene.data)) rlzDict['batchId'] = rlz['batchId'].data[i] for j in range(len(self._objectiveVar)): @@ -1070,7 +1069,7 @@ def _collectOptPointMulti(self, population, rank, CD, objVal, fitness, constrain 
self.multiBestRank = optRank self.multiBestCD = optCD - return optPointsDic + return def _checkAcceptability(self, traj): From 916eda00687e211e3fbe45f4f8e9c815a4d9b1b7 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 2 Apr 2024 15:25:43 -0600 Subject: [PATCH 65/84] two methods related to survivorSelectors are moved to survivorSelectors.py from GeneticAlgorithm.py. --- ravenframework/Optimizers/GeneticAlgorithm.py | 86 +++---------------- .../survivorSelectors/survivorSelectors.py | 71 ++++++++++++++- 2 files changed, 80 insertions(+), 77 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 83cfbda24e..dce5f9bfe9 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -34,14 +34,14 @@ # External Modules End------------------------------------------------------------------------------ # Internal Modules---------------------------------------------------------------------------------- -from ..utils import mathUtils, InputData, InputTypes, frontUtils +from ..utils import mathUtils, InputData, InputTypes from ..utils.gaUtils import dataArrayToDict, datasetToDataArray from .RavenSampled import RavenSampled from .parentSelectors.parentSelectors import returnInstance as parentSelectionReturnInstance -from .parentSelectors.parentSelectors import countConstViolation from .crossOverOperators.crossovers import returnInstance as crossoversReturnInstance from .mutators.mutators import returnInstance as mutatorsReturnInstance from .survivorSelectors.survivorSelectors import returnInstance as survivorSelectionReturnInstance +from .survivorSelectors import survivorSelectors from .fitness.fitness import returnInstance as fitnessReturnInstance from .repairOperators.repair import returnInstance as repairReturnInstance # Internal Modules End------------------------------------------------------------------------------ @@ -639,71 +639,6 @@ def multiConstraint(self, info, rlz): type = self._minMax) return traj, g, objectiveVal, offSprings, offSpringFitness - def singleObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g): - if self.counter == 1: - self.population = offSprings - self.fitness = offSpringFitness - self.objectiveVal = rlz[self._objectiveVar[0]].data - else: - self.population, self.fitness,\ - self.popAge,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge, - variables=list(self.toBeSampled), - population=self.population, - fitness=self.fitness, - newRlz=rlz, - offSpringsFitness=offSpringFitness, - popObjectiveVal=self.objectiveVal) - - def multiObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g): - if self.counter == 1: - self.population = offSprings - self.fitness = offSpringFitness - self.constraintsV = g - # offspringObjsVals for Rank and CD calculation - offObjVal = [] - for i in range(len(self._objectiveVar)): - offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) - - # offspringFitVals for Rank and CD calculation - fitVal = datasetToDataArray(self.fitness, self._objectiveVar).data - offspringFitVals = fitVal.tolist() - offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringFitVals)) - self.rank = xr.DataArray(offSpringRank, - dims=['rank'], - coords={'rank': np.arange(np.shape(offSpringRank)[0])}) - offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, - popSize=len(offSpringRank), - objectives=np.array(offspringFitVals)) - - 
self.crowdingDistance = xr.DataArray(offSpringCD, - dims=['CrowdingDistance'], - coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) - self.objectiveVal = [] - for i in range(len(self._objectiveVar)): - self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) - else: - self.population,self.rank, \ - self.popAge,self.crowdingDistance, \ - self.objectiveVal,self.fitness, \ - self.constraintsV = self._survivorSelectionInstance(age=self.popAge, - variables=list(self.toBeSampled), - population=self.population, - offsprings=rlz, - popObjectiveVal=self.objectiveVal, - offObjectiveVal=objectiveVal, - popFit = self.fitness, - offFit = offSpringFitness, - popConstV = self.constraintsV, - offConstV = g) - - self._collectOptPointMulti(self.population, - self.rank, - self.crowdingDistance, - self.objectiveVal, - self.fitness, - self.constraintsV) - self._resolveNewGenerationMulti(traj, rlz, info) - ######################################################################################################### # Run Methods # ######################################################################################################### @@ -734,11 +669,10 @@ def _useRealization(self, info, rlz): const = constraintFuncs.get(objInd, GeneticAlgorithm.singleConstraint) traj, g, objectiveVal, offSprings, offSpringFitness = const(self, info, rlz) - - # 0.2@ n-1: Survivor selection(rlz): Update population container given obtained children if self._activeTraj: - survivorSelectionFuncs: dict = {1: GeneticAlgorithm.singleObjSurvivorSelect, 2: GeneticAlgorithm.multiObjSurvivorSelect} - survivorSelection = survivorSelectionFuncs.get(objInd, GeneticAlgorithm.singleObjSurvivorSelect) + # Step 0 @ n-1: Survivor selection(rlz): Update population container given obtained children + survivorSelectionFuncs: dict = {1: survivorSelectors.singleObjSurvivorSelect, 2: survivorSelectors.multiObjSurvivorSelect} + survivorSelection = survivorSelectionFuncs.get(objInd, survivorSelectors.singleObjSurvivorSelect) survivorSelection(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g) ####################################################################################################### @@ -761,7 +695,7 @@ def _useRealization(self, info, rlz): # plt.savefig('PF_'+str(self.batchId)+'.png') ####################################################################################################### - # 1 @ n: Parent selection from population + # Step 1 @ n: Parent selection from population # Pair parents together by indexes parents = self._parentSelectionInstance(self.population, variables=list(self.toBeSampled), @@ -773,14 +707,14 @@ def _useRealization(self, info, rlz): objVal = self._objectiveVar ) - # 2 @ n: Crossover from set of parents + # Step 2 @ n: Crossover from set of parents # Create childrenCoordinates (x1,...,xM) childrenXover = self._crossoverInstance(parents=parents, variables=list(self.toBeSampled), crossoverProb=self._crossoverProb, points=self._crossoverPoints) - # 3 @ n: Mutation + # Step 3 @ n: Mutation # Perform random directly on childrenCoordinates childrenMutated = self._mutationInstance(offSprings=childrenXover, distDict=self.distDict, @@ -788,7 +722,7 @@ def _useRealization(self, info, rlz): mutationProb=self._mutationProb, variables=list(self.toBeSampled)) - # 4 @ n: repair/replacement + # Step 4 @ n: repair/replacement # Repair should only happen if multiple genes in a single chromosome have the same values (), # and at the same time the sampling of these genes should be with 
Out replacement. needsRepair = False @@ -812,7 +746,7 @@ def _useRealization(self, info, rlz): coords={'chromosome': np.arange(np.shape(children)[0]), 'Gene':list(self.toBeSampled)}) - # 5 @ n: Submit children batch + # Step 5 @ n: Submit children batch # Submit children coordinates (x1,...,xm), i.e., self.childrenCoordinates for i in range(self.batch): newRlz = {} diff --git a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py index 061a965a08..3ab2760895 100644 --- a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py +++ b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py @@ -19,7 +19,7 @@ 2. fitnessBased Created June,16,2020 - @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi + @authors: Mohammad Abdo, Junyung Kim, Diego Mandelli, Andrea Alfonsi """ # External Modules---------------------------------------------------------------------------------- import numpy as np @@ -32,6 +32,75 @@ # Internal Modules End------------------------------------------------------------------------------ # @profile + +def singleObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g): + if self.counter == 1: + self.population = offSprings + self.fitness = offSpringFitness + self.objectiveVal = rlz[self._objectiveVar[0]].data + else: + self.population, self.fitness,\ + self.popAge,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge, + variables=list(self.toBeSampled), + population=self.population, + fitness=self.fitness, + newRlz=rlz, + offSpringsFitness=offSpringFitness, + popObjectiveVal=self.objectiveVal) + +def multiObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g): + if self.counter == 1: + self.population = offSprings + self.fitness = offSpringFitness + self.constraintsV = g + # offspringObjsVals for Rank and CD calculation + offObjVal = [] + for i in range(len(self._objectiveVar)): + offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + + # offspringFitVals for Rank and CD calculation + fitVal = datasetToDataArray(self.fitness, self._objectiveVar).data + offspringFitVals = fitVal.tolist() + offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringFitVals)) + self.rank = xr.DataArray(offSpringRank, + dims=['rank'], + coords={'rank': np.arange(np.shape(offSpringRank)[0])}) + offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, + popSize=len(offSpringRank), + objectives=np.array(offspringFitVals)) + + self.crowdingDistance = xr.DataArray(offSpringCD, + dims=['CrowdingDistance'], + coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) + self.objectiveVal = [] + for i in range(len(self._objectiveVar)): + self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + else: + self.population,self.rank, \ + self.popAge,self.crowdingDistance, \ + self.objectiveVal,self.fitness, \ + self.constraintsV = self._survivorSelectionInstance(age=self.popAge, + variables=list(self.toBeSampled), + population=self.population, + offsprings=rlz, + popObjectiveVal=self.objectiveVal, + offObjectiveVal=objectiveVal, + popFit = self.fitness, + offFit = offSpringFitness, + popConstV = self.constraintsV, + offConstV = g) + + self._collectOptPointMulti(self.population, + self.rank, + self.crowdingDistance, + self.objectiveVal, + self.fitness, + self.constraintsV) + self._resolveNewGenerationMulti(traj, rlz, info) + + + + def ageBased(newRlz,**kwargs): """ 
ageBased survivorSelection mechanism for new generation selection. From 366974e84729de93eedf820b4680c8ffeac90bd8 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 2 Apr 2024 15:43:33 -0600 Subject: [PATCH 66/84] =?UTF-8?q?Some=20comments=20are=20left=20in=20fitne?= =?UTF-8?q?ss.py=20for=20future=20reference.=20invLinear=20=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ravenframework/Optimizers/fitness/fitness.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index 4ba2cf1c4b..33ba5885a3 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -59,6 +59,9 @@ def invLinear(rlz,**kwargs): the farthest from violating the constraint it is, The highest negative value it have the largest the violation is. @ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome. """ + #NOTE invLinear is not yet support Multi-objective optimization problem solving. Further literature reivew applying invLinear method to multi-objective optimization + # needs to be involved. Potentially, applying obj_Worst in fitness function (i.e., -a[j] * (rlz[objVar][objVar[j]].data).reshape(-1,1) - b[j] * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1)) + # should be considerd. a = [1.0] if kwargs['a'] == None else kwargs['a'] b = [10.0] if kwargs['b'] == None else kwargs['b'] penalty = 0.0 if kwargs['constraintFunction'].all() == None else kwargs['constraintFunction'].data From ceb701d0f22bf7494c13e8c0d5ef3a569bf5e831 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 2 Apr 2024 15:45:08 -0600 Subject: [PATCH 67/84] Some comments are left in fitness.py for future reference. invLinear and logistics method should be revisited later for multi-objective problem. --- ravenframework/Optimizers/fitness/fitness.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index 33ba5885a3..332daeef5d 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -61,7 +61,7 @@ def invLinear(rlz,**kwargs): """ #NOTE invLinear is not yet support Multi-objective optimization problem solving. Further literature reivew applying invLinear method to multi-objective optimization # needs to be involved. Potentially, applying obj_Worst in fitness function (i.e., -a[j] * (rlz[objVar][objVar[j]].data).reshape(-1,1) - b[j] * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1)) - # should be considerd. + # should be considerd . a = [1.0] if kwargs['a'] == None else kwargs['a'] b = [10.0] if kwargs['b'] == None else kwargs['b'] penalty = 0.0 if kwargs['constraintFunction'].all() == None else kwargs['constraintFunction'].data From 35e65e796aee86f955e187713d995bcd91f8a540 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 2 Apr 2024 15:50:10 -0600 Subject: [PATCH 68/84] Some comments are added/deleted. 
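For readers following the multiObjSurvivorSelect logic above, here is a toy sketch of non-dominated front ranking (an illustration assuming all objectives are minimized; it is not RAVEN's frontUtils.rankNonDominatedFrontiers and ignores performance):

import numpy as np

def rankFronts(objectives):
  """
  Toy non-dominated sorting: returns a 1-based front index per point.
  objectives is an (nPoints, nObjectives) array-like; lower is better.
  """
  objs = np.asarray(objectives, dtype=float)
  rank = np.zeros(len(objs), dtype=int)
  remaining = set(range(len(objs)))
  front = 1
  while remaining:
    nonDominated = [i for i in remaining
                    if not any(np.all(objs[j] <= objs[i]) and np.any(objs[j] < objs[i])
                               for j in remaining if j != i)]
    for i in nonDominated:
      rank[i] = front
    remaining -= set(nonDominated)
    front += 1
  return rank

print(rankFronts([[1.0, 4.0], [2.0, 2.0], [3.0, 5.0]]))   # -> [1 1 2]

Within each front, the crowding distance computed right after the ranking favors the more isolated points, matching the rank/CD descriptions added to the solution export above.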
--- ravenframework/Optimizers/GeneticAlgorithm.py | 21 +++++++------------ 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index dce5f9bfe9..1561d82764 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -660,10 +660,6 @@ def _useRealization(self, info, rlz): """ info['step'] = self.counter - - # 0 @ n-1: Survivor Selection from previous iteration (children+parents merging from previous generation) - # 0.1 @ n-1: fitnessCalculation(rlz): Perform fitness calculation for newly obtained children (rlz) - objInd = int(len(self._objectiveVar)>1) + 1 #if len(self._objectiveVar) == 1 else 2 constraintFuncs: dict = {1: GeneticAlgorithm.singleConstraint, 2: GeneticAlgorithm.multiConstraint} const = constraintFuncs.get(objInd, GeneticAlgorithm.singleConstraint) @@ -675,8 +671,8 @@ def _useRealization(self, info, rlz): survivorSelection = survivorSelectionFuncs.get(objInd, survivorSelectors.singleObjSurvivorSelect) survivorSelection(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g) - ####################################################################################################### - # ##TODO: remove all the plots and maybe design new plots in outstreams if our current cannot be used + # Step 1 @ n-1: Plot results + # ## TODO: remove all the plots and maybe design new plots in outstreams if our current cannot be used # ## These are currently for debugging purposes @JunyungKim # import matplotlib.pyplot as plt @@ -693,9 +689,8 @@ def _useRealization(self, info, rlz): # newMultiBestObjective[i,1], str(self.batchId)) # # plt.savefig('PF'+str(i)+'_'+str(self.batchId)+'.png') # plt.savefig('PF_'+str(self.batchId)+'.png') - ####################################################################################################### - - # Step 1 @ n: Parent selection from population + + # Step 2 @ n: Parent selection from population # Pair parents together by indexes parents = self._parentSelectionInstance(self.population, variables=list(self.toBeSampled), @@ -707,14 +702,14 @@ def _useRealization(self, info, rlz): objVal = self._objectiveVar ) - # Step 2 @ n: Crossover from set of parents + # Step 3 @ n: Crossover from set of parents # Create childrenCoordinates (x1,...,xM) childrenXover = self._crossoverInstance(parents=parents, variables=list(self.toBeSampled), crossoverProb=self._crossoverProb, points=self._crossoverPoints) - # Step 3 @ n: Mutation + # Step 4 @ n: Mutation # Perform random directly on childrenCoordinates childrenMutated = self._mutationInstance(offSprings=childrenXover, distDict=self.distDict, @@ -722,7 +717,7 @@ def _useRealization(self, info, rlz): mutationProb=self._mutationProb, variables=list(self.toBeSampled)) - # Step 4 @ n: repair/replacement + # Step 5 @ n: repair/replacement # Repair should only happen if multiple genes in a single chromosome have the same values (), # and at the same time the sampling of these genes should be with Out replacement. 
needsRepair = False @@ -746,7 +741,7 @@ def _useRealization(self, info, rlz): coords={'chromosome': np.arange(np.shape(children)[0]), 'Gene':list(self.toBeSampled)}) - # Step 5 @ n: Submit children batch + # Step 6 @ n: Submit children batch # Submit children coordinates (x1,...,xm), i.e., self.childrenCoordinates for i in range(self.batch): newRlz = {} From c8ac5c97752430408f38436a59e885fd832314a7 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 2 Apr 2024 16:11:47 -0600 Subject: [PATCH 69/84] some files in NSGAII folder which are already relocated to other folders are now removed. --- .../MultiSumwConst/MinwoRepMultiObjective.xml | 160 ---------- .../MultiSumwConst/myConstraints.py | 150 --------- .../MultiSumwConst/myLocalSum_multi.py | 43 --- .../NSGAII/discrete/constrained/ZDT1/ZDT1.xml | 295 ------------------ .../discrete/constrained/ZDT1/ZDT_model.py | 43 --- 5 files changed, 691 deletions(-) delete mode 100644 tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml delete mode 100644 tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py delete mode 100644 tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myLocalSum_multi.py delete mode 100644 tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT1.xml delete mode 100644 tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml deleted file mode 100644 index d961e60adf..0000000000 --- a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml +++ /dev/null @@ -1,160 +0,0 @@ - - - - \raven\tests\framework\Optimizers\NSGAII\discrete\constrained\ - Junyung Kim, Mohammad Abdo - 2022-12-21 - - NSGA-II min-min test - - - - Multi_MinwoReplacement_Figure_720 - optimize,print - 1 - - - - - placeholder - myLocalSum - GAopt - opt_export - optOut - opt_export - - - opt_export - optOut - opt_export - optOut - - - - - - x1,x2,x3,x4,x5,x6,obj1,obj2 - - - - - - x1,x2,x3,x4,x5,x6 - - - x1,x2,x3,x4,x5,x6,obj1 - - - - - - 2 - 7 - withoutReplacement - - - - - - - 3 - 42 - every - min, max - - - - 15 - tournamentSelection - - - 0.7 - - - 0.7 - - - - - rankNcrowdingBased - - - 0.0 - - - woRep_dist - - - woRep_dist - - - woRep_dist - - - woRep_dist - - - woRep_dist - - - woRep_dist - - obj1, obj2 - optOut - MC_samp - expConstr3 - impConstr3 - - - - - - - 15 - 050877 - - - woRep_dist - - - woRep_dist - - - woRep_dist - - - woRep_dist - - - woRep_dist - - - woRep_dist - - - - - - - - x1,x2,x3,x4,x5,x6 - obj1,obj2 - - - trajID - x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,ConstraintEvaluation_expConstr3, ConstraintEvaluation_impConstr3,fitness,accepted - - - - - - csv - optOut - - - csv - opt_export - trajID - - - diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py deleted file mode 100644 index 4d3b5f51c9..0000000000 --- a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2017 Battelle Energy Alliance, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# @ author: Mohammad Abdo (@Jimmy-INL) - -import numpy as np - -def constrain(Input):#Complete this: give the function the correct name# - """ - This function calls the explicit constraint whose name is passed through Input.name - the evaluation function g is negative if the explicit constraint is violated and positive otherwise. - This suits the constraint handling in the Genetic Algorithms, - but not the Gradient Descent as the latter expects True if the solution passes the constraint and False if it violates it. - @ In, Input, object, RAVEN container - @ Out, g, float, explicit constraint evaluation (negative if violated and positive otherwise) - """ - g = eval(Input.name)(Input) - return g - -def implicitConstraint(Input): - """ - Evaluates the implicit constraint function at a given point/solution ($\vec(x)$) - @ In, Input, object, RAVEN container - @ Out, g(inputs x1,x2,..,output or dependent variable), float, implicit constraint evaluation function - the way the constraint is designed is that - the constraint function has to be >= 0, - so if: - 1) f(x,y) >= 0 then g = f - 2) f(x,y) >= a then g = f - a - 3) f(x,y) <= b then g = b - f - 4) f(x,y) = c then g = 1e-6 - abs((f(x,y) - c)) (equality constraint) - """ - g = eval(Input.name)(Input) - return g - - -def expConstr1(Input):#You are free to pick this name but it has to be similar to the one in the xml# - """ - Let's assume that the constraint is: - $ x3+x4 < 8 $ - then g the constraint evaluation function (which has to be > 0) is taken to be: - g = 8 - (x3+x4) - in this case if g(\vec(x)) < 0 then this x violates the constraint and vice versa - @ In, Input, object, RAVEN container - @ out, g, float, explicit constraint 1 evaluation function - """ - g = 8 - Input.x3 - Input.x4 - return g - -def expConstr2(Input): - """ - Explicit Equality Constraint: - let's consider the constraint x1**2 + x2**2 = 25 - The way to write g is to use a very small number for instance, epsilon = 1e-12 - and then g = epsilon - abs(constraint) - @ In, Input, object, RAVEN container - @ out, g, float, explicit constraint 2 evaluation function - """ - g = 1e-12 - abs(Input.x1**2 + Input.x2**2 - 25) - return g - -def expConstr3(Input): - """ - @ In, Input, object, RAVEN container - @ out, g, float, explicit constraint 3 evaluation function - """ - g = 10 - Input.x3 - Input.x4 - return g - -def impConstr1(Input): - """ - The implicit constraint involves variables from the output space, for example the objective variable or - a dependent variable that is not in the optimization search space - @ In, Input, object, RAVEN container - @ out, g, float, implicit constraint 1 evaluation function - """ - g = 10 - Input.x1**2 - Input.obj - return g - -def impConstr2(Input): - """ - The implicit constraint involves variables from the output space, for example the objective variable or - a dependent variable that is not in the optimization search space - @ In, Input, object, RAVEN container - @ out, g, float, implicit constraint 2 evaluation function - """ - g = Input.x1**2 + Input.obj - 10 - return g - -def impConstr3(Input): - """ - The implicit constraint 
involves variables from the output space, for example the objective variable or - a dependent variable that is not in the optimization search space - @ In, Input, object, RAVEN container - @ out, g, float, implicit constraint 3 evaluation function - """ - g = 100 - Input.obj1 - return g - -def impConstr4(Input): - """ - The implicit constraint involves variables from the output space, for example the objective variable or - a dependent variable that is not in the optimization search space - @ In, Input, object, RAVEN container - @ out, g, float, implicit constraint 3 evaluation function - """ - g = Input.obj2 - 16 - return g - -def impConstr5(Input): - """ - The implicit constraint involves variables from the output space, for example the objective variable or - a dependent variable that is not in the optimization search space - @ In, Input, object, RAVEN container - @ out, g, float, implicit constraint 3 evaluation function - """ - g = 200 - Input.obj1 - return g - - """ - Evaluates the implicit constraint function at a given point/solution ($\vec(x)$) - @ In, Input, object, RAVEN container - @ Out, g(inputs x1,x2,..,output or dependent variable), float, implicit constraint evaluation function - the way the constraint is designed is that - the constraint function has to be >= 0, - so if: - 1) f(x,y) >= 0 then g = f - 2) f(x,y) >= a then g = f - a - 3) f(x,y) <= b then g = b - f - 4) f(x,y) = c then g = 1e-6 - abs((f(x,y) - c)) (equality constraint) - """ - """ - Let's assume that the constraint is: - $ x3+x4 < 8 $ - then g the constraint evaluation function (which has to be > 0) is taken to be: - g = 8 - (x3+x4) - in this case if g(\vec(x)) < 0 then this x violates the constraint and vice versa - @ In, Input, object, RAVEN container - @ out, g, float, explicit constraint 1 evaluation function - """ \ No newline at end of file diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myLocalSum_multi.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myLocalSum_multi.py deleted file mode 100644 index 86ef17bdeb..0000000000 --- a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myLocalSum_multi.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2017 Battelle Energy Alliance, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# @author: Mohammad Abdo (@Jimmy-INL) - -def evaluate(Inputs): - Sum = 0 - LocalSum1 = 0 - LocalSum2 = 0 - # for ind,var in enumerate(Inputs.keys()): - # # write the objective function here - # Sum += (ind + 1) * Inputs[var] - # if (ind == 1): - # LocalSum1 = Sum - # return Sum[:], LocalSum1[:] - for ind,var in enumerate(Inputs.keys()): - # write the objective function here - Sum += (ind + 1) * Inputs[var] - if (ind == 0) or (ind == 1): - LocalSum1 += (ind + 1) * Inputs[var] - if (ind == 2) or (ind == 3): - LocalSum2 += (ind + 1) * Inputs[var] - return Sum[:], LocalSum1[:], LocalSum2[:] - -def run(self,Inputs): - """ - RAVEN API - @ In, self, object, RAVEN container - @ In, Inputs, dict, additional inputs - @ Out, None - """ - self.obj1,self.obj2,self.obj3 = evaluate(Inputs) # make sure the name of the objective is consistent with obj1, obj2, obj3. diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT1.xml b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT1.xml deleted file mode 100644 index 80ad0c28a7..0000000000 --- a/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT1.xml +++ /dev/null @@ -1,295 +0,0 @@ - - - - raven\tests\framework\Optimizers\NSGAII\discrete\constrained\ - Junyung Kim, Mohammad Abdo - 2023-02-21 - - ZDT1 test using NSGA-II - - - - ZDT1_result_300iter_150Popu - optimize,print - 1 - - - - - placeholder - ZDT - GAopt - opt_export - optOut - opt_export - - - opt_export - optOut - opt_export - optOut - - - - - - x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30,obj1,obj2 - - - - - - 0 - 1 - - - - - - - 300 - 42 - every - min - - - - 150 - tournamentSelection - - - 1.0 - - - 1.0 - - - - - rankNcrowdingBased - - - - 0.0 - - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - obj1, obj2 - optOut - MC_samp - - - - - - - 150 - 050877 - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - unifDist - - - - - - - - x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30 - obj1,obj2 - - - trajID - x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30,obj1,obj2,age,batchId,rank,CD,fitness,accepted - - - - - - csv - optOut - - - csv - opt_export - trajID - - - diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py deleted file mode 100644 index 829307f73e..0000000000 --- a/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2017 Battelle Energy Alliance, LLC -# -# Licensed under the Apache License, Version 2.0 (the 
"License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# @author: Junyung Kim (@JunyungKim-INL) and Mohammad Abdo (@Jimmy-INL) - -import math - -def evaluate(Inputs): - Sum = 0 - obj1 = 0 - - for ind,var in enumerate(Inputs.keys()): - # write the objective function here - if (ind == 0) : - obj1 = Inputs[var] - if (ind != 0): - Sum += Inputs[var] - g = 1 + (9/len(Inputs.keys())*Sum ) - h = 1 - math.sqrt(obj1/g) - obj2 = g*h - return obj1[:], obj2[:] - -def run(self,Inputs): - """ - RAVEN API - @ In, self, object, RAVEN container - @ In, Inputs, dict, additional inputs - @ Out, None - """ - self.obj1,self.obj2 = evaluate(Inputs) - - From ce0aaada424b800c65e9a0316b23f0c9c567adb4 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 2 Apr 2024 16:14:54 -0600 Subject: [PATCH 70/84] commentations and code cleaning is dnoe in GeneticAlgorithm.py. Functionally no changes. --- ravenframework/Optimizers/GeneticAlgorithm.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 1561d82764..84c2713395 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -461,22 +461,27 @@ def handleInput(self, paramInput): else: pass self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None - #NOTE the code lines below are for 'feasibleFirst' temperarily. It will be generalized for invLinear as well. + + # TODO: @JunyungKim, the code lines below are for 'feasibleFirst' temperarily. It should be generalized for invLinear as well. if self._fitnessType == 'feasibleFirst': - if self._numOfConst != 0 and fitnessNode.findFirst('b') is not None: + # Case 1: There is constraint(s) and penaltyCoeff are given by users + if self._numOfConst != 0 and fitnessNode.findFirst('b') is not None: + self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else [1] * len(self._objectiveVar) self._penaltyCoeff = fitnessNode.findFirst('b').value - self._objCoeff = fitnessNode.findFirst('a').value + # Case 2: There is NO constraint and penaltyCoeff are given by users elif self._numOfConst == 0 and fitnessNode.findFirst('b') is not None: self.raiseAnError(IOError, f'The number of constraints used are 0 but there are penalty coefficieints') + # Case 3: There is constraint(s) and penaltyCoeff is NOT given by users elif self._numOfConst != 0 and fitnessNode.findFirst('b') is None: - self._penaltyCoeff = [1] * self._numOfConst * len(self._objectiveVar) #list(np.repeat(1, self._numOfConst * len(self._objectiveVar))) #NOTE if penaltyCoeff is not provided, then assume they are all 1. - self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else [1] * len(self._objectiveVar) #list(np.repeat( + self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else [1] * len(self._objectiveVar) #NOTE if objCoeff is not provided, then assume they are all 1. 
+ self._penaltyCoeff = [1] * self._numOfConst * len(self._objectiveVar) #NOTE if penaltyCoeff is not provided, then assume they are all 1. + # Case 4: There is NO constraint and penaltyCoeff is NOT given by users else: - self._penaltyCoeff = [0] * len(self._objectiveVar) #list(np.repeat(0, len(self._objectiveVar))) self._objCoeff = [1] * len(self._objectiveVar) + self._penaltyCoeff = [0] * len(self._objectiveVar) else: - self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None + self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None self._fitnessInstance = fitnessReturnInstance(self,name = self._fitnessType) self._repairInstance = repairReturnInstance(self,name='replacementRepair') # currently only replacement repair is implemented. From cfd5b31fc1425a7e802e726840651d069052850e Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 2 Apr 2024 18:13:59 -0600 Subject: [PATCH 71/84] rlzDict in def _resolveNewGenerationMulti is updated to avoid SIMULATE3-related errors. --- ravenframework/Optimizers/GeneticAlgorithm.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 84c2713395..9424a5971e 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -42,8 +42,10 @@ from .mutators.mutators import returnInstance as mutatorsReturnInstance from .survivorSelectors.survivorSelectors import returnInstance as survivorSelectionReturnInstance from .survivorSelectors import survivorSelectors +from .survivorSelectors import survivorSelectors from .fitness.fitness import returnInstance as fitnessReturnInstance from .repairOperators.repair import returnInstance as repairReturnInstance + # Internal Modules End------------------------------------------------------------------------------ class GeneticAlgorithm(RavenSampled): @@ -901,6 +903,7 @@ def _resolveNewGenerationMulti(self, traj, rlz, info): varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys()) # rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars) rlzDict = dict((var,self.population.data[i][j]) for j, var in enumerate(self.population.Gene.data)) + rlzDict.update(dict((var,objVal.data[i][j]) for j, var in enumerate(objVal.obj.data))) rlzDict['batchId'] = rlz['batchId'].data[i] for j in range(len(self._objectiveVar)): rlzDict[self._objectiveVar[j]] = objVal.data[i][j] From 9d8941caf2f35ad672afe46baef28ed3ccf95025 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 2 Apr 2024 18:19:39 -0600 Subject: [PATCH 72/84] user manual related update - Equation correction --- ravenframework/Optimizers/GeneticAlgorithm.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 9424a5971e..ba5162916b 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -287,9 +287,8 @@ def getInputSpecification(cls): \item \textit{logistic} - It applies a logistic function to transform raw objective function values into fitness scores. 
It suppoort only single-objective optimization problem.\\\\ $fitness = \frac{1}{1+e^{a\times(obj-b)}}$\\ \item \textit{feasibleFirst} It prioritizes solutions that meet constraints by assigning higher fitness scores to feasible solutions, - encouraging the evolution of individuals that satisfy the problem's constraints. It suppoort single-and multi-objective optimization problem.\\\\ - $fitness = \left\{\begin{matrix} -obj & g_j(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right$\\ + $fitness = \left\{\begin{matrix} -obj & g_{j}(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right$\\ \end{itemize} """) fitness.addParam("type", InputTypes.StringType, True, descr=r"""[invLin, logistic, feasibleFirst]""") From 580cad832ae0114dd21123d8ab7fbff293b84307 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 2 Apr 2024 18:28:04 -0600 Subject: [PATCH 73/84] user manual related update - Equation correction --- ravenframework/Optimizers/GeneticAlgorithm.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index ba5162916b..d78813ce13 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -280,13 +280,14 @@ def getInputSpecification(cls): \begin{itemize} \item \textit{invLinear} - It assigns fitness values inversely proportional to the individual's objective function values, prioritizing solutions with lower objective function values (i.e., minimization) for selection and reproduction. It suppoort only single-objective optimization problem.\\\\ - $fitness = -a \times obj_j - b \times \sum_{j=1}^{nConstraint} max(0,-penalty_j) $\\ + $fitness = -a \times obj - b \times \sum_{j=1}^{nConstraint} max(0,-penalty_{j}) $\\ where j represents an index of objects \\ \item \textit{logistic} - It applies a logistic function to transform raw objective function values into fitness scores. It suppoort only single-objective optimization problem.\\\\ $fitness = \frac{1}{1+e^{a\times(obj-b)}}$\\ \item \textit{feasibleFirst} It prioritizes solutions that meet constraints by assigning higher fitness scores to feasible solutions, + encouraging the evolution of individuals that satisfy the problem's constraints. It suppoort single-and multi-objective optimization problem.\\\\ $fitness = \left\{\begin{matrix} -obj & g_{j}(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right$\\ \end{itemize} """) From 06f4a4694792c46fcd66b163349c19fb7c3923bf Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Tue, 2 Apr 2024 18:46:05 -0600 Subject: [PATCH 74/84] very minor change made for user manuel. --- ravenframework/Optimizers/GeneticAlgorithm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index d78813ce13..36b9797af5 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -286,7 +286,7 @@ def getInputSpecification(cls): \item \textit{logistic} - It applies a logistic function to transform raw objective function values into fitness scores. 
It suppoort only single-objective optimization problem.\\\\ $fitness = \frac{1}{1+e^{a\times(obj-b)}}$\\ - \item \textit{feasibleFirst} It prioritizes solutions that meet constraints by assigning higher fitness scores to feasible solutions, + \item \textit{feasibleFirst} - It prioritizes solutions that meet constraints by assigning higher fitness scores to feasible solutions, encouraging the evolution of individuals that satisfy the problem's constraints. It suppoort single-and multi-objective optimization problem.\\\\ $fitness = \left\{\begin{matrix} -obj & g_{j}(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right$\\ From a9039396762f5b4cae872d8c32c2ab7a4af7146d Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Wed, 3 Apr 2024 16:15:24 -0600 Subject: [PATCH 75/84] survivorSelection.py is created. --- ravenframework/Optimizers/GeneticAlgorithm.py | 13 ++- .../survivorSelection/survivorSelection.py | 96 +++++++++++++++++++ .../survivorSelectors/survivorSelectors.py | 68 ------------- 3 files changed, 102 insertions(+), 75 deletions(-) create mode 100644 ravenframework/Optimizers/survivorSelection/survivorSelection.py diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 36b9797af5..1dabb6f8da 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -41,8 +41,7 @@ from .crossOverOperators.crossovers import returnInstance as crossoversReturnInstance from .mutators.mutators import returnInstance as mutatorsReturnInstance from .survivorSelectors.survivorSelectors import returnInstance as survivorSelectionReturnInstance -from .survivorSelectors import survivorSelectors -from .survivorSelectors import survivorSelectors +from .survivorSelection import survivorSelection as survivorSelectionProcess from .fitness.fitness import returnInstance as fitnessReturnInstance from .repairOperators.repair import returnInstance as repairReturnInstance @@ -618,9 +617,9 @@ def multiConstraint(self, info, rlz): g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions))) g = xr.DataArray(g0, - dims=['chromosome','Constraint'], - coords={'chromosome':np.arange(np.shape(offSprings)[0]), - 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]}) + dims=['chromosome','Constraint'], + coords={'chromosome':np.arange(np.shape(offSprings)[0]), + 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]}) for index,individual in enumerate(offSprings): newOpt = individual @@ -674,8 +673,8 @@ def _useRealization(self, info, rlz): if self._activeTraj: # Step 0 @ n-1: Survivor selection(rlz): Update population container given obtained children - survivorSelectionFuncs: dict = {1: survivorSelectors.singleObjSurvivorSelect, 2: survivorSelectors.multiObjSurvivorSelect} - survivorSelection = survivorSelectionFuncs.get(objInd, survivorSelectors.singleObjSurvivorSelect) + survivorSelectionFuncs: dict = {1: survivorSelectionProcess.singleObjSurvivorSelect, 2: survivorSelectionProcess.multiObjSurvivorSelect} + survivorSelection = survivorSelectionFuncs.get(objInd, survivorSelectionProcess.singleObjSurvivorSelect) survivorSelection(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g) # Step 1 @ n-1: Plot results diff --git a/ravenframework/Optimizers/survivorSelection/survivorSelection.py b/ravenframework/Optimizers/survivorSelection/survivorSelection.py new 
file mode 100644 index 0000000000..8d195fbadd --- /dev/null +++ b/ravenframework/Optimizers/survivorSelection/survivorSelection.py @@ -0,0 +1,96 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + Implementation of survivorSelection step for new generation + selection process in Genetic Algorithm. + + Created Apr,3,2024 + @authors: Mohammad Abdo, Junyung Kim +""" +# External Modules---------------------------------------------------------------------------------- +import numpy as np +import xarray as xr +from ravenframework.utils import frontUtils +# External Modules End------------------------------------------------------------------------------ + +# Internal Modules---------------------------------------------------------------------------------- +from ...utils.gaUtils import dataArrayToDict, datasetToDataArray +# Internal Modules End------------------------------------------------------------------------------ + +# @profile + +def singleObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g): + if self.counter == 1: + self.population = offSprings + self.fitness = offSpringFitness + self.objectiveVal = rlz[self._objectiveVar[0]].data + else: + self.population, self.fitness,\ + self.popAge,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge, + variables=list(self.toBeSampled), + population=self.population, + fitness=self.fitness, + newRlz=rlz, + offSpringsFitness=offSpringFitness, + popObjectiveVal=self.objectiveVal) + +def multiObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g): + if self.counter == 1: + self.population = offSprings + self.fitness = offSpringFitness + self.constraintsV = g + # offspringObjsVals for Rank and CD calculation + offObjVal = [] + for i in range(len(self._objectiveVar)): + offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + + # offspringFitVals for Rank and CD calculation + fitVal = datasetToDataArray(self.fitness, self._objectiveVar).data + offspringFitVals = fitVal.tolist() + offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringFitVals)) + self.rank = xr.DataArray(offSpringRank, + dims=['rank'], + coords={'rank': np.arange(np.shape(offSpringRank)[0])}) + offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, + popSize=len(offSpringRank), + objectives=np.array(offspringFitVals)) + + self.crowdingDistance = xr.DataArray(offSpringCD, + dims=['CrowdingDistance'], + coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) + self.objectiveVal = [] + for i in range(len(self._objectiveVar)): + self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) + else: + self.population,self.rank, \ + self.popAge,self.crowdingDistance, \ + self.objectiveVal,self.fitness, \ + self.constraintsV = self._survivorSelectionInstance(age=self.popAge, + variables=list(self.toBeSampled), + population=self.population, + offsprings=rlz, + popObjectiveVal=self.objectiveVal, + 
offObjectiveVal=objectiveVal, + popFit = self.fitness, + offFit = offSpringFitness, + popConstV = self.constraintsV, + offConstV = g) + + self._collectOptPointMulti(self.population, + self.rank, + self.crowdingDistance, + self.objectiveVal, + self.fitness, + self.constraintsV) + self._resolveNewGenerationMulti(traj, rlz, info) \ No newline at end of file diff --git a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py index 3ab2760895..d5649880a4 100644 --- a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py +++ b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py @@ -33,74 +33,6 @@ # @profile -def singleObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g): - if self.counter == 1: - self.population = offSprings - self.fitness = offSpringFitness - self.objectiveVal = rlz[self._objectiveVar[0]].data - else: - self.population, self.fitness,\ - self.popAge,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge, - variables=list(self.toBeSampled), - population=self.population, - fitness=self.fitness, - newRlz=rlz, - offSpringsFitness=offSpringFitness, - popObjectiveVal=self.objectiveVal) - -def multiObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g): - if self.counter == 1: - self.population = offSprings - self.fitness = offSpringFitness - self.constraintsV = g - # offspringObjsVals for Rank and CD calculation - offObjVal = [] - for i in range(len(self._objectiveVar)): - offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) - - # offspringFitVals for Rank and CD calculation - fitVal = datasetToDataArray(self.fitness, self._objectiveVar).data - offspringFitVals = fitVal.tolist() - offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringFitVals)) - self.rank = xr.DataArray(offSpringRank, - dims=['rank'], - coords={'rank': np.arange(np.shape(offSpringRank)[0])}) - offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank, - popSize=len(offSpringRank), - objectives=np.array(offspringFitVals)) - - self.crowdingDistance = xr.DataArray(offSpringCD, - dims=['CrowdingDistance'], - coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])}) - self.objectiveVal = [] - for i in range(len(self._objectiveVar)): - self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data))) - else: - self.population,self.rank, \ - self.popAge,self.crowdingDistance, \ - self.objectiveVal,self.fitness, \ - self.constraintsV = self._survivorSelectionInstance(age=self.popAge, - variables=list(self.toBeSampled), - population=self.population, - offsprings=rlz, - popObjectiveVal=self.objectiveVal, - offObjectiveVal=objectiveVal, - popFit = self.fitness, - offFit = offSpringFitness, - popConstV = self.constraintsV, - offConstV = g) - - self._collectOptPointMulti(self.population, - self.rank, - self.crowdingDistance, - self.objectiveVal, - self.fitness, - self.constraintsV) - self._resolveNewGenerationMulti(traj, rlz, info) - - - - def ageBased(newRlz,**kwargs): """ ageBased survivorSelection mechanism for new generation selection. From 8482a394eed55c16dc0a18a342ebd64b7e90e6c1 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Thu, 23 May 2024 11:33:08 -0600 Subject: [PATCH 76/84] contaminated HERON and TEAL is now back to RAVEN. 
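For reference, the recorded submodule pointers can be checked with
"git submodule status plugins/HERON plugins/TEAL" and, if needed, restored with
"git submodule update --init plugins/HERON plugins/TEAL"; the target commits are the
ones recorded in the diff below.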
--- plugins/HERON | 2 +- plugins/TEAL | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 160000 plugins/TEAL diff --git a/plugins/HERON b/plugins/HERON index cfc5d064eb..290e83f950 160000 --- a/plugins/HERON +++ b/plugins/HERON @@ -1 +1 @@ -Subproject commit cfc5d064ebce0f52b2d4d87199e3865bd459dc95 +Subproject commit 290e83f950fdb01bf1c7e0a464e3b197eadc3696 diff --git a/plugins/TEAL b/plugins/TEAL new file mode 160000 index 0000000000..97e32dbde3 --- /dev/null +++ b/plugins/TEAL @@ -0,0 +1 @@ +Subproject commit 97e32dbde3856aad8fda59010b1d845f2fbc7124 From 372f3849cbc635618157454144c424b4a9a0ecec Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Thu, 23 May 2024 11:56:23 -0600 Subject: [PATCH 77/84] trailing whitespaces are removed. --- ravenframework/Optimizers/GeneticAlgorithm.py | 70 +++++++++---------- ravenframework/Optimizers/fitness/fitness.py | 4 +- .../survivorSelection/survivorSelection.py | 2 +- 3 files changed, 38 insertions(+), 38 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index 1dabb6f8da..dc4b447e09 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -41,7 +41,7 @@ from .crossOverOperators.crossovers import returnInstance as crossoversReturnInstance from .mutators.mutators import returnInstance as mutatorsReturnInstance from .survivorSelectors.survivorSelectors import returnInstance as survivorSelectionReturnInstance -from .survivorSelection import survivorSelection as survivorSelectionProcess +from .survivorSelection import survivorSelection as survivorSelectionProcess from .fitness.fitness import returnInstance as fitnessReturnInstance from .repairOperators.repair import returnInstance as repairReturnInstance @@ -132,28 +132,28 @@ def getInputSpecification(cls): @ Out, specs, InputData.ParameterInput, class to use for specifying input of cls. """ specs = super(GeneticAlgorithm, cls).getInputSpecification() - specs.description = r"""The \xmlNode{GeneticAlgorithm} is a metaheuristic optimization technique inspired by the principles - of natural selection and genetics. Introduced by John Holland in the 1960s, GA mimics the process of - biological evolution to solve complex optimization and search problems. They operate by maintaining a population of - potential solutions represented as as arrays of fixed length variables (genes), and each such array is called a chromosome. + specs.description = r"""The \xmlNode{GeneticAlgorithm} is a metaheuristic optimization technique inspired by the principles + of natural selection and genetics. Introduced by John Holland in the 1960s, GA mimics the process of + biological evolution to solve complex optimization and search problems. They operate by maintaining a population of + potential solutions represented as as arrays of fixed length variables (genes), and each such array is called a chromosome. These solutions undergo iterative refinement through processes such as mutation, crossover, and survivor selection. Mutation involves randomly altering certain genes within - individual solutions, introducing diversity into the population and enabling exploration of new regions in the solution space. - Crossover, on the other hand, mimics genetic recombination by exchanging genetic material between two parent solutions to create - offspring with combined traits. 
Survivor selection determines which solutions will advance to the next generation based on - their fitness—how well they perform in solving the problem at hand. Solutions with higher fitness scores are more likely to - survive and reproduce, passing their genetic material to subsequent generations. This iterative process continues - until a stopping criterion is met, typically when a satisfactory solution is found or after a predetermined number of generations. - More information can be found in:\\\\ + individual solutions, introducing diversity into the population and enabling exploration of new regions in the solution space. + Crossover, on the other hand, mimics genetic recombination by exchanging genetic material between two parent solutions to create + offspring with combined traits. Survivor selection determines which solutions will advance to the next generation based on + their fitness—how well they perform in solving the problem at hand. Solutions with higher fitness scores are more likely to + survive and reproduce, passing their genetic material to subsequent generations. This iterative process continues + until a stopping criterion is met, typically when a satisfactory solution is found or after a predetermined number of generations. + More information can be found in:\\\\ Holland, John H. "Genetic algorithms." Scientific American 267.1 (1992): 66-73.\\\\ - - Non-dominated Sorting Genetic Algorithm II (NSGA-II) is a variant of GAs designed for multiobjective optimization problems. - NSGA-II extends traditional GAs by incorporating a ranking-based approach and crowding distance estimation to maintain a diverse set of - non-dominated (Pareto-optimal) solutions. This enables NSGA-II to efficiently explore trade-offs between conflicting objectives, - providing decision-makers with a comprehensive view of the problem's solution space. More information about NSGA-II can be found in:\\\\ + + Non-dominated Sorting Genetic Algorithm II (NSGA-II) is a variant of GAs designed for multiobjective optimization problems. + NSGA-II extends traditional GAs by incorporating a ranking-based approach and crowding distance estimation to maintain a diverse set of + non-dominated (Pareto-optimal) solutions. This enables NSGA-II to efficiently explore trade-offs between conflicting objectives, + providing decision-makers with a comprehensive view of the problem's solution space. More information about NSGA-II can be found in:\\\\ Deb, Kalyanmoy, et al. "A fast and elitist multiobjective genetic algorithm: NSGA-II." IEEE transactions on evolutionary computation 6.2 (2002): 182-197.\\\\ - + GA in RAVEN supports for both single and multi-objective optimization problem.""" # GA Params @@ -179,7 +179,7 @@ def getInputSpecification(cls): parentSelection = InputData.parameterInputFactory('parentSelection', strictMode=True, contentType=InputTypes.StringType, printPriority=108, - descr=r"""A node containing the criterion based on which the parents are selected. This can be a fitness proportional selection for all methods. + descr=r"""A node containing the criterion based on which the parents are selected. This can be a fitness proportional selection for all methods. The number of parents (i.e., nParents) is computed such that the population size is kept constant. \\\\ $nParents = ceil(\frac{1 + \sqrt{1+4*popSize}}{2})$. 
\\\\ The number of children (i.e., nChildren) is computed by \\\\ @@ -187,11 +187,11 @@ def getInputSpecification(cls): This will result in a popSize a little larger than the initial one, and the excessive children will be later thrawn away and only the first popSize child will be kept. \\\\ You can choose three options for parentSelection: \begin{itemize} - \item \textit{rouletteWheel} - It assigns probabilities to chromosomes based on their fitness, + \item \textit{rouletteWheel} - It assigns probabilities to chromosomes based on their fitness, allowing for selection proportionate to their likelihood of being chosen for reproduction. - \item \textit{tournamentSelection} - Chromosomes are randomly chosen from the population to compete in a tournament, + \item \textit{tournamentSelection} - Chromosomes are randomly chosen from the population to compete in a tournament, and the fittest individual among them is selected for reproduction. - \item \textit{rankSelection} - Chromosomes with higher fitness values are selected. + \item \textit{rankSelection} - Chromosomes with higher fitness values are selected. \end{itemize} """) GAparams.addSub(parentSelection) @@ -262,11 +262,11 @@ def getInputSpecification(cls): printPriority=108, descr=r"""a subnode containing the implemented survivor selection mechanisms. You can choose one of the survivor selection options listed below: \begin{itemize} - \item \textit{fitnessBased} - Individuals with higher fitness scores are more likely to be selected to survive and + \item \textit{fitnessBased} - Individuals with higher fitness scores are more likely to be selected to survive and proceed to the next generation. It suppoort only single-objective optimization problem. - \item \textit{ageBased} - Individuals are selected for survival based on their age or generation, with older individuals being prioritized + \item \textit{ageBased} - Individuals are selected for survival based on their age or generation, with older individuals being prioritized for retention. It suppoort only single-objective optimization problem. - \item \textit{rankNcrowdingBased} - Individuals with low rank and crowding distance are more likely to be selected to survive and + \item \textit{rankNcrowdingBased} - Individuals with low rank and crowding distance are more likely to be selected to survive and proceed to the next generation. It suppoort only multi-objective optimization problem. \end{itemize}""") GAparams.addSub(survivorSelection) @@ -277,19 +277,19 @@ def getInputSpecification(cls): printPriority=108, descr=r"""a subnode containing the implemented fitness functions.You can choose one of the fitness options listed below: \begin{itemize} - \item \textit{invLinear} - It assigns fitness values inversely proportional to the individual's objective function values, + \item \textit{invLinear} - It assigns fitness values inversely proportional to the individual's objective function values, prioritizing solutions with lower objective function values (i.e., minimization) for selection and reproduction. It suppoort only single-objective optimization problem.\\\\ $fitness = -a \times obj - b \times \sum_{j=1}^{nConstraint} max(0,-penalty_{j}) $\\ where j represents an index of objects \\ \item \textit{logistic} - It applies a logistic function to transform raw objective function values into fitness scores. 
It suppoort only single-objective optimization problem.\\\\ - $fitness = \frac{1}{1+e^{a\times(obj-b)}}$\\ - \item \textit{feasibleFirst} - It prioritizes solutions that meet constraints by assigning higher fitness scores to feasible solutions, + $fitness = \frac{1}{1+e^{a\times(obj-b)}}$\\ + \item \textit{feasibleFirst} - It prioritizes solutions that meet constraints by assigning higher fitness scores to feasible solutions, encouraging the evolution of individuals that satisfy the problem's constraints. It suppoort single-and multi-objective optimization problem.\\\\ $fitness = \left\{\begin{matrix} -obj & g_{j}(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right$\\ - \end{itemize} """) + \end{itemize} """) fitness.addParam("type", InputTypes.StringType, True, descr=r"""[invLin, logistic, feasibleFirst]""") objCoeff = InputData.parameterInputFactory('a', strictMode=True, @@ -462,11 +462,11 @@ def handleInput(self, paramInput): else: pass self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None - - # TODO: @JunyungKim, the code lines below are for 'feasibleFirst' temperarily. It should be generalized for invLinear as well. + + # TODO: @JunyungKim, the code lines below are for 'feasibleFirst' temperarily. It should be generalized for invLinear as well. if self._fitnessType == 'feasibleFirst': # Case 1: There is constraint(s) and penaltyCoeff are given by users - if self._numOfConst != 0 and fitnessNode.findFirst('b') is not None: + if self._numOfConst != 0 and fitnessNode.findFirst('b') is not None: self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else [1] * len(self._objectiveVar) self._penaltyCoeff = fitnessNode.findFirst('b').value # Case 2: There is NO constraint and penaltyCoeff are given by users @@ -479,7 +479,7 @@ def handleInput(self, paramInput): # Case 4: There is NO constraint and penaltyCoeff is NOT given by users else: self._objCoeff = [1] * len(self._objectiveVar) - self._penaltyCoeff = [0] * len(self._objectiveVar) + self._penaltyCoeff = [0] * len(self._objectiveVar) else: self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None @@ -695,7 +695,7 @@ def _useRealization(self, info, rlz): # newMultiBestObjective[i,1], str(self.batchId)) # # plt.savefig('PF'+str(i)+'_'+str(self.batchId)+'.png') # plt.savefig('PF_'+str(self.batchId)+'.png') - + # Step 2 @ n: Parent selection from population # Pair parents together by indexes parents = self._parentSelectionInstance(self.population, @@ -1005,7 +1005,7 @@ def _collectOptPointMulti(self, population, rank, CD, objVal, fitness, constrain self.multiBestRank = optRank self.multiBestCD = optCD - return + return def _checkAcceptability(self, traj): diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py index 332daeef5d..db58da1754 100644 --- a/ravenframework/Optimizers/fitness/fitness.py +++ b/ravenframework/Optimizers/fitness/fitness.py @@ -59,9 +59,9 @@ def invLinear(rlz,**kwargs): the farthest from violating the constraint it is, The highest negative value it have the largest the violation is. @ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome. """ - #NOTE invLinear is not yet support Multi-objective optimization problem solving. 
Further literature reivew applying invLinear method to multi-objective optimization + #NOTE invLinear is not yet support Multi-objective optimization problem solving. Further literature reivew applying invLinear method to multi-objective optimization # needs to be involved. Potentially, applying obj_Worst in fitness function (i.e., -a[j] * (rlz[objVar][objVar[j]].data).reshape(-1,1) - b[j] * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1)) - # should be considerd . + # should be considerd . a = [1.0] if kwargs['a'] == None else kwargs['a'] b = [10.0] if kwargs['b'] == None else kwargs['b'] penalty = 0.0 if kwargs['constraintFunction'].all() == None else kwargs['constraintFunction'].data diff --git a/ravenframework/Optimizers/survivorSelection/survivorSelection.py b/ravenframework/Optimizers/survivorSelection/survivorSelection.py index 8d195fbadd..bb0a738ea4 100644 --- a/ravenframework/Optimizers/survivorSelection/survivorSelection.py +++ b/ravenframework/Optimizers/survivorSelection/survivorSelection.py @@ -13,7 +13,7 @@ # limitations under the License. """ Implementation of survivorSelection step for new generation - selection process in Genetic Algorithm. + selection process in Genetic Algorithm. Created Apr,3,2024 @authors: Mohammad Abdo, Junyung Kim From 289094e595fe160cb1cce5e8154323008cce13f8 Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Thu, 23 May 2024 14:54:59 -0600 Subject: [PATCH 78/84] Issues created due to having objective variable type be List are partially fixed. No errors of BayesianOptimizer tests. Diff still exist. --- .../Optimizers/BayesianOptimizer.py | 29 ++++++++++--------- .../ExpectedImprovement.py | 4 +-- .../LowerConfidenceBound.py | 4 +-- .../ProbabilityOfImprovement.py | 2 +- 4 files changed, 20 insertions(+), 19 deletions(-) diff --git a/ravenframework/Optimizers/BayesianOptimizer.py b/ravenframework/Optimizers/BayesianOptimizer.py index 8378e334c6..ff1d743727 100644 --- a/ravenframework/Optimizers/BayesianOptimizer.py +++ b/ravenframework/Optimizers/BayesianOptimizer.py @@ -150,6 +150,7 @@ def __init__(self): self._paramSelectionOptions = {'ftol':1e-10, 'maxiter':200, 'disp':False} # Optimizer options for hyperparameter selection self._externalParamOptimizer = 'fmin_l_bfgs_b' # Optimizer for external hyperparameter selection self._resetModel = False # Reset regression model if True + self._canHandleMultiObjective = False # boolean indicator whether optimization is a sinlge-objective problem or a multi-objective problem def handleInput(self, paramInput): """ @@ -232,8 +233,8 @@ def initialize(self, externalSeeding=None, solutionExport=None): elif len(self._model.supervisedContainer[0].target) != 1: self.raiseAnError(RuntimeError, f'Only one target allowed when using GPR ROM for Bayesian Optimizer! 
' f'Received {len(self._model.supervisedContainer[0].target)}') - elif self._objectiveVar not in self._model.supervisedContainer[0].target: - self.raiseAnError(RuntimeError, f'GPR ROM should be obective variable: {self._objectiveVar}, ' + elif self._objectiveVar[0] not in self._model.supervisedContainer[0].target: + self.raiseAnError(RuntimeError, f'GPR ROM should be obective variable: {self._objectiveVar[0]}, ' f'Received {self._model.supervisedContainer[0].target}') if self._resetModel: @@ -265,8 +266,8 @@ def initialize(self, externalSeeding=None, solutionExport=None): trainingData = self.normalizeData(trainingData) for varName in self.toBeSampled.keys(): self._trainingInputs[0][varName] = list(trainingData[varName]) - self._trainingTargets.append(list(trainingData[self._objectiveVar])) - self.raiseAMessage(f"{self._model.name} ROM has been already trained with {len(trainingData[self._objectiveVar])} samples!", + self._trainingTargets.append(list(trainingData[self._objectiveVar[0]])) + self.raiseAMessage(f"{self._model.name} ROM has been already trained with {len(trainingData[self._objectiveVar[0]])} samples!", "This pre-trained ROM will be used by Optimizer to evaluate the next best point!") # retrieving the best solution is based on the acqusition function's utility # Constraints are considered in the following method. @@ -333,7 +334,7 @@ def _useRealization(self, info, rlz): # Add new inputs and model evaluations to the dataset for varName in list(self.toBeSampled): self._trainingInputs[traj][varName].extend(getattr(rlz, varName).values) - self._trainingTargets[traj].extend(getattr(rlz, self._objectiveVar).values) + self._trainingTargets[traj].extend(getattr(rlz, self._objectiveVar[0]).values) # Generate posterior with training data self._generatePredictiveModel(traj) self._resolveMultiSample(traj, rlz, info) @@ -343,10 +344,10 @@ def _useRealization(self, info, rlz): # Add new input and model evaluation to the dataset for varName in list(self.toBeSampled): self._trainingInputs[traj][varName].append(rlz[varName]) - self._trainingTargets[traj].append(rlz[self._objectiveVar]) + self._trainingTargets[traj].append(rlz[self._objectiveVar[0]]) # Generate posterior with training data self._generatePredictiveModel(traj) - optVal = rlz[self._objectiveVar] + optVal = rlz[self._objectiveVar[0]] self._resolveNewOptPoint(traj, rlz, optVal, info) # Use acquisition to select next point @@ -555,7 +556,7 @@ def _trainRegressionModel(self, traj): for varName in list(self.toBeSampled): trainingSet[varName] = np.asarray(self._trainingInputs[traj][varName]) - trainingSet[self._objectiveVar] = np.asarray(self._trainingTargets[traj]) + trainingSet[self._objectiveVar[0]] = np.asarray(self._trainingTargets[traj]) self._model.train(trainingSet) # NOTE It would be preferrable to use targetEvaluation; # however, there does not appear a built in normalization method and as @@ -596,8 +597,8 @@ def _evaluateRegressionModel(self, featurePoint): # Evaluating the regression model resultsDict = self._model.evaluate(featurePoint) # NOTE only allowing single targets, needs to be fixed when multi-objective optimization is added - mu = resultsDict[self._objectiveVar] - std = resultsDict[self._objectiveVar+'_std'] + mu = resultsDict[self._objectiveVar[0]] + std = resultsDict[self._objectiveVar[0]+'_std'] return mu, std # * * * * * * * * * * * * @@ -627,7 +628,7 @@ def _resolveMultiSample(self, traj, rlz, info): for index in range(info['batchSize']): for varName in rlzVars: singleRlz[varName] = getattr(rlz, 
varName)[index].values - optVal = singleRlz[self._objectiveVar] + optVal = singleRlz[self._objectiveVar[0]] self._resolveNewOptPoint(traj, singleRlz, optVal, info) singleRlz = {} # FIXME is this necessary? self.raiseADebug(f'Multi-sample resolution completed') @@ -664,7 +665,7 @@ def _resolveNewOptPoint(self, traj, rlz, optVal, info): currentPoint = {} for decisionVarName in list(self.toBeSampled): currentPoint[decisionVarName] = rlz[decisionVarName] - rlz[self._objectiveVar] = self._evaluateRegressionModel(currentPoint)[0][0] + rlz[self._objectiveVar[0]] = self._evaluateRegressionModel(currentPoint)[0][0] self.raiseADebug('*' * 80) if acceptable in ['accepted', 'first']: # record history @@ -675,13 +676,13 @@ def _resolveNewOptPoint(self, traj, rlz, optVal, info): # If the last recommended solution point is the same, update the expected function value if all(old[var] == xStar[var] for var in list(self.toBeSampled)): newEstimate = copy.copy(old) - newEstimate[self._objectiveVar] = muStar + newEstimate[self._objectiveVar[0]] = muStar self._optPointHistory[traj].append((newEstimate, info)) else: newRealization = copy.copy(old) for var in list(self.toBeSampled): newRealization[var] = xStar[var] - newRealization[self._objectiveVar] = muStar + newRealization[self._objectiveVar[0]] = muStar else: self.raiseAnError(f'Unrecognized acceptability: "{acceptable}"') diff --git a/ravenframework/Optimizers/acquisitionFunctions/ExpectedImprovement.py b/ravenframework/Optimizers/acquisitionFunctions/ExpectedImprovement.py index eac639237a..360765d3ae 100644 --- a/ravenframework/Optimizers/acquisitionFunctions/ExpectedImprovement.py +++ b/ravenframework/Optimizers/acquisitionFunctions/ExpectedImprovement.py @@ -67,7 +67,7 @@ def evaluate(self, var, bayesianOptimizer, vectorized=False): """ # Need to retrieve current optimum point best = bayesianOptimizer._optPointHistory[0][-1][0] - fopt = best[bayesianOptimizer._objectiveVar] + fopt = best[bayesianOptimizer._objectiveVar[0]] # Need to convert array input "x" into dict point featurePoint = bayesianOptimizer.arrayToFeaturePoint(var) @@ -112,7 +112,7 @@ def gradient(self, var, bayesianOptimizer): # Need to retrieve current optimum point best = bayesianOptimizer._optPointHistory[0][-1][0] - fopt = best[bayesianOptimizer._objectiveVar] + fopt = best[bayesianOptimizer._objectiveVar[0]] # Other common quantities beta = (fopt - mu)/s phi = norm.pdf(beta) diff --git a/ravenframework/Optimizers/acquisitionFunctions/LowerConfidenceBound.py b/ravenframework/Optimizers/acquisitionFunctions/LowerConfidenceBound.py index b3b8e1311a..787c204d5f 100644 --- a/ravenframework/Optimizers/acquisitionFunctions/LowerConfidenceBound.py +++ b/ravenframework/Optimizers/acquisitionFunctions/LowerConfidenceBound.py @@ -197,8 +197,8 @@ def _converged(self, bayesianOptimizer): if self._optValue is None: converged = False return converged - optDiff = np.absolute(-1*self._optValue - bayesianOptimizer._optPointHistory[0][-1][0][bayesianOptimizer._objectiveVar]) - optDiff /= np.absolute(bayesianOptimizer._optPointHistory[0][-1][0][bayesianOptimizer._objectiveVar]) + optDiff = np.absolute(-1*self._optValue - bayesianOptimizer._optPointHistory[0][-1][0][bayesianOptimizer._objectiveVar[0]]) + optDiff /= np.absolute(bayesianOptimizer._optPointHistory[0][-1][0][bayesianOptimizer._objectiveVar[0]]) if optDiff <= bayesianOptimizer._acquisitionConv: converged = True else: diff --git a/ravenframework/Optimizers/acquisitionFunctions/ProbabilityOfImprovement.py 
b/ravenframework/Optimizers/acquisitionFunctions/ProbabilityOfImprovement.py index 7f66d31dc5..04742a2b1a 100644 --- a/ravenframework/Optimizers/acquisitionFunctions/ProbabilityOfImprovement.py +++ b/ravenframework/Optimizers/acquisitionFunctions/ProbabilityOfImprovement.py @@ -129,7 +129,7 @@ def evaluate(self, var, bayesianOptimizer, vectorized=False): """ # Need to retrieve current optimum point best = bayesianOptimizer._optPointHistory[0][-1][0] - fopt = best[bayesianOptimizer._objectiveVar] + fopt = best[bayesianOptimizer._objectiveVar[0]] # Need to convert array input "x" into dict point featurePoint = bayesianOptimizer.arrayToFeaturePoint(var) From 7b66055863e3172bc08138697b2749487c7562fe Mon Sep 17 00:00:00 2001 From: Junyung Kim Date: Thu, 23 May 2024 16:24:34 -0600 Subject: [PATCH 79/84] MultiObjective_Beale-Bealeflipped is added. --- .../MultiObjectiveBeale-Bealeflipped.xml | 125 ++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/MultiObjectiveBeale-Bealeflipped.xml diff --git a/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/MultiObjectiveBeale-Bealeflipped.xml b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/MultiObjectiveBeale-Bealeflipped.xml new file mode 100644 index 0000000000..06dbd7d901 --- /dev/null +++ b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/MultiObjectiveBeale-Bealeflipped.xml @@ -0,0 +1,125 @@ + + + + raven/tests/framework/Optimizers/GeneticAlgorithms.NSGAII + Mohammad Abdo + 2024-02-18 + + NSGA-II min-max test + + + + Multi_beale_bealeFlipped + optimize,print + 4 + + + + + placeholder + beale + GAopt + opt_export + optOut + opt_export + + + opt_export + optOut + opt_export + optOut + + + + + + x,y,obj1,obj2 + + + + + + 0 + 5 + + + + + + + 5 + 42 + every + min, max + + + 50 + tournamentSelection + + + 0.8 + + + 0.8 + + + + + + rankNcrowdingBased + + + 0.0 + + + woRep_dist + + + woRep_dist + + obj1, obj2 + optOut + MC_samp + + + + + + + + 50 + 050877 + + + woRep_dist + + + woRep_dist + + + + + + + + x,y + obj1, obj2 + + + trajID + x,y,obj1,obj2,age,batchId,rank,CD,FitnessEvaluation_obj1,FitnessEvaluation_obj2,accepted + + + + + + csv + optOut + + + csv + opt_export + trajID + + + \ No newline at end of file From 542943ed6441d551bc239f181af4e691950993a0 Mon Sep 17 00:00:00 2001 From: Juan Luque-Gutierrez Date: Wed, 17 Jul 2024 23:40:31 -0600 Subject: [PATCH 80/84] [WIP] Crossover.py and GeneticAlgorithm.py have been modified to include Partially Mapped Crossover (Two Points) --- ravenframework/Optimizers/GeneticAlgorithm.py | 4 +- .../crossOverOperators/crossovers.py | 102 +++++++++++++++++- 2 files changed, 102 insertions(+), 4 deletions(-) diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py index dc4b447e09..243ed9e933 100644 --- a/ravenframework/Optimizers/GeneticAlgorithm.py +++ b/ravenframework/Optimizers/GeneticAlgorithm.py @@ -398,8 +398,8 @@ def handleInput(self, paramInput): #################################################################################### crossoverNode = reproductionNode.findFirst('crossover') self._crossoverType = crossoverNode.parameterValues['type'] - if self._crossoverType not in ['onePointCrossover','twoPointsCrossover','uniformCrossover']: - self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support onePointCrossover, twoPointsCrossover and uniformCrossover as a crossover, whereas provided 
crossover is {self._crossoverType}') + if self._crossoverType not in ['onePointCrossover','twoPointsCrossover','uniformCrossover', 'partiallyMappedCrossover']: + self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support onePointCrossover, twoPointsCrossover, uniformCrossover and partiallyMappedCrossover as a crossover, whereas provided crossover is {self._crossoverType}') if crossoverNode.findFirst('points') is None: self._crossoverPoints = None else: diff --git a/ravenframework/Optimizers/crossOverOperators/crossovers.py b/ravenframework/Optimizers/crossOverOperators/crossovers.py index f03d03ef07..51867a1b5b 100644 --- a/ravenframework/Optimizers/crossOverOperators/crossovers.py +++ b/ravenframework/Optimizers/crossOverOperators/crossovers.py @@ -15,9 +15,13 @@ Implementation of crossovers for crossover process of Genetic Algorithm currently the implemented crossover algorithms are: 1. OnePoint Crossover + 2. TwoPoints Crossover + 3. Uniform Crossover + 4. TwoPoints Partially Mapped Crossover (PMX) Created June,16,2020 - @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi + Last update July,8,2024 + @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi, Juan Luque-Gutierrez """ import numpy as np @@ -151,11 +155,57 @@ def twoPointsCrossover(parents, **kwargs): return children +def partiallyMappedCrossover(parents, **kwargs): + """ + Method designed to perform a two point partially mapped crossover (MPX) on 2 parents: + Partition each parents in three sequences (A,B,C): + parent1 = A1 B1 C1 + parent2 = A2 B2 C2 + Then: + children1 = A1* B2 C1* + children2 = A2* B1 C2* + Children should have the same elements as their parents, but in different order. + This crossover preserves the genes in a chromosome. + @ In, parents, xr.DataArray, parents involved in the mating process + @ In, kwargs, dict, dictionary of parameters for this mutation method: + parents, 2D array, parents in the current mating process. + Shape is nParents x len(chromosome) i.e, number of Genes/Vars + crossoverProb, float, crossoverProb determines when child takes genes from a specific parent, default is random + points, integer, point at which the cross over happens, default is random + @ Out, children, xr.DataArray, children resulting from the crossover. 
Shape is nParents x len(chromosome) i.e., number of Genes/Vars
+  """
+  nParents, nGenes = np.shape(parents)
+  children = xr.DataArray(np.zeros((int(2*comb(nParents,2)), np.shape(parents)[1])),
+                          dims = ['chromosome', 'Gene'],
+                          coords = {'chromosome': np.arange(int(2*comb(nParents, 2))),
+                                    'Gene':parents.coords['Gene'].values})
+  parentPairs = list(combinations(parents, 2))
+  index = 0
+  if nGenes <= 2:
+    raise ValueError('The number of genes should be >= 3')
+  for couples in parentPairs:
+    [loc1, loc2] = randomUtils.randomChoice(list(range(1, nGenes)), size = 2, replace=False, engine=None)
+    if loc1 > loc2:
+      locL = loc2
+      locU = loc1
+    else:
+      locL = loc1
+      locU = loc2
+    parent1 = couples[0]
+    parent2 = couples[1]
+    children1, children2 = twoPointsPMXMethod(parent1, parent2, locL, locU)
+
+    children[index] = children1
+    children[index + 1] = children2
+    index = index + 2
+
+  return children
+
 __crossovers = {}
 __crossovers['onePointCrossover'] = onePointCrossover
 __crossovers['twoPointsCrossover'] = twoPointsCrossover
 __crossovers['uniformCrossover'] = uniformCrossover
-
+__crossovers['partiallyMappedCrossover'] = partiallyMappedCrossover
 
 def returnInstance(cls, name):
   """
@@ -215,3 +265,51 @@ def uniformCrossoverMethod(parent1,parent2,crossoverProb):
       children2[pos] = parent2[pos]
 
   return children1,children2
+
+def twoPointsPMXMethod(parent1, parent2, locL, locU):
+  """
+  Method designed to perform a two point Partially Mapped Crossover (PMX) on 2 arrays:
+  Partition each array into three sequences (A, B, C):
+  parent1 = A1 B1 C1
+  parent2 = A2 B2 C2
+  We map the values contained in B1 to B2.
+  Then:
+  children1 = X B2 X
+  children2 = X B1 X
+  We verify if the values in A and C are found in B for each children. If so, we
+  replace such values for the ones in the map.
+  children1 = A1* B2 C1*
+  children2 = A2* B1 C2*
+  Children should have the same elements as their parents, but in different order. 
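+  Example (illustrative): with parent1 = [1 2 3 4 5], parent2 = [3 4 5 1 2], locL = 1, locU = 3,
+  the mapped segment swap yields children1 = [1 4 5 2 3] and children2 = [5 2 3 1 4].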
+ @ In, parent1, first array + @ In, parent2, second array + @ In, locL: first location + @ In, LocU: second location + @ Out, children1: first generated array + @ Out, children2: second generated array + """ + + size = len(parent1) + + children1 = parent1.copy(deep=True) + children2 = parent2.copy(deep=True) + + seqB1 = parent1.values[locL:locU] + seqB2 = parent2.values[locL:locU] + + children1[locL:locU] = seqB2 + children2[locL:locU] = seqB1 + + # Determine mapping relationship + mapping1 = {parent2.values[i]: parent1.values[i] for i in range(locL, locU)} + mapping2 = {parent1.values[i]: parent2.values[i] for i in range(locL, locU)} + + for i in list(range(locL)) + list(range(locU, size)): + if children1.values[i] in mapping1: + while children1.values[i] in mapping1: + children1.values[i] = mapping1[children1.values[i]] + if children2.values[i] in mapping2: + while children2.values[i] in mapping2: + children2.values[i] = mapping2[children2.values[i]] + + return children1, children2 \ No newline at end of file From e13f1906204e26c545688e3a352090cc20dc1914 Mon Sep 17 00:00:00 2001 From: Juan Luque-Gutierrez Date: Thu, 18 Jul 2024 23:00:15 -0600 Subject: [PATCH 81/84] Interface changes for multicycle optimization 7/18 --- .../GeneticAlgorithms/Models/Constraints.py | 8 +- .../GeneticAlgorithms/Models/LocalSum.py | 4 +- .../SIMULATE3/SimulateData.py | 178 +++++++++++++----- .../SIMULATE3/SpecificParser.py | 157 ++++++++++++++- 4 files changed, 284 insertions(+), 63 deletions(-) diff --git a/doc/workshop/optimizer/GeneticAlgorithms/Models/Constraints.py b/doc/workshop/optimizer/GeneticAlgorithms/Models/Constraints.py index 6ddac6dce2..d56fec0f4d 100644 --- a/doc/workshop/optimizer/GeneticAlgorithms/Models/Constraints.py +++ b/doc/workshop/optimizer/GeneticAlgorithms/Models/Constraints.py @@ -44,7 +44,7 @@ def YY(Input):#Complete this: give the function the correct name# return g -def XX(Input):#You are free to pick this name but it has to be similar to the one in the xml# +def expConstr1(Input): #You are free to pick this name but it has to be similar to the one in the xml# """ Let's assume that the constraint is: $ x3+x4 < 8 $ @@ -54,7 +54,7 @@ def XX(Input):#You are free to pick this name but it has to be similar to the on @ In, Input, object, RAVEN container @ out, g, float, explicit constraint 1 evaluation function """ - g = # Write the explicit constraint here + g = Input.x3 + Input.x4 - 8 # Write the explicit constraint here return g def expConstr2(Input): @@ -76,7 +76,7 @@ def impConstr1(Input): @ In, Input, object, RAVEN container @ out, g, float, implicit constraint 1 evaluation function """ - return 10 - Input.x1**2 - Input.obj + return 10 - Input.x1**2 - Input.ans def impConstr2(Input): """ @@ -85,5 +85,5 @@ def impConstr2(Input): @ In, Input, object, RAVEN container @ out, g, float, implicit constraint 2 evaluation function """ - g = Input.x1**2 + Input.obj - 10 + g = Input.x1**2 + Input.ans - 10 return g diff --git a/doc/workshop/optimizer/GeneticAlgorithms/Models/LocalSum.py b/doc/workshop/optimizer/GeneticAlgorithms/Models/LocalSum.py index 3b07d28589..d04b4f915f 100644 --- a/doc/workshop/optimizer/GeneticAlgorithms/Models/LocalSum.py +++ b/doc/workshop/optimizer/GeneticAlgorithms/Models/LocalSum.py @@ -30,7 +30,7 @@ def evaluate(Inputs): Sum = 0 for ind,var in enumerate(Inputs.keys()): # write the objective function here - Sum += + Sum += (ind+1) * Inputs[var] return Sum[:] def run(self,Inputs): @@ -40,4 +40,4 @@ def run(self,Inputs): @ In, Inputs, dict, additional inputs @ 
Out, None """ - self.?? = evaluate(Inputs) # Complete This # make sure the name of the objective is consistent obj + self.ans = evaluate(Inputs) # Complete This # make sure the name of the objective is consistent obj diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py index 817a7d5381..77c4bfd8ed 100644 --- a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py +++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py @@ -36,13 +36,15 @@ def __init__(self,filen): # retrieve data self.data['axial_mesh'] = self.axialMeshExtractor() self.data['keff'] = self.coreKeffEOC() - self.data['FDeltaH'] = self.maxFDH() + self.data["MaxFDH"] = self.maxFDH() self.data["kinf"] = self.kinfEOC() - self.data["boron"] = self.boronEOC() + self.data["max_boron"] = self.boronEOC() self.data["cycle_length"] = self.EOCEFPD() - self.data["PinPowerPeaking"] = self.pinPeaking() + self.data["pin_peaking"] = self.pinPeaking() self.data["exposure"] = self.burnupEOC() - self.data["assembly_power"] = self.assemblyPeakingFactors() + # self.data["assembly_power"] = self.assemblyPeakingFactors() + # self.data["fuel_cost"] = self.fuel_cost() + # this is a dummy variable for demonstration with MOF # check if something has been found if all(v is None for v in self.data.values()): @@ -142,52 +144,52 @@ def coreKeffEOC(self): return outputDict - def assemblyPeakingFactors(self): - """ - Extracts the assembly radial power peaking factors as a dictionary - with the depletion step in GWD/MTU as the dictionary keys. - @ In, None - @ Out, outputDict, dict, the dictionary containing the read data (None if none found) - {'info_ids':list(of ids of data), - 'values': list} - """ - radialPowerDictionary = {} - searching_ = False - outputDict = None - for line in self.lines: - if "Case" in line and "GWd/MT" in line: - elems = line.strip().split() - depl = elems[-2] - if depl in radialPowerDictionary: - pass - else: - radialPowerDictionary[depl] = {} - if "** H- G- F- E- D- C- B- A- **" in line: - searching_ = False - - if searching_: - elems = line.strip().split() - if elems[0] == "**": - posList = elems[1:-1] - else: - radialPowerDictionary[depl][elems[0]] = {} - for i,el in enumerate(elems[1:-1]): - radialPowerDictionary[depl][elems[0]][posList[i]] = float(el) - - if "PRI.STA 2RPF - Assembly 2D Ave RPF - Relative Power Fraction" in line: - searching_ = True - - if not radialPowerDictionary: - return ValueError("No values returned. Check Simulate File executed correctly") - else: - maxPeaking = 0.0 - for depl in radialPowerDictionary: - for row in radialPowerDictionary[depl]: - for col in radialPowerDictionary[depl][row]: - maxPeaking = max(radialPowerDictionary[depl][row][col],maxPeaking) - outputDict = {'info_ids':['FA_peaking'], 'values': [maxPeaking] } - - return outputDict + # def assemblyPeakingFactors(self): + # """ + # Extracts the assembly radial power peaking factors as a dictionary + # with the depletion step in GWD/MTU as the dictionary keys. 
+ # @ In, None + # @ Out, outputDict, dict, the dictionary containing the read data (None if none found) + # {'info_ids':list(of ids of data), + # 'values': list} + # """ + # radialPowerDictionary = {} + # searching_ = False + # outputDict = None + # for line in self.lines: + # if "Case" in line and "GWd/MT" in line: + # elems = line.strip().split() + # depl = elems[-2] + # if depl in radialPowerDictionary: + # pass + # else: + # radialPowerDictionary[depl] = {} + # if "** H- G- F- E- D- C- B- A- **" in line: + # searching_ = False + + # if searching_: + # elems = line.strip().split() + # if elems[0] == "**": + # posList = elems[1:-1] + # else: + # radialPowerDictionary[depl][elems[0]] = {} + # for i,el in enumerate(elems[1:-1]): + # radialPowerDictionary[depl][elems[0]][posList[i]] = float(el) + + # if "PRI.STA 2RPF - Assembly 2D Ave RPF - Relative Power Fraction" in line: + # searching_ = True + + # if not radialPowerDictionary: + # return ValueError("No values returned. Check Simulate File executed correctly") + # else: + # maxPeaking = 0.0 + # for depl in radialPowerDictionary: + # for row in radialPowerDictionary[depl]: + # for col in radialPowerDictionary[depl][row]: + # maxPeaking = max(radialPowerDictionary[depl][row][col],maxPeaking) + # outputDict = {'info_ids':['FA_peaking'], 'values': [maxPeaking] } + + # return outputDict def EOCEFPD(self): """ @@ -256,6 +258,8 @@ def pinPeaking(self): spot = elems.index('Max-3PIN') list_.append(float(elems[spot+1])) + print(f"This is Fq={max(list_)}") + if not list_: return ValueError("No values returned. Check Simulate File executed correctly") else: @@ -486,6 +490,80 @@ def burnupEOC(self): return outputDict + def fuel_cost(self): + """ + Extracts the fuel types used in the core map and calculates the fuel cost based on a front end approach. + This function applies only to quarter core symmetries. + @ In, Non + @ Out, outputDict, dict, the dictionary containing the rea data (None if non found) + {'info_ids': list(of ids of data), + 'values': list} + """ + outputDict = None + # First, we need to parse the core map from the output file. + # NOTE: Given that a run is not needed to know the Loading Pattern, this function could be on the input side. + FA_list = [] + for line in self.lines: + if "'FUE.TYP'" in line: + p1 = line.index(",") + p2 = line.index("/") + search_space = line[p1:p2] + search_space = search_space.replace(",","") + temp = search_space.split() + for i in temp: + FA_list.append(float(i)) + FA_types = list(set(FA_list)) + quartcore_size = len(temp) + + # We separate the core map depending on how many times their elements are counted in the symmetry: + # FA_list_A counted once, as it is the center of the core. + # FA_list_B counted twice, as they are are the centerlines. + # FA_list_C counted four times, as they are are the rest of fuel assemblies. + FA_list_A = FA_list[0] + FA_list_B = FA_list[1:quartcore_size] + FA_list[quartcore_size:quartcore_size*(quartcore_size-1)+1:quartcore_size] + FA_list_C = [] + for i in range(quartcore_size-1): + FA_list_C.append(FA_list[(i+1)*quartcore_size + 1: (i+2)*quartcore_size]) + FA_list_C = [item for sublist in FA_list_C for item in sublist] # To flatten FA_list_C + # Now we proceed to count how many fuel types of each type are there in our core. 
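+    # Illustration (hypothetical 3x3 quarter core, not from the original patch):
+    # with quartcore_size = 3 and FA_list = [1, 2, 3, 2, 5, 6, 3, 6, 9],
+    # FA_list_A = 1 (the center assembly, counted once), FA_list_B = [2, 3, 2, 3]
+    # (centerline assemblies, each standing for 2 in the full core), and
+    # FA_list_C = [5, 6, 6, 9] (interior assemblies, each standing for 4),
+    # so fuel type 6 contributes 2 * 4 = 8 assemblies to the full-core count.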
+ FA_count_A = [float(fa == FA_list_A) for fa in FA_types] + FA_count_B = [float(FA_list_B.count(fa)*2) for fa in FA_types] + FA_count_C = [float(FA_list_C.count(fa)*4) for fa in FA_types] + FA_count = [FA_count_A[j] + FA_count_B[j] + FA_count_C[j] for j in range(len(FA_types))] + # And create a dictionary with all the fuel types count. + FA_types_dict = {int(FA_types[i]):FA_count[i] for i in range(len(FA_types))} + + # Dictionary with the unit cost for each FA type. + + # FA type 0 = empty -> M$ 0.0 + # FA type 1 = reflector -> M$ 0.0 + # FA type 2 = 2.00 wt% -> M$ 2.69520839 + # FA type 3 = 2.50 wt% -> M$ 3.24678409 + # FA type 4 = 2.50 wt% + Gd -> M$ 3.24678409 + # FA type 5 = 3.20 wt% -> M$ 4.03739539 + # FA type 6 = 3.20 wt% + Gd -> M$ 4.03739539 + # The cost of burnable poison is not being considered. + + cost_dict = { + 0: 0, + 1: 0, + 2: 2.69520839, + 3: 3.24678409, + 4: 3.24678409, + 5: 4.03739539, + 6: 4.03739539 + } + + fuel_cost = 0 + for fuel_type, fuel_count in FA_types_dict.items(): + fuel_cost += fuel_count * cost_dict[fuel_type] + + if not fuel_cost: + return ValueError("No values returned. Check Simulate file executed correctly.") + else: + outputDict = {'info_ids':['fuel_cost'], 'values': [fuel_cost]} + return outputDict + def writeCSV(self, fileout): """ Print Data into CSV format diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py index a59a96a913..d103aafc87 100644 --- a/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py +++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py @@ -48,6 +48,12 @@ def getParameters(self): self.symmetry = root.find('symmetry').text.strip() self.numberAssemblies = int(root.find('number_assemblies').text) self.reflectorFlag = root.find('reflector').text.strip() + self.activeHeight = root.find('active_height').text.strip() + self.bottomReflectorTypeNumber = root.find('bottom_reflector_typenumber').text.strip() + self.topReflectorTypeNumber = root.find('top_reflector_typenumber').text.strip() + self.freshFaDict = [] + for freshFa in root.iter('FreshFA'): + self.freshFaDict.append(freshFa.attrib) self.faDict = [] for fa in root.iter('FA'): self.faDict.append(fa.attrib) @@ -115,23 +121,56 @@ def generateSim3Input(self, parameter): file_.write(f"'DIM.PWR' {parameter.coreWidth}/\n") file_.write(f"'DIM.CAL' {parameter.axialNodes} 2 2/\n") file_.write("'DIM.DEP' 'EXP' 'PIN' 'HTMO' 'HBOR' 'HTF' 'XEN' 'SAM' 'EBP'/ \n") + file_.write("'ERR.CHK' 'PERMIT'/\n") file_.write("\n") - loadingPattern = getMap(parameter, [int(child.attrib['FAid']) for child in self.data]) - file_.write(loadingPattern) # later, from get map + if parameter.batchNumber >=2: + file_.write("'FUE.LAB', 6/\n") + shufflingScheme = getShufflingScheme(parameter, [int(child.attrib['FAid']) for child in self.data]) + file_.write(shufflingScheme) + else: + loadingPattern = getMap(parameter, [int(child.attrib['FAid']) for child in self.data]) + file_.write(loadingPattern) # later, from get map file_.write("\n") + # if parameter.batchNumber <=1: + # pass + # else: + # raise ValueError("Larger batch number is not available for this version") if parameter.batchNumber <=1: - pass - else: - raise ValueError("Larger batch number is not available for this version") - file_.write(f"'LIB' '../../{parameter.csLib}' \n") + file_.write(f"'LIB' '../../{parameter.csLib}' \n") file_.write(f"'COR.OPE' {parameter.power}, {parameter.flow}, {parameter.pressure}/\n") file_.write("'COR.TIN' {}/ 
\n".format(parameter.inletTemperature)) file_.write("\n") if parameter.batchNumber >= 2: + # file_.write("'COM' SERIAL NUMBER TO FUEL BATCH \n") + # file_.write("'COM' LABEL CREATE TYPE NUMBER \n") + # for item in parameter.freshFaDict: + # file_.write(f"'FUE.NEW', 'TYPE{item['type']}', '{item['serial_label']}{parameter.batchNumber}00', {item['quantity']}, {item['type']}, ,, {parameter.batchNumber}/\n") + file_.write("\n") + file_.write(f"'RES' '../../{parameter.restartFile}' {parameter.loadPoint}/\n") + file_.write(f"'LIB' '../../{parameter.csLib}' \n") file_.write(f"'BAT.LAB' {parameter.batchNumber} 'CYC-{parameter.batchNumber}' /\n") + file_.write("\n") + # for item in parameter.freshFaDict: + # file_.write(f"'SEG.LIB' {item['type']} '{item['name']}'/\n") + # file_.write(f"'FUE.ZON' {item['type']}, 1, '{item['name']}' {parameter.bottomReflectorTypeNumber},0.0 {item['type']}, {parameter.activeHeight} {parameter.topReflectorTypeNumber}/\n") + # file_.write("\n") + file_.write("'DEP.STA' 'BOS' 0.0/\n") + file_.write("'DEP.FPD' 2 .5/ * Equilibrium I and Xe, update Pm and Sm by depletion, depletion time subinterval is 0.5 hrs \n") file_.write(f"'DEP.CYC' 'CYCLE{parameter.batchNumber}' 0.0 {parameter.batchNumber}/\n") + file_.write("\n") + file_.write("'ITE.BOR' 1400/ * Estimate of critical boron concentration \n") + file_.write("\n") + file_.write("'STA'/\n") + + file_.write("\n") file_.write(f"'DEP.STA' 'AVE' 0.0 0.5 1 2 -1 {parameter.depletion} /\n") file_.write("'ITE.SRC' 'SET' 'EOLEXP' , , 0.02, , , , , , 'MINBOR' 10., , , , , 4, 4, , , /\n") + file_.write("\n") + # file_.write("'This is just a test' /\n") + # file_.write(f"This is the active height: {parameter.activeHeight}/\n") + if parameter.batchNumber >= 2: + file_.write("'FUE.INI', 'JILAB'/\n") + file_.write(f"'WRE' 'cycle{parameter.batchNumber}.res'/\n") file_.write("'STA'/\n") file_.write("'END'/\n") file_.close() @@ -187,6 +226,53 @@ def getMap(parameter, locationList): return loadingPattern +def getShufflingScheme(parameter, locationList): + """ + Genrate Shuffling Scheme + @ In, parameter, DataParser Object Instance, Instance store the parameter data + @ In, locationList, list, Location list from PerturbedPaser class + @ Out, shufflingScheme, str, Shuffling Scheme + """ + maxType = max([id['type'] for id in parameter.faDict]) + print(maxType) + numberSpaces = len(str(maxType)) + 3 + print(numberSpaces) + problemMap = getCoreMap(parameter.mapSize, parameter.symmetry, + parameter.numberAssemblies, parameter.reflectorFlag) + rowCount = 1 + shufflingScheme = "" + faDict = parameter.faDict + # print(faDict) + for row in range(25): #max core 25x25 + if row in problemMap: + if rowCount <= 9: + shufflingScheme += f"{rowCount} 1 " + else: + shufflingScheme += f"{rowCount} 1 " + for col in range(25): + if col in problemMap[row]: + if not problemMap[row][col]: + if isinstance(problemMap[row][col], int): + geneNumber = problemMap[row][col] + gene = locationList[geneNumber] + value = findType(gene,faDict) + str_ = f"{value}" + shufflingScheme += f"{str_.ljust(numberSpaces)}" + else: + shufflingScheme += f"{' '.ljust(numberSpaces)}" + else: + geneNumber = problemMap[row][col] + gene = locationList[geneNumber] + value = findType(gene,faDict) + str_ = f"{value}" + shufflingScheme += f"{str_.ljust(numberSpaces)}" + shufflingScheme += "\n" + rowCount += 1 + shufflingScheme += "0 0" + shufflingScheme += "\n" + + return shufflingScheme + def getCoreMap(mapSize, symmetry, numberAssemblies, reflectorFlag): """ Get core map depending on symmetry, 
number of assemblies and reflector @@ -198,7 +284,7 @@ def getCoreMap(mapSize, symmetry, numberAssemblies, reflectorFlag): """ if mapSize.lower() == "full_core" or mapSize.lower() == "full": mapKey = "FULL" - allowedSymmetries = ("OCTANT","QUARTER_ROTATIONAL","QUARTER_MIRROR") + allowedSymmetries = ("OCTANT","QUARTER_ROTATIONAL","QUARTER_MIRROR", "NO_SYMMETRY") if symmetry.upper() in allowedSymmetries: symmetryKey = symmetry.upper() else: @@ -258,6 +344,42 @@ def getCoreMap(mapSize, symmetry, numberAssemblies, reflectorFlag): 12:{0:None,1:None,2:None,3:19, 4:18, 5:17, 6:16,7:15,8:16,9:17, 10:18, 11:19, 12:None,13:None,14:None}, 13:{0:None,1:None,2:None,3:None,4:23, 5:22, 6:21,7:20,8:21,9:22, 10:23, 11:None,12:None,13:None,14:None}, 14:{0:None,1:None,2:None,3:None,4:None,5:None,6:25,7:24,8:25,9:None,10:None,11:None,12:None,13:None,14:None}} +coreMaps['FULL']['NO_SYMMETRY'] = {} +coreMaps['FULL']['NO_SYMMETRY'][157] = {} +coreMaps['FULL']['NO_SYMMETRY'][157]['WITH_REFLECTOR'] = { 0:{0:None,1:None,2:None,3:None,4:None,5:None, 6:0, 7:1, 8:2, 9:3, 10:4, 11:None,12:None,13:None,14:None, 15:None,16:None}, + 1:{0:None,1:None,2:None,3:None,4:5 ,5:6 , 6:7 , 7:8, 8:9, 9:10, 10:11, 11:12, 12:13, 13:None,14:None, 15:None,16:None}, + 2:{0:None,1:None,2:None,3:14 ,4:15 ,5:16, 6:17, 7:18, 8:19, 9:20, 10:21, 11:22, 12:23, 13:24, 14:None, 15:None,16:None}, + 3:{0:None,1:None,2:25 ,3:26 ,4:27 ,5:28, 6:29, 7:30, 8:31, 9:32, 10:33, 11:34, 12:35, 13:36, 14:37 , 15:None,16:None}, + 4:{0:None,1:38, 2:39 ,3:40 ,4:41 ,5:42, 6:43, 7:44, 8:45, 9:46, 10:47, 11:48, 12:49, 13:50, 14:51 , 15:52, 16:None}, + 5:{0:None,1:53, 2:54 ,3:55 ,4:56 ,5:57, 6:58, 7:59, 8:60, 9:61, 10:62, 11:63, 12:64, 13:65, 14:66 , 15:67, 16:None}, + 6:{0:68, 1:69, 2:70 ,3:71 ,4:72 ,5:73, 6:74, 7:75, 8:76, 9:77, 10:78, 11:79, 12:80, 13:81, 14:82 , 15:83, 16:84}, + 7:{0:85, 1:86, 2:87 ,3:88 ,4:89 ,5:90, 6:91, 7:92, 8:93, 9:94, 10:95, 11:96, 12:97, 13:98, 14:99 , 15:100, 16:101}, + 8:{0:102, 1:103, 2:104 ,3:105 ,4:106 ,5:107, 6:108,7:109,8:110,9:111,10:112,11:113, 12:114, 13:115, 14:116 , 15:117, 16:118}, + 9:{0:119, 1:120, 2:121 ,3:122 ,4:123 ,5:124, 6:125,7:126,8:127,9:128,10:129,11:130, 12:131, 13:132, 14:133 , 15:134, 16:135}, + 10:{0:136, 1:137, 2:138 ,3:139 ,4:140 ,5:141, 6:142,7:143,8:144,9:145,10:146,11:147, 12:148, 13:149, 14:150 , 15:151, 16:152}, + 11:{0:None,1:153, 2:154 ,3:155 ,4:156 ,5:157, 6:158,7:159,8:160,9:161,10:162,11:163, 12:164, 13:165, 14:166 , 15:167, 16:None}, + 12:{0:None,1:168, 2:169 ,3:170 ,4:171 ,5:172, 6:173,7:174,8:175,9:176,10:177,11:178, 12:179, 13:180, 14:181 , 15:182, 16:None}, + 13:{0:None,1:None,2:183 ,3:184 ,4:185 ,5:186, 6:187,7:188,8:189,9:190,10:191,11:192, 12:193, 13:194, 14:195 , 15:None,16:None}, + 14:{0:None,1:None,2:None,3:196 ,4:197 ,5:198, 6:199,7:200,8:201,9:202,10:203,11:204, 12:205, 13:206, 14:None, 15:None,16:None}, + 15:{0:None,1:None,2:None,3:None,4:207 ,5:208, 6:209,7:210,8:211,9:212,10:213,11:214, 12:215, 13:None,14:None, 15:None,16:None}, + 16:{0:None,1:None,2:None,3:None,4:None,5:None,6:216,7:217,8:218,9:219,10:220,11:None,12:None,13:None,14:None, 15:None,16:None}} + +coreMaps['FULL']['NO_SYMMETRY'][157]['WITHOUT_REFLECTOR'] = { 0:{0:None,1:None,2:None,3:None,4:None,5:None, 6:0, 7:1, 8:2, 9:None,10:None,11:None,12:None,13:None,14:None}, + 1:{0:None,1:None,2:None,3:None,4:3, 5:4, 6:5, 7:6, 8:7, 9:8, 10:9, 11:None,12:None,13:None,14:None}, + 2:{0:None,1:None,2:None,3:10, 4:11, 5:12, 6:13, 7:14, 8:15, 9:16, 10:17, 11:18, 12:None,13:None,14:None}, + 3:{0:None,1:None,2:19, 3:20, 4:21, 5:22, 6:23, 
7:24, 8:25, 9:26, 10:27, 11:28, 12:29, 13:None,14:None}, + 4:{0:None,1:30, 2:31, 3:32, 4:33, 5:34, 6:35, 7:36, 8:37, 9:38, 10:39, 11:40, 12:41, 13:42, 14:None}, + 5:{0:None,1:43, 2:44, 3:45, 4:46, 5:47, 6:48, 7:49, 8:50, 9:51, 10:52, 11:53, 12:54, 13:55, 14:None}, + 6:{0:56, 1:57, 2:58, 3:59, 4:60, 5:61, 6:62, 7:63, 8:64, 9:65, 10:66, 11:67, 12:68, 13:69, 14:70}, + 7:{0:71, 1:72, 2:73, 3:74, 4:75, 5:76, 6:77, 7:78, 8:79, 9:80, 10:81, 11:82, 12:83, 13:84, 14:85}, + 8:{0:86, 1:87, 2:88, 3:89, 4:90, 5:91, 6:92, 7:93, 8:94, 9:95, 10:96, 11:97, 12:98, 13:99, 14:100}, + 9:{0:None,1:101, 2:102, 3:103, 4:104, 5:105, 6:106,7:107,8:108,9:109, 10:110, 11:111, 12:112, 13:113, 14:None}, + 10:{0:None,1:114, 2:115, 3:116, 4:117, 5:118, 6:119,7:120,8:121,9:122, 10:123, 11:124, 12:125, 13:126, 14:None}, + 11:{0:None,1:None,2:127, 3:128, 4:129, 5:130, 6:131,7:132,8:133,9:134, 10:135, 11:136, 12:137, 13:None,14:None}, + 12:{0:None,1:None,2:None,3:138, 4:139, 5:140, 6:141,7:142,8:143,9:144, 10:145, 11:146, 12:None,13:None,14:None}, + 13:{0:None,1:None,2:None,3:None,4:147, 5:148, 6:149,7:150,8:151,9:152, 10:153, 11:None,12:None,13:None,14:None}, + 14:{0:None,1:None,2:None,3:None,4:None,5:None,6:154,7:155,8:156,9:None,10:None,11:None,12:None,13:None,14:None}} + coreMaps['FULL']['OCTANT'][193] = {} coreMaps['FULL']['OCTANT'][193]['WITH_REFLECTOR'] = {0:{0:None,1:None,2:None,3:None,4:39,5:38,6:37,7:36,8:35,9:36,10:37,11:38,12:39,13:None,14:None,15:None,16:None}, 1:{0:None,1:None,2:34, 3:33, 4:32,5:31,6:30,7:29,8:28,9:29,10:30,11:31,12:32,13:33, 14:34, 15:None,16:None}, @@ -642,3 +764,24 @@ def getCoreMap(mapSize, symmetry, numberAssemblies, reflectorFlag): 14:{8:21, 9:22, 10:23, 11:24, 12:25, 13:26, 14:27, 15:None,16:None}, 15:{8:28, 9:29, 10:30, 11:31, 12:32, 13:33, 14:None,15:None,16:None}, 16:{8:34, 9:35, 10:36, 11:37, 12:None,13:None,14:None,15:None,16:None}} + +### shuffleMaps value + +shuffleMap = {} +shuffleMap['FULL'] = {} +shuffleMap['FULL']['NO_SYMMETRY'] = {} +shuffleMap['FULL']['NO_SYMMETRY'][157] = { 0:{0:None,1:None,2:None,3:None,4:None,5:None, 6:0, 7:1, 8:2, 9:None,10:None,11:None,12:None,13:None,14:None}, + 1:{0:None,1:None,2:None,3:None,4:3, 5:4, 6:5, 7:6, 8:7, 9:8, 10:9, 11:None,12:None,13:None,14:None}, + 2:{0:None,1:None,2:None,3:10, 4:11, 5:12, 6:13, 7:14, 8:15, 9:16, 10:17, 11:18, 12:None,13:None,14:None}, + 3:{0:None,1:None,2:19, 3:20, 4:21, 5:22, 6:23, 7:24, 8:25, 9:26, 10:27, 11:28, 12:29, 13:None,14:None}, + 4:{0:None,1:30, 2:31, 3:32, 4:33, 5:34, 6:35, 7:36, 8:37, 9:38, 10:39, 11:40, 12:41, 13:42, 14:None}, + 5:{0:None,1:43, 2:44, 3:45, 4:46, 5:47, 6:48, 7:49, 8:50, 9:51, 10:52, 11:53, 12:54, 13:55, 14:None}, + 6:{0:56, 1:57, 2:58, 3:59, 4:60, 5:61, 6:62, 7:63, 8:64, 9:65, 10:66, 11:67, 12:68, 13:69, 14:70}, + 7:{0:71, 1:72, 2:73, 3:74, 4:75, 5:76, 6:77, 7:78, 8:79, 9:80, 10:81, 11:82, 12:83, 13:84, 14:85}, + 8:{0:86, 1:87, 2:88, 3:89, 4:90, 5:91, 6:92, 7:93, 8:94, 9:95, 10:96, 11:97, 12:98, 13:99, 14:100}, + 9:{0:None,1:101, 2:102, 3:103, 4:104, 5:105, 6:106,7:107,8:108,9:109, 10:110, 11:111, 12:112, 13:113, 14:None}, + 10:{0:None,1:114, 2:115, 3:116, 4:117, 5:118, 6:119,7:120,8:121,9:122, 10:123, 11:124, 12:125, 13:126, 14:None}, + 11:{0:None,1:None,2:127, 3:128, 4:129, 5:130, 6:131,7:132,8:133,9:134, 10:135, 11:136, 12:137, 13:None,14:None}, + 12:{0:None,1:None,2:None,3:138, 4:139, 5:140, 6:141,7:142,8:143,9:144, 10:145, 11:146, 12:None,13:None,14:None}, + 13:{0:None,1:None,2:None,3:None,4:147, 5:148, 6:149,7:150,8:151,9:152, 10:153, 11:None,12:None,13:None,14:None}, + 
14:{0:None,1:None,2:None,3:None,4:None,5:None,6:154,7:155,8:156,9:None,10:None,11:None,12:None,13:None,14:None}} \ No newline at end of file From a762df167465cd5df34675f97ea34117a8660b9c Mon Sep 17 00:00:00 2001 From: Juan Luque-Gutierrez Date: Wed, 31 Jul 2024 14:28:05 -0600 Subject: [PATCH 82/84] RAVEN/SIM3 Interface updated to work with quarter rotational symmetry in nth cycle shuffling scheme optimization --- .../SIMULATE3/SimulateData.py | 23 +++- .../SIMULATE3/SpecificParser.py | 117 ++++++++++++------ 2 files changed, 104 insertions(+), 36 deletions(-) diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py index 77c4bfd8ed..d39cf8b059 100644 --- a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py +++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py @@ -13,7 +13,7 @@ # limitations under the License. """ Created on June 04, 2022 -@author: khnguy22 NCSU +@author: khnguy22 NCSU, luquj NCSU comments: Interface for SIMULATE3 loading pattern optimzation """ @@ -42,6 +42,7 @@ def __init__(self,filen): self.data["cycle_length"] = self.EOCEFPD() self.data["pin_peaking"] = self.pinPeaking() self.data["exposure"] = self.burnupEOC() + self.data["neutron_leakage"] = self.neutron_leakage() # self.data["assembly_power"] = self.assemblyPeakingFactors() # self.data["fuel_cost"] = self.fuel_cost() @@ -564,6 +565,26 @@ def fuel_cost(self): outputDict = {'info_ids':['fuel_cost'], 'values': [fuel_cost]} return outputDict + def neutron_leakage(self): + """ + Returns Maximum neutron leakage found in the current cycle. + @ In, None + @ Out, outputDict, dict, the dictionary containing the read data (None if none found) + {'info_ids':list(of ids of data), + 'values': list} + """ + outputDict = None + leakage_list = [] + for line in self.lines: + if "Total Neutron Leakage" in line: + elems = line.strip().split() + leakage_list.append(float(elems[-1])) + if not leakage_list: + return ValueError("No values returned. 
Check Simulate File executed correctly") + else: + outputDict = {'info_ids':['neutron_leakage'], 'values':[10000*max(leakage_list)]} + return outputDict + def writeCSV(self, fileout): """ Print Data into CSV format diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py index d103aafc87..67e08aba9c 100644 --- a/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py +++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py @@ -170,7 +170,7 @@ def generateSim3Input(self, parameter): # file_.write(f"This is the active height: {parameter.activeHeight}/\n") if parameter.batchNumber >= 2: file_.write("'FUE.INI', 'JILAB'/\n") - file_.write(f"'WRE' 'cycle{parameter.batchNumber}.res'/\n") + # file_.write(f"'WRE' 'cycle{parameter.batchNumber}.res'/\n") file_.write("'STA'/\n") file_.write("'END'/\n") file_.close() @@ -225,6 +225,31 @@ def getMap(parameter, locationList): loadingPattern += "\n" return loadingPattern +# Code specific to shuffling schemes + +def findLabel(faID,faDict,quad): + """ + Get type of FA ID + @ In, faID, int/str, the id for FA + @ In, faDict, list, list of FA xml input attributes + @ Out, faType, list, list of FA types + """ + faLabel = [id[f'type{quad}'] for id in faDict if id['FAid']==str(faID)][0] + return faLabel + +def quadrant_search(row, col, map_length): + # print(map_length) + if row > (map_length // 2) and col > (map_length // 2 - 1): + quad = 1 + elif row > (map_length // 2 - 1) and col < (map_length // 2): + quad = 2 + elif row < (map_length // 2) and col < (map_length // 2 + 1): + quad = 3 + elif row < (map_length // 2 + 1) and col > (map_length // 2): + quad = 4 + else: + quad = 1 + return quad def getShufflingScheme(parameter, locationList): """ @@ -233,10 +258,8 @@ def getShufflingScheme(parameter, locationList): @ In, locationList, list, Location list from PerturbedPaser class @ Out, shufflingScheme, str, Shuffling Scheme """ - maxType = max([id['type'] for id in parameter.faDict]) - print(maxType) + maxType = max([id['type1'] for id in parameter.faDict]) numberSpaces = len(str(maxType)) + 3 - print(numberSpaces) problemMap = getCoreMap(parameter.mapSize, parameter.symmetry, parameter.numberAssemblies, parameter.reflectorFlag) rowCount = 1 @@ -255,7 +278,12 @@ def getShufflingScheme(parameter, locationList): if isinstance(problemMap[row][col], int): geneNumber = problemMap[row][col] gene = locationList[geneNumber] - value = findType(gene,faDict) + if parameter.symmetry == 'quarter_rotational': + # print("quarter_rotational") + quad = quadrant_search(row, col, len(problemMap)) + value = findLabel(gene, faDict, quad) + else: + value = findType(gene,faDict) str_ = f"{value}" shufflingScheme += f"{str_.ljust(numberSpaces)}" else: @@ -263,7 +291,15 @@ def getShufflingScheme(parameter, locationList): else: geneNumber = problemMap[row][col] gene = locationList[geneNumber] - value = findType(gene,faDict) + if parameter.symmetry == 'quarter_rotational': + # print("Quarter_rotational") + # print(f"This is the map length {len(problemMap)}") + quad = quadrant_search(row, col, len(problemMap)) + # print(f"This is the current quadrant {quad}") + value = findLabel(gene, faDict, quad) + # print(f"This is the value: {value}") + else: + value = findType(gene,faDict) str_ = f"{value}" shufflingScheme += f"{str_.ljust(numberSpaces)}" shufflingScheme += "\n" @@ -560,20 +596,20 @@ def getCoreMap(mapSize, symmetry, numberAssemblies, reflectorFlag): 
15:{0:None,1:None,2:None,3:None,4:34, 5:26, 6:17, 7:8, 8:48,9:49,10:50,11:51, 12:52, 13:None,14:None,15:None,16:None}, 16:{0:None,1:None,2:None,3:None,4:None,5:None,6:18, 7:9, 8:53,9:54,10:55,11:None,12:None,13:None,14:None,15:None,16:None}} coreMaps['FULL']['QUARTER_ROTATIONAL'][157]['WITHOUT_REFLECTOR'] = {0 :{0:None, 1:None, 2:None, 3:None, 4:None, 5:None, 6:39, 7:38, 8: 8, 9:None, 10:None , 11:None, 12:None, 13:None, 14:None}, - 1 :{0:None, 1:None, 2:None, 3:None, 4:37, 5:36, 6:35, 7:34, 8: 7, 9:15, 10:22, 11:None, 12:None, 13:None, 14:None}, - 2 :{0:None, 1:None, 2:None, 3:33, 4:32, 5:31, 6:30, 7:29, 8: 6, 9:14, 10:21, 11:28, 12:None, 13:None, 14:None}, - 3 :{0:None, 1:None, 2:28, 3:27, 4:26, 5:25, 6:24, 7:23, 8: 5, 9:13, 10:20, 11:27, 12:33, 13:None, 14:None}, - 4 :{0:None, 1:22, 2:21, 3:20, 4:19, 5:18, 6:17, 7:16, 8: 4, 9:12, 10:19, 11:26, 12:32, 13:37, 14:None}, - 5 :{0:None, 1:15, 2:14, 3:13, 4:12, 5:11, 6:10, 7: 9, 8: 3, 9:11, 10:18, 11:25, 12:31, 13:36, 14:None}, - 6 :{0: 8, 1: 7, 2: 6, 3: 5, 4: 4, 5: 3, 6:2, 7: 1, 8: 2, 9:10, 10:17, 11:24, 12:30, 13:35, 14:39}, - 7 :{0:38, 1:34, 2:29, 3:23, 4:16, 5: 9, 6:1, 7: 0, 8: 1, 9: 9, 10:16, 11:23, 12:29, 13:34, 14:38}, - 8 :{0:39, 1:35, 2:30, 3:24, 4:17, 5:10, 6:2, 7: 1, 8: 2, 9: 3, 10: 4, 11: 5, 12: 6, 13: 7, 14: 8}, - 9 :{0:None, 1:36, 2:31, 3:25, 4:18, 5:11, 6:4, 7:3, 8:4, 9:5, 10:8, 11:12, 12:17, 13:22, 14:None}, - 10:{0:None, 1:37, 2:32, 3:26, 4:19, 5:12, 6:4, 7:16, 8:17, 9:18, 10:19, 11:20, 12:21, 13:22, 14:None}, - 11:{0:None, 1:None, 2:33, 3:27, 4:20, 5:13, 6:5, 7:23, 8:24, 9:25, 10:26, 11:27, 12:28, 13:None, 14:None}, - 12:{0:None, 1:None, 2:None, 3:28, 4:21, 5:14, 6:6, 7:29, 8:30, 9:31, 10:32, 11:33, 12:None, 13:None, 14:None}, - 13:{0:None, 1:None, 2:None, 3:None, 4:22, 5:15, 6:7, 7:34, 8:35, 9:36, 10:37, 11:None, 12:None, 13:None, 14:None}, - 14:{0:None, 1:None, 2:None, 3:None, 4:None, 5:None, 6:8, 7:38, 8:39, 9:None, 10:None , 11:None, 12:None, 13:None, 14:None}} + 1 :{0:None, 1:None, 2:None, 3:None, 4:37, 5:36, 6:35, 7:34, 8: 7, 9:15, 10:22, 11:None, 12:None, 13:None, 14:None}, + 2 :{0:None, 1:None, 2:None, 3:33, 4:32, 5:31, 6:30, 7:29, 8: 6, 9:14, 10:21, 11:28, 12:None, 13:None, 14:None}, + 3 :{0:None, 1:None, 2:28, 3:27, 4:26, 5:25, 6:24, 7:23, 8: 5, 9:13, 10:20, 11:27, 12:33, 13:None, 14:None}, + 4 :{0:None, 1:22, 2:21, 3:20, 4:19, 5:18, 6:17, 7:16, 8: 4, 9:12, 10:19, 11:26, 12:32, 13:37, 14:None}, + 5 :{0:None, 1:15, 2:14, 3:13, 4:12, 5:11, 6:10, 7: 9, 8: 3, 9:11, 10:18, 11:25, 12:31, 13:36, 14:None}, + 6 :{0: 8, 1: 7, 2: 6, 3: 5, 4: 4, 5: 3, 6:2, 7: 1, 8: 2, 9:10, 10:17, 11:24, 12:30, 13:35, 14:39}, + 7 :{0:38, 1:34, 2:29, 3:23, 4:16, 5: 9, 6:1, 7: 0, 8: 1, 9: 9, 10:16, 11:23, 12:29, 13:34, 14:38}, + 8 :{0:39, 1:35, 2:30, 3:24, 4:17, 5:10, 6:2, 7: 1, 8: 2, 9: 3, 10: 4, 11: 5, 12: 6, 13: 7, 14: 8}, + 9 :{0:None, 1:36, 2:31, 3:25, 4:18, 5:11, 6:3, 7:9, 8:10, 9:11, 10:12, 11:13, 12:14, 13:15, 14:None}, + 10:{0:None, 1:37, 2:32, 3:26, 4:19, 5:12, 6:4, 7:16, 8:17, 9:18, 10:19, 11:20, 12:21, 13:22, 14:None}, + 11:{0:None, 1:None, 2:33, 3:27, 4:20, 5:13, 6:5, 7:23, 8:24, 9:25, 10:26, 11:27, 12:28, 13:None, 14:None}, + 12:{0:None, 1:None, 2:None, 3:28, 4:21, 5:14, 6:6, 7:29, 8:30, 9:31, 10:32, 11:33, 12:None, 13:None, 14:None}, + 13:{0:None, 1:None, 2:None, 3:None, 4:22, 5:15, 6:7, 7:34, 8:35, 9:36, 10:37, 11:None, 12:None, 13:None, 14:None}, + 14:{0:None, 1:None, 2:None, 3:None, 4:None, 5:None, 6:8, 7:38, 8:39, 9:None, 10:None , 11:None, 12:None, 13:None, 14:None}} coreMaps['FULL']['QUARTER_ROTATIONAL'][193] = {} 
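For the quarter-rotational shuffling path added above, each FA entry in faDict is expected to carry one label per quadrant (type1 through type4), and getShufflingScheme picks the label whose quadrant matches the map position. The standalone sketch below mirrors the conditions of quadrant_search and the lookup in findLabel; the helper name quadrantOf and the faDict entry are made-up examples, not interface code.

# Standalone sketch of the per-quadrant label lookup used by getShufflingScheme
# for QUARTER_ROTATIONAL symmetry; the faDict entry below is made up.
def quadrantOf(row, col, mapLength):
  """
    @ In, row, int, row index in the core map
    @ In, col, int, column index in the core map
    @ In, mapLength, int, number of rows/columns in the core map
    @ Out, quad, int, quadrant index (1-4), same conditions as quadrant_search
  """
  half = mapLength // 2
  if row > half and col > half - 1:
    return 1
  elif row > half - 1 and col < half:
    return 2
  elif row < half and col < half + 1:
    return 3
  elif row < half + 1 and col > half:
    return 4
  return 1

faDict = [{'FAid': '12', 'type1': 'J-10', 'type2': 'F-10', 'type3': 'F-06', 'type4': 'J-06'}]
quad = quadrantOf(10, 9, 15)   # a position below and to the right of the core center
label = [fa[f'type{quad}'] for fa in faDict if fa['FAid'] == '12'][0]
assert (quad, label) == (1, 'J-10')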
coreMaps['FULL']['QUARTER_ROTATIONAL'][193]['WITH_REFLECTOR'] = {0:{0:None,1:None,2:None,3:None,4:None,5:78,6:77,7:76,8:75,9:74,10:75,11:76,12:77,13:78,14:None,15:None,16:None,17:None,18:None}, @@ -770,18 +806,29 @@ def getCoreMap(mapSize, symmetry, numberAssemblies, reflectorFlag): shuffleMap = {} shuffleMap['FULL'] = {} shuffleMap['FULL']['NO_SYMMETRY'] = {} -shuffleMap['FULL']['NO_SYMMETRY'][157] = { 0:{0:None,1:None,2:None,3:None,4:None,5:None, 6:0, 7:1, 8:2, 9:None,10:None,11:None,12:None,13:None,14:None}, - 1:{0:None,1:None,2:None,3:None,4:3, 5:4, 6:5, 7:6, 8:7, 9:8, 10:9, 11:None,12:None,13:None,14:None}, - 2:{0:None,1:None,2:None,3:10, 4:11, 5:12, 6:13, 7:14, 8:15, 9:16, 10:17, 11:18, 12:None,13:None,14:None}, - 3:{0:None,1:None,2:19, 3:20, 4:21, 5:22, 6:23, 7:24, 8:25, 9:26, 10:27, 11:28, 12:29, 13:None,14:None}, - 4:{0:None,1:30, 2:31, 3:32, 4:33, 5:34, 6:35, 7:36, 8:37, 9:38, 10:39, 11:40, 12:41, 13:42, 14:None}, - 5:{0:None,1:43, 2:44, 3:45, 4:46, 5:47, 6:48, 7:49, 8:50, 9:51, 10:52, 11:53, 12:54, 13:55, 14:None}, - 6:{0:56, 1:57, 2:58, 3:59, 4:60, 5:61, 6:62, 7:63, 8:64, 9:65, 10:66, 11:67, 12:68, 13:69, 14:70}, - 7:{0:71, 1:72, 2:73, 3:74, 4:75, 5:76, 6:77, 7:78, 8:79, 9:80, 10:81, 11:82, 12:83, 13:84, 14:85}, - 8:{0:86, 1:87, 2:88, 3:89, 4:90, 5:91, 6:92, 7:93, 8:94, 9:95, 10:96, 11:97, 12:98, 13:99, 14:100}, - 9:{0:None,1:101, 2:102, 3:103, 4:104, 5:105, 6:106,7:107,8:108,9:109, 10:110, 11:111, 12:112, 13:113, 14:None}, - 10:{0:None,1:114, 2:115, 3:116, 4:117, 5:118, 6:119,7:120,8:121,9:122, 10:123, 11:124, 12:125, 13:126, 14:None}, - 11:{0:None,1:None,2:127, 3:128, 4:129, 5:130, 6:131,7:132,8:133,9:134, 10:135, 11:136, 12:137, 13:None,14:None}, - 12:{0:None,1:None,2:None,3:138, 4:139, 5:140, 6:141,7:142,8:143,9:144, 10:145, 11:146, 12:None,13:None,14:None}, - 13:{0:None,1:None,2:None,3:None,4:147, 5:148, 6:149,7:150,8:151,9:152, 10:153, 11:None,12:None,13:None,14:None}, - 14:{0:None,1:None,2:None,3:None,4:None,5:None,6:154,7:155,8:156,9:None,10:None,11:None,12:None,13:None,14:None}} \ No newline at end of file +shuffleMap['FULL']['NO_SYMMETRY'][157] = { 0:{0:None, 1:None, 2:None, 3:None, 4:None, 5:None, 6:"J-01",7:"H-01",8:"G-01",9:None, 10:None, 11:None, 12:None, 13:None, 14:None}, + 1:{0:None, 1:None, 2:None, 3:None, 4:"L-02",5:"K-02",6:"J-02",7:"H-02",8:"G-02",9:"F-02",10:"E-02",11:None, 12:None, 13:None, 14:None}, + 2:{0:None, 1:None, 2:None, 3:"M-03",4:"L-03",5:"K-03",6:"J-03",7:"H-03",8:"G-03",9:"F-03",10:"E-03",11:"D-03",12:None, 13:None, 14:None}, + 3:{0:None, 1:None, 2:"N-04",3:"M-04",4:"L-04",5:"K-04",6:"J-04",7:"H-04",8:"G-04",9:"F-04",10:"E-04",11:"D-04",12:"C-04",13:None, 14:None}, + 4:{0:None, 1:"P-05",2:"N-05",3:"M-05",4:"L-05",5:"K-05",6:"J-05",7:"H-05",8:"G-05",9:"F-05",10:"E-05",11:"D-05",12:"C-05",13:"B-05",14:None}, + 5:{0:None, 1:"P-06",2:"N-06",3:"M-06",4:"L-06",5:"K-06",6:"J-06",7:"H-06",8:"G-06",9:"F-06",10:"E-06",11:"D-06",12:"C-06",13:"B-06",14:None}, + 6:{0:"R-07",1:"P-07",2:"N-07",3:"M-07",4:"L-07",5:"K-07",6:"J-07",7:"H-07",8:"G-07",9:"F-07",10:"E-07",11:"D-07",12:"C-07",13:"B-07",14:"A-07"}, + 7:{0:"R-08",1:"P-08",2:"N-08",3:"M-08",4:"L-08",5:"K-08",6:"J-08",7:"H-08",8:"G-08",9:"F-08",10:"E-08",11:"D-08",12:"C-08",13:"B-08",14:"A-08"}, + 8:{0:"R-09",1:"P-09",2:"N-09",3:"M-09",4:"L-09",5:"K-09",6:"J-09",7:"H-09",8:"G-09",9:"F-09",10:"E-09",11:"D-09",12:"C-09",13:"B-09",14:"A-08"}, + 9:{0:None, 1:"P-10",2:"N-10",3:"M-10",4:"L-10",5:"K-10",6:"J-10",7:"H-10",8:"G-10",9:"F-10",10:"E-10",11:"D-10",12:"C-10",13:"B-10",14:None}, + 10:{0:None, 
1:"P-11",2:"N-11",3:"M-11",4:"L-11",5:"K-11",6:"J-11",7:"H-11",8:"G-11",9:"F-11",10:"E-11",11:"D-11",12:"C-11",13:"B-11",14:None}, + 11:{0:None, 1:None, 2:"N-12",3:"M-12",4:"L-12",5:"K-12",6:"J-12",7:"H-12",8:"G-12",9:"F-12",10:"E-12",11:"D-12",12:"C-12",13:None, 14:None}, + 12:{0:None, 1:None, 2:None, 3:"M-13",4:"L-13",5:"K-13",6:"J-13",7:"H-13",8:"G-13",9:"F-13",10:"E-13",11:"D-13",12:None, 13:None, 14:None}, + 13:{0:None, 1:None, 2:None, 3:None, 4:"L-14",5:"K-14",6:"J-14",7:"H-14",8:"G-14",9:"F-14",10:"E-14",11:None, 12:None, 13:None, 14:None}, + 14:{0:None, 1:None, 2:None, 3:None, 4:None, 5:None, 6:"J-15",7:"H-15",8:"G-15",9:None, 10:None, 11:None, 12:None, 13:None, 14:None}} + +shuffleMap['FULL']['QUARTER'] = {} +shuffleMap['FULL']['QUARTER'][157] = {} +shuffleMap['FULL']['QUARTER'][157] = {7 :{7: 0, 8: 1, 9: 9, 10:16, 11:23, 12:29, 13:34, 14:38}, + 8 :{7: 1, 8: 2, 9: 3, 10: 4, 11: 5, 12: 6, 13: 7, 14: 8}, + 9 :{7: 9, 8:10, 9:11, 10:12, 11:13, 12:14, 13:15, 14:None}, + 10:{7:16, 8:17, 9:18, 10:19, 11:20, 12:21, 13:22, 14:None}, + 11:{7:23, 8:24, 9:25, 10:26, 11:27, 12:28, 13:None,14:None}, + 12:{7:29, 8:30, 9:31, 10:32, 11:33, 12:None, 13:None,14:None}, + 13:{7:34, 8:35, 9:36, 10:37, 11:None,12:None, 13:None,14:None}, + 14:{7:38, 8:39, 9:None,10:None,11:None,12:None, 13:None,14:None}} \ No newline at end of file From 13f6791bae1b102b61bf59712ee4f635ff06a706 Mon Sep 17 00:00:00 2001 From: Juan Luque-Gutierrez Date: Mon, 5 Aug 2024 18:35:08 -0600 Subject: [PATCH 83/84] Core average burnup extractor for nth cycle opt was added --- .../SIMULATE3/SimulateData.py | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py index d39cf8b059..3d835bd6e5 100644 --- a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py +++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py @@ -43,6 +43,7 @@ def __init__(self,filen): self.data["pin_peaking"] = self.pinPeaking() self.data["exposure"] = self.burnupEOC() self.data["neutron_leakage"] = self.neutron_leakage() + self.data["core_avg_burnup"] = self.core_avg_burnup() # self.data["assembly_power"] = self.assemblyPeakingFactors() # self.data["fuel_cost"] = self.fuel_cost() @@ -585,6 +586,30 @@ def neutron_leakage(self): outputDict = {'info_ids':['neutron_leakage'], 'values':[10000*max(leakage_list)]} return outputDict + def core_avg_burnup(self): + """ + Returns the accumulated average core burnup at EOC. + @ In, None + @ Out, outputDict, dict, the dictionary containing the read data (None if none found) + {'info_ids':list(of ids of data), + 'values': list} + """ + list_ = [] + outputDict = None + for line in self.lines: + if "Core Average Exposure" in line: + if "EBAR" in line: + elems = line.strip().split() + spot = elems.index("EBAR") + list_.append(float(elems[spot+1])) + print(list_) + print(list_[-1]) + if not list_: + return ValueError("No values returned. 
Check Simulate file executed correctly.") + else: + outputDict = {'info_ids':['core_avg_exp'], 'values': [list_[-1]]} + return outputDict + def writeCSV(self, fileout): """ Print Data into CSV format From 693545610199ffcb639b029c60ae05b0c33e2a4f Mon Sep 17 00:00:00 2001 From: Juan Luque-Gutierrez Date: Thu, 15 Aug 2024 18:17:07 -0600 Subject: [PATCH 84/84] Changes made to the interface for octant symmetry --- .../SIMULATE3/SpecificParser.py | 139 ++++++++++++++---- 1 file changed, 109 insertions(+), 30 deletions(-) diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py index 67e08aba9c..d9f07068ad 100644 --- a/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py +++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py @@ -232,24 +232,94 @@ def findLabel(faID,faDict,quad): Get type of FA ID @ In, faID, int/str, the id for FA @ In, faDict, list, list of FA xml input attributes - @ Out, faType, list, list of FA types + @ Out, faLabel, list, list of FA labels """ faLabel = [id[f'type{quad}'] for id in faDict if id['FAid']==str(faID)][0] + if not faLabel: + return ValueError("Make sure labels are ordered.") + else: + faLabel = faLabel return faLabel def quadrant_search(row, col, map_length): + """ + Get quadrant in quarter symmetry. + @ In, row, of the FA evaluated + @ In, col, of the FA evaluated + @ Out, quad, quadrant in which the FA is located + """ # print(map_length) - if row > (map_length // 2) and col > (map_length // 2 - 1): - quad = 1 - elif row > (map_length // 2 - 1) and col < (map_length // 2): - quad = 2 - elif row < (map_length // 2) and col < (map_length // 2 + 1): - quad = 3 - elif row < (map_length // 2 + 1) and col > (map_length // 2): - quad = 4 - else: - quad = 1 - return quad + if row > (map_length // 2) and col > (map_length // 2 - 1): + quad = 1 + elif row > (map_length // 2 - 1) and col < (map_length // 2): + quad = 2 + elif row < (map_length // 2) and col < (map_length // 2 + 1): + quad = 3 + elif row < (map_length // 2 + 1) and col > (map_length // 2): + quad = 4 + else: + quad = 1 + return quad + +def octant_search(row, col, map_length): + """ + Get octant in octant symmetry. 
+ @ In, row, of the FA evaluated + @ In, col, of the FA evaluated + @ Out, oct, quadrant in which the FA is located + """ + # print(map_length) + x = col - map_length // 2 + # print(x) + y = map_length // 2 - row + # print(y) + oct = 0 + diff = (abs(x) - abs(y)) + if x > 0: + if y < 0: + if diff < 0: + oct = 1 + elif diff > 0: + oct = 8 + elif y > 0: + if diff > 0: + oct = 7 + elif diff < 0: + oct = 6 + elif x < 0: + if y < 0: + if diff < 0: + oct = 2 + elif diff > 0: + oct = 3 + elif y > 0: + if diff > 0: + oct = 4 + elif diff < 0: + oct = 5 + # To check vertical and horizontal centerlines + if x == 0: + if y < 0: + oct = 1 + else: + oct = 3 + elif y == 0: + if x < 0: + oct = 2 + else: + oct = 4 + # To check for the diagonals + if diff == 0: + if x > 0 and y < 0: + oct = 1 + elif x < 0 and y < 0: + oct = 2 + elif x < 0 and y > 0: + oct = 3 + elif x > 0 and y > 0: + oct = 4 + + return oct def getShufflingScheme(parameter, locationList): """ @@ -258,13 +328,14 @@ def getShufflingScheme(parameter, locationList): @ In, locationList, list, Location list from PerturbedPaser class @ Out, shufflingScheme, str, Shuffling Scheme """ - maxType = max([id['type1'] for id in parameter.faDict]) - numberSpaces = len(str(maxType)) + 3 + maxLabel = max([len(id['type1']) for id in parameter.faDict]) + numberSpaces = maxLabel + 3 problemMap = getCoreMap(parameter.mapSize, parameter.symmetry, parameter.numberAssemblies, parameter.reflectorFlag) rowCount = 1 shufflingScheme = "" faDict = parameter.faDict + Quarter_symmetries = ("QUARTER_ROTATIONAL","QUARTER_MIRROR") # print(faDict) for row in range(25): #max core 25x25 if row in problemMap: @@ -278,10 +349,14 @@ def getShufflingScheme(parameter, locationList): if isinstance(problemMap[row][col], int): geneNumber = problemMap[row][col] gene = locationList[geneNumber] - if parameter.symmetry == 'quarter_rotational': + # if parameter.symmetry == 'quarter_rotational': + if parameter.symmetry.upper() in Quarter_symmetries: # print("quarter_rotational") quad = quadrant_search(row, col, len(problemMap)) value = findLabel(gene, faDict, quad) + elif parameter.symmetry.upper() == 'OCTANT': + oct = octant_search(row, col, len(problemMap)) + value = findLabel(gene, faDict, oct) else: value = findType(gene,faDict) str_ = f"{value}" @@ -291,13 +366,17 @@ def getShufflingScheme(parameter, locationList): else: geneNumber = problemMap[row][col] gene = locationList[geneNumber] - if parameter.symmetry == 'quarter_rotational': + # if parameter.symmetry == 'quarter_rotational': + if parameter.symmetry.upper() in Quarter_symmetries: # print("Quarter_rotational") # print(f"This is the map length {len(problemMap)}") quad = quadrant_search(row, col, len(problemMap)) # print(f"This is the current quadrant {quad}") value = findLabel(gene, faDict, quad) # print(f"This is the value: {value}") + elif parameter.symmetry.upper() == 'OCTANT': + oct = octant_search(row, col, len(problemMap)) + value = findLabel(gene, faDict, oct) else: value = findType(gene,faDict) str_ = f"{value}" @@ -366,20 +445,20 @@ def getCoreMap(mapSize, symmetry, numberAssemblies, reflectorFlag): 15:{0:None,1:None,2:None,3:None,4:31 ,5:30, 6:29,7:28,8:27,9:28,10:29,11:30, 12:31, 13:None,14:None, 15:None,16:None}, 16:{0:None,1:None,2:None,3:None,4:None,5:None,6:34,7:33,8:32,9:33,10:34,11:None,12:None,13:None,14:None, 15:None,16:None}} coreMaps['FULL']['OCTANT'][157]['WITHOUT_REFLECTOR'] = { 0:{0:None,1:None,2:None,3:None,4:None,5:None,6:25,7:24,8:25,9:None,10:None,11:None,12:None,13:None,14:None}, - 
1:{0:None,1:None,2:None,3:None,4:23, 5:22, 6:21,7:20,8:21,9:22, 10:23, 11:None,12:None,13:None,14:None}, - 2:{0:None,1:None,2:None,3:19, 4:18, 5:17, 6:16,7:15,8:16,9:17, 10:18, 11:19, 12:None,13:None,14:None}, - 3:{0:None,1:None,2:19, 3:14, 4:13, 5:12, 6:11,7:10,8:11,9:12, 10:13, 11:14, 12:19, 13:None,14:None}, - 4:{0:None,1:23, 2:18, 3:13, 4:9, 5:8, 6:7, 7:6, 8:7, 9:8, 10:9, 11:13, 12:18, 13:23, 14:None}, - 5:{0:None,1:22, 2:17, 3:12, 4:8, 5:5, 6:4, 7:3, 8:4, 9:5, 10:8, 11:12, 12:17, 13:22, 14:None}, - 6:{0:25, 1:21, 2:16, 3:11, 4:7, 5:4, 6:2, 7:1, 8:2, 9:4, 10:7, 11:11, 12:16, 13:21, 14:25}, - 7:{0:24, 1:20, 2:15, 3:10, 4:6, 5:3, 6:1, 7:0, 8:1, 9:3, 10:6, 11:10, 12:15, 13:20, 14:24}, - 8:{0:25, 1:21, 2:16, 3:11, 4:7, 5:4, 6:2, 7:1, 8:2, 9:4, 10:7, 11:11, 12:16, 13:21, 14:25}, - 9:{0:None,1:22, 2:17, 3:12, 4:8, 5:5, 6:4, 7:3, 8:4, 9:5, 10:8, 11:12, 12:17, 13:22, 14:None}, - 10:{0:None,1:23, 2:18, 3:13, 4:9, 5:8, 6:7, 7:6, 8:7,9:8, 10:9 , 11:13, 12:18, 13:23, 14:None}, - 11:{0:None,1:None,2:19, 3:14, 4:13, 5:12, 6:11,7:10,8:11,9:12, 10:13, 11:14, 12:19, 13:None,14:None}, - 12:{0:None,1:None,2:None,3:19, 4:18, 5:17, 6:16,7:15,8:16,9:17, 10:18, 11:19, 12:None,13:None,14:None}, - 13:{0:None,1:None,2:None,3:None,4:23, 5:22, 6:21,7:20,8:21,9:22, 10:23, 11:None,12:None,13:None,14:None}, - 14:{0:None,1:None,2:None,3:None,4:None,5:None,6:25,7:24,8:25,9:None,10:None,11:None,12:None,13:None,14:None}} + 1:{0:None,1:None,2:None,3:None,4:23, 5:22, 6:21,7:20,8:21,9:22, 10:23, 11:None,12:None,13:None,14:None}, + 2:{0:None,1:None,2:None,3:19, 4:18, 5:17, 6:16,7:15,8:16,9:17, 10:18, 11:19, 12:None,13:None,14:None}, + 3:{0:None,1:None,2:19, 3:14, 4:13, 5:12, 6:11,7:10,8:11,9:12, 10:13, 11:14, 12:19, 13:None,14:None}, + 4:{0:None,1:23, 2:18, 3:13, 4:9, 5:8, 6:7, 7:6, 8:7, 9:8, 10:9, 11:13, 12:18, 13:23, 14:None}, + 5:{0:None,1:22, 2:17, 3:12, 4:8, 5:5, 6:4, 7:3, 8:4, 9:5, 10:8, 11:12, 12:17, 13:22, 14:None}, + 6:{0:25, 1:21, 2:16, 3:11, 4:7, 5:4, 6:2, 7:1, 8:2, 9:4, 10:7, 11:11, 12:16, 13:21, 14:25}, + 7:{0:24, 1:20, 2:15, 3:10, 4:6, 5:3, 6:1, 7:0, 8:1, 9:3, 10:6, 11:10, 12:15, 13:20, 14:24}, + 8:{0:25, 1:21, 2:16, 3:11, 4:7, 5:4, 6:2, 7:1, 8:2, 9:4, 10:7, 11:11, 12:16, 13:21, 14:25}, + 9:{0:None,1:22, 2:17, 3:12, 4:8, 5:5, 6:4, 7:3, 8:4, 9:5, 10:8, 11:12, 12:17, 13:22, 14:None}, + 10:{0:None,1:23, 2:18, 3:13, 4:9, 5:8, 6:7, 7:6, 8:7, 9:8, 10:9 , 11:13, 12:18, 13:23, 14:None}, + 11:{0:None,1:None,2:19, 3:14, 4:13, 5:12, 6:11,7:10,8:11,9:12, 10:13, 11:14, 12:19, 13:None,14:None}, + 12:{0:None,1:None,2:None,3:19, 4:18, 5:17, 6:16,7:15,8:16,9:17, 10:18, 11:19, 12:None,13:None,14:None}, + 13:{0:None,1:None,2:None,3:None,4:23, 5:22, 6:21,7:20,8:21,9:22, 10:23, 11:None,12:None,13:None,14:None}, + 14:{0:None,1:None,2:None,3:None,4:None,5:None,6:25,7:24,8:25,9:None,10:None,11:None,12:None,13:None,14:None}} coreMaps['FULL']['NO_SYMMETRY'] = {} coreMaps['FULL']['NO_SYMMETRY'][157] = {} coreMaps['FULL']['NO_SYMMETRY'][157]['WITH_REFLECTOR'] = { 0:{0:None,1:None,2:None,3:None,4:None,5:None, 6:0, 7:1, 8:2, 9:3, 10:4, 11:None,12:None,13:None,14:None, 15:None,16:None},