diff --git a/.gitmodules b/.gitmodules
index 41c9762a8a..4b6a4015fa 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,3 @@
-[submodule "plugins/TEAL"]
- path = plugins/TEAL
- url = https://github.com/idaholab/TEAL.git
[submodule "plugins/HERON"]
path = plugins/HERON
url = https://github.com/idaholab/HERON.git
diff --git a/dependencies.xml b/dependencies.xml
index d12e2e978a..36f37e1c40 100644
--- a/dependencies.xml
+++ b/dependencies.xml
@@ -108,4 +108,4 @@ Note all install methods after "main" take
removeremove
-
+
\ No newline at end of file
diff --git a/doc/user_manual/generated/generateOptimizerDoc.py b/doc/user_manual/generated/generateOptimizerDoc.py
index 946522370c..b44e867b29 100644
--- a/doc/user_manual/generated/generateOptimizerDoc.py
+++ b/doc/user_manual/generated/generateOptimizerDoc.py
@@ -152,7 +152,7 @@ def insertSolnExport(tex, obj):
- 20
+ 10rouletteWheel
@@ -177,32 +177,32 @@ def insertSolnExport(tex, obj):
uniform_dist_woRepl_1
- 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20
+ 1,2,3,4,5,6,7,8,9,10uniform_dist_woRepl_1
- 2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,1
+ 2,3,4,5,6,7,8,9,10,1uniform_dist_woRepl_1
- 3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,1,2
+ 3,4,5,6,7,8,9,10,1,2uniform_dist_woRepl_1
- 4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,1,2,3
+ 4,5,6,7,8,9,10,1,2,3uniform_dist_woRepl_1
- 5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,1,2,3,4
+ 5,6,7,8,9,10,1,2,3,4uniform_dist_woRepl_1
- 6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,1,2,3,4,5
+ 6,7,8,9,10,1,2,3,4,5ans
diff --git a/doc/workshop/optimizer/GeneticAlgorithms/Models/Constraints.py b/doc/workshop/optimizer/GeneticAlgorithms/Models/Constraints.py
index 6ddac6dce2..d56fec0f4d 100644
--- a/doc/workshop/optimizer/GeneticAlgorithms/Models/Constraints.py
+++ b/doc/workshop/optimizer/GeneticAlgorithms/Models/Constraints.py
@@ -44,7 +44,7 @@ def YY(Input):#Complete this: give the function the correct name#
return g
-def XX(Input):#You are free to pick this name but it has to be similar to the one in the xml#
+def expConstr1(Input): #You are free to pick this name but it has to be similar to the one in the xml#
"""
Let's assume that the constraint is:
$ x3+x4 < 8 $
@@ -54,7 +54,7 @@ def XX(Input):#You are free to pick this name but it has to be similar to the on
@ In, Input, object, RAVEN container
@ out, g, float, explicit constraint 1 evaluation function
"""
- g = # Write the explicit constraint here
+ g = Input.x3 + Input.x4 - 8 # Write the explicit constraint here
return g
def expConstr2(Input):
@@ -76,7 +76,7 @@ def impConstr1(Input):
@ In, Input, object, RAVEN container
@ out, g, float, implicit constraint 1 evaluation function
"""
- return 10 - Input.x1**2 - Input.obj
+ return 10 - Input.x1**2 - Input.ans
def impConstr2(Input):
"""
@@ -85,5 +85,5 @@ def impConstr2(Input):
@ In, Input, object, RAVEN container
@ out, g, float, implicit constraint 2 evaluation function
"""
- g = Input.x1**2 + Input.obj - 10
+ g = Input.x1**2 + Input.ans - 10
return g
diff --git a/doc/workshop/optimizer/GeneticAlgorithms/Models/LocalSum.py b/doc/workshop/optimizer/GeneticAlgorithms/Models/LocalSum.py
index 3b07d28589..d04b4f915f 100644
--- a/doc/workshop/optimizer/GeneticAlgorithms/Models/LocalSum.py
+++ b/doc/workshop/optimizer/GeneticAlgorithms/Models/LocalSum.py
@@ -30,7 +30,7 @@ def evaluate(Inputs):
Sum = 0
for ind,var in enumerate(Inputs.keys()):
# write the objective function here
- Sum +=
+ Sum += (ind+1) * Inputs[var]
return Sum[:]
def run(self,Inputs):
@@ -40,4 +40,4 @@ def run(self,Inputs):
@ In, Inputs, dict, additional inputs
@ Out, None
"""
- self.?? = evaluate(Inputs) # Complete This # make sure the name of the objective is consistent obj
+ self.ans = evaluate(Inputs) # Complete This # make sure the name of the objective is consistent obj
diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py
index 817a7d5381..3d835bd6e5 100644
--- a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py
+++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py
@@ -13,7 +13,7 @@
# limitations under the License.
"""
Created on June 04, 2022
-@author: khnguy22 NCSU
+@author: khnguy22 NCSU, luquj NCSU
comments: Interface for SIMULATE3 loading pattern optimzation
"""
@@ -36,13 +36,17 @@ def __init__(self,filen):
# retrieve data
self.data['axial_mesh'] = self.axialMeshExtractor()
self.data['keff'] = self.coreKeffEOC()
- self.data['FDeltaH'] = self.maxFDH()
+ self.data["MaxFDH"] = self.maxFDH()
self.data["kinf"] = self.kinfEOC()
- self.data["boron"] = self.boronEOC()
+ self.data["max_boron"] = self.boronEOC()
self.data["cycle_length"] = self.EOCEFPD()
- self.data["PinPowerPeaking"] = self.pinPeaking()
+ self.data["pin_peaking"] = self.pinPeaking()
self.data["exposure"] = self.burnupEOC()
- self.data["assembly_power"] = self.assemblyPeakingFactors()
+ self.data["neutron_leakage"] = self.neutron_leakage()
+ self.data["core_avg_burnup"] = self.core_avg_burnup()
+ # self.data["assembly_power"] = self.assemblyPeakingFactors()
+ # self.data["fuel_cost"] = self.fuel_cost()
+
# this is a dummy variable for demonstration with MOF
# check if something has been found
if all(v is None for v in self.data.values()):
@@ -142,52 +146,52 @@ def coreKeffEOC(self):
return outputDict
- def assemblyPeakingFactors(self):
- """
- Extracts the assembly radial power peaking factors as a dictionary
- with the depletion step in GWD/MTU as the dictionary keys.
- @ In, None
- @ Out, outputDict, dict, the dictionary containing the read data (None if none found)
- {'info_ids':list(of ids of data),
- 'values': list}
- """
- radialPowerDictionary = {}
- searching_ = False
- outputDict = None
- for line in self.lines:
- if "Case" in line and "GWd/MT" in line:
- elems = line.strip().split()
- depl = elems[-2]
- if depl in radialPowerDictionary:
- pass
- else:
- radialPowerDictionary[depl] = {}
- if "** H- G- F- E- D- C- B- A- **" in line:
- searching_ = False
-
- if searching_:
- elems = line.strip().split()
- if elems[0] == "**":
- posList = elems[1:-1]
- else:
- radialPowerDictionary[depl][elems[0]] = {}
- for i,el in enumerate(elems[1:-1]):
- radialPowerDictionary[depl][elems[0]][posList[i]] = float(el)
-
- if "PRI.STA 2RPF - Assembly 2D Ave RPF - Relative Power Fraction" in line:
- searching_ = True
-
- if not radialPowerDictionary:
- return ValueError("No values returned. Check Simulate File executed correctly")
- else:
- maxPeaking = 0.0
- for depl in radialPowerDictionary:
- for row in radialPowerDictionary[depl]:
- for col in radialPowerDictionary[depl][row]:
- maxPeaking = max(radialPowerDictionary[depl][row][col],maxPeaking)
- outputDict = {'info_ids':['FA_peaking'], 'values': [maxPeaking] }
-
- return outputDict
+ # def assemblyPeakingFactors(self):
+ # """
+ # Extracts the assembly radial power peaking factors as a dictionary
+ # with the depletion step in GWD/MTU as the dictionary keys.
+ # @ In, None
+ # @ Out, outputDict, dict, the dictionary containing the read data (None if none found)
+ # {'info_ids':list(of ids of data),
+ # 'values': list}
+ # """
+ # radialPowerDictionary = {}
+ # searching_ = False
+ # outputDict = None
+ # for line in self.lines:
+ # if "Case" in line and "GWd/MT" in line:
+ # elems = line.strip().split()
+ # depl = elems[-2]
+ # if depl in radialPowerDictionary:
+ # pass
+ # else:
+ # radialPowerDictionary[depl] = {}
+ # if "** H- G- F- E- D- C- B- A- **" in line:
+ # searching_ = False
+
+ # if searching_:
+ # elems = line.strip().split()
+ # if elems[0] == "**":
+ # posList = elems[1:-1]
+ # else:
+ # radialPowerDictionary[depl][elems[0]] = {}
+ # for i,el in enumerate(elems[1:-1]):
+ # radialPowerDictionary[depl][elems[0]][posList[i]] = float(el)
+
+ # if "PRI.STA 2RPF - Assembly 2D Ave RPF - Relative Power Fraction" in line:
+ # searching_ = True
+
+ # if not radialPowerDictionary:
+ # return ValueError("No values returned. Check Simulate File executed correctly")
+ # else:
+ # maxPeaking = 0.0
+ # for depl in radialPowerDictionary:
+ # for row in radialPowerDictionary[depl]:
+ # for col in radialPowerDictionary[depl][row]:
+ # maxPeaking = max(radialPowerDictionary[depl][row][col],maxPeaking)
+ # outputDict = {'info_ids':['FA_peaking'], 'values': [maxPeaking] }
+
+ # return outputDict
def EOCEFPD(self):
"""
@@ -256,6 +260,8 @@ def pinPeaking(self):
spot = elems.index('Max-3PIN')
list_.append(float(elems[spot+1]))
+ print(f"This is Fq={max(list_)}")
+
if not list_:
return ValueError("No values returned. Check Simulate File executed correctly")
else:
@@ -486,6 +492,124 @@ def burnupEOC(self):
return outputDict
+ def fuel_cost(self):
+ """
+ Extracts the fuel types used in the core map and calculates the fuel cost based on a front end approach.
+ This function applies only to quarter core symmetries.
+ @ In, None
+ @ Out, outputDict, dict, the dictionary containing the read data (None if none found)
+ {'info_ids': list(of ids of data),
+ 'values': list}
+ """
+ outputDict = None
+ # First, we need to parse the core map from the output file.
+ # NOTE: Given that a run is not needed to know the Loading Pattern, this function could be on the input side.
+ FA_list = []
+ for line in self.lines:
+ if "'FUE.TYP'" in line:
+ p1 = line.index(",")
+ p2 = line.index("/")
+ search_space = line[p1:p2]
+ search_space = search_space.replace(",","")
+ temp = search_space.split()
+ for i in temp:
+ FA_list.append(float(i))
+ FA_types = list(set(FA_list))
+ quartcore_size = len(temp)
+
+ # We separate the core map depending on how many times their elements are counted in the symmetry:
+ # FA_list_A counted once, as it is the center of the core.
+ # FA_list_B counted twice, as they are on the centerlines.
+ # FA_list_C counted four times, as they are the rest of the fuel assemblies.
+ FA_list_A = FA_list[0]
+ FA_list_B = FA_list[1:quartcore_size] + FA_list[quartcore_size:quartcore_size*(quartcore_size-1)+1:quartcore_size]
+ FA_list_C = []
+ for i in range(quartcore_size-1):
+ FA_list_C.append(FA_list[(i+1)*quartcore_size + 1: (i+2)*quartcore_size])
+ FA_list_C = [item for sublist in FA_list_C for item in sublist] # To flatten FA_list_C
+ # Now we proceed to count how many fuel types of each type are there in our core.
+ FA_count_A = [float(fa == FA_list_A) for fa in FA_types]
+ FA_count_B = [float(FA_list_B.count(fa)*2) for fa in FA_types]
+ FA_count_C = [float(FA_list_C.count(fa)*4) for fa in FA_types]
+ FA_count = [FA_count_A[j] + FA_count_B[j] + FA_count_C[j] for j in range(len(FA_types))]
+ # And create a dictionary with all the fuel types count.
+ FA_types_dict = {int(FA_types[i]):FA_count[i] for i in range(len(FA_types))}
+
+ # Dictionary with the unit cost for each FA type.
+
+ # FA type 0 = empty -> M$ 0.0
+ # FA type 1 = reflector -> M$ 0.0
+ # FA type 2 = 2.00 wt% -> M$ 2.69520839
+ # FA type 3 = 2.50 wt% -> M$ 3.24678409
+ # FA type 4 = 2.50 wt% + Gd -> M$ 3.24678409
+ # FA type 5 = 3.20 wt% -> M$ 4.03739539
+ # FA type 6 = 3.20 wt% + Gd -> M$ 4.03739539
+ # The cost of burnable poison is not being considered.
+
+ cost_dict = {
+ 0: 0,
+ 1: 0,
+ 2: 2.69520839,
+ 3: 3.24678409,
+ 4: 3.24678409,
+ 5: 4.03739539,
+ 6: 4.03739539
+ }
+
+ fuel_cost = 0
+ for fuel_type, fuel_count in FA_types_dict.items():
+ fuel_cost += fuel_count * cost_dict[fuel_type]
+
+ if not fuel_cost:
+ return ValueError("No values returned. Check Simulate file executed correctly.")
+ else:
+ outputDict = {'info_ids':['fuel_cost'], 'values': [fuel_cost]}
+ return outputDict
+
+ def neutron_leakage(self):
+ """
+ Returns Maximum neutron leakage found in the current cycle.
+ @ In, None
+ @ Out, outputDict, dict, the dictionary containing the read data (None if none found)
+ {'info_ids':list(of ids of data),
+ 'values': list}
+ """
+ outputDict = None
+ leakage_list = []
+ for line in self.lines:
+ if "Total Neutron Leakage" in line:
+ elems = line.strip().split()
+ leakage_list.append(float(elems[-1]))
+ if not leakage_list:
+ return ValueError("No values returned. Check Simulate File executed correctly")
+ else:
+ outputDict = {'info_ids':['neutron_leakage'], 'values':[10000*max(leakage_list)]}
+ return outputDict
+
+ def core_avg_burnup(self):
+ """
+ Returns the accumulated average core burnup at EOC.
+ @ In, None
+ @ Out, outputDict, dict, the dictionary containing the read data (None if none found)
+ {'info_ids':list(of ids of data),
+ 'values': list}
+ """
+ list_ = []
+ outputDict = None
+ for line in self.lines:
+ if "Core Average Exposure" in line:
+ if "EBAR" in line:
+ elems = line.strip().split()
+ spot = elems.index("EBAR")
+ list_.append(float(elems[spot+1]))
+ print(list_)
+ print(list_[-1])
+ if not list_:
+ return ValueError("No values returned. Check Simulate file executed correctly.")
+ else:
+ outputDict = {'info_ids':['core_avg_exp'], 'values': [list_[-1]]}
+ return outputDict
+
def writeCSV(self, fileout):
"""
Print Data into CSV format
diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py
index a59a96a913..d9f07068ad 100644
--- a/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py
+++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SpecificParser.py
@@ -48,6 +48,12 @@ def getParameters(self):
self.symmetry = root.find('symmetry').text.strip()
self.numberAssemblies = int(root.find('number_assemblies').text)
self.reflectorFlag = root.find('reflector').text.strip()
+ self.activeHeight = root.find('active_height').text.strip()
+ self.bottomReflectorTypeNumber = root.find('bottom_reflector_typenumber').text.strip()
+ self.topReflectorTypeNumber = root.find('top_reflector_typenumber').text.strip()
+ self.freshFaDict = []
+ for freshFa in root.iter('FreshFA'):
+ self.freshFaDict.append(freshFa.attrib)
self.faDict = []
for fa in root.iter('FA'):
self.faDict.append(fa.attrib)
@@ -115,23 +121,56 @@ def generateSim3Input(self, parameter):
file_.write(f"'DIM.PWR' {parameter.coreWidth}/\n")
file_.write(f"'DIM.CAL' {parameter.axialNodes} 2 2/\n")
file_.write("'DIM.DEP' 'EXP' 'PIN' 'HTMO' 'HBOR' 'HTF' 'XEN' 'SAM' 'EBP'/ \n")
+ file_.write("'ERR.CHK' 'PERMIT'/\n")
file_.write("\n")
- loadingPattern = getMap(parameter, [int(child.attrib['FAid']) for child in self.data])
- file_.write(loadingPattern) # later, from get map
+ if parameter.batchNumber >=2:
+ file_.write("'FUE.LAB', 6/\n")
+ shufflingScheme = getShufflingScheme(parameter, [int(child.attrib['FAid']) for child in self.data])
+ file_.write(shufflingScheme)
+ else:
+ loadingPattern = getMap(parameter, [int(child.attrib['FAid']) for child in self.data])
+ file_.write(loadingPattern) # later, from get map
file_.write("\n")
+ # if parameter.batchNumber <=1:
+ # pass
+ # else:
+ # raise ValueError("Larger batch number is not available for this version")
if parameter.batchNumber <=1:
- pass
- else:
- raise ValueError("Larger batch number is not available for this version")
- file_.write(f"'LIB' '../../{parameter.csLib}' \n")
+ file_.write(f"'LIB' '../../{parameter.csLib}' \n")
file_.write(f"'COR.OPE' {parameter.power}, {parameter.flow}, {parameter.pressure}/\n")
file_.write("'COR.TIN' {}/ \n".format(parameter.inletTemperature))
file_.write("\n")
if parameter.batchNumber >= 2:
+ # file_.write("'COM' SERIAL NUMBER TO FUEL BATCH \n")
+ # file_.write("'COM' LABEL CREATE TYPE NUMBER \n")
+ # for item in parameter.freshFaDict:
+ # file_.write(f"'FUE.NEW', 'TYPE{item['type']}', '{item['serial_label']}{parameter.batchNumber}00', {item['quantity']}, {item['type']}, ,, {parameter.batchNumber}/\n")
+ file_.write("\n")
+ file_.write(f"'RES' '../../{parameter.restartFile}' {parameter.loadPoint}/\n")
+ file_.write(f"'LIB' '../../{parameter.csLib}' \n")
file_.write(f"'BAT.LAB' {parameter.batchNumber} 'CYC-{parameter.batchNumber}' /\n")
+ file_.write("\n")
+ # for item in parameter.freshFaDict:
+ # file_.write(f"'SEG.LIB' {item['type']} '{item['name']}'/\n")
+ # file_.write(f"'FUE.ZON' {item['type']}, 1, '{item['name']}' {parameter.bottomReflectorTypeNumber},0.0 {item['type']}, {parameter.activeHeight} {parameter.topReflectorTypeNumber}/\n")
+ # file_.write("\n")
+ file_.write("'DEP.STA' 'BOS' 0.0/\n")
+ file_.write("'DEP.FPD' 2 .5/ * Equilibrium I and Xe, update Pm and Sm by depletion, depletion time subinterval is 0.5 hrs \n")
file_.write(f"'DEP.CYC' 'CYCLE{parameter.batchNumber}' 0.0 {parameter.batchNumber}/\n")
+ file_.write("\n")
+ file_.write("'ITE.BOR' 1400/ * Estimate of critical boron concentration \n")
+ file_.write("\n")
+ file_.write("'STA'/\n")
+
+ file_.write("\n")
file_.write(f"'DEP.STA' 'AVE' 0.0 0.5 1 2 -1 {parameter.depletion} /\n")
file_.write("'ITE.SRC' 'SET' 'EOLEXP' , , 0.02, , , , , , 'MINBOR' 10., , , , , 4, 4, , , /\n")
+ file_.write("\n")
+ # file_.write("'This is just a test' /\n")
+ # file_.write(f"This is the active height: {parameter.activeHeight}/\n")
+ if parameter.batchNumber >= 2:
+ file_.write("'FUE.INI', 'JILAB'/\n")
+ # file_.write(f"'WRE' 'cycle{parameter.batchNumber}.res'/\n")
file_.write("'STA'/\n")
file_.write("'END'/\n")
file_.close()
@@ -186,6 +225,168 @@ def getMap(parameter, locationList):
loadingPattern += "\n"
return loadingPattern
+# Code specific to shuffling schemes
+
+def findLabel(faID,faDict,quad):
+ """
+ Get type of FA ID
+ @ In, faID, int/str, the id for FA
+ @ In, faDict, list, list of FA xml input attributes
+ @ Out, faLabel, list, list of FA labels
+ """
+ faLabel = [id[f'type{quad}'] for id in faDict if id['FAid']==str(faID)][0]
+ if not faLabel:
+ return ValueError("Make sure labels are ordered.")
+ else:
+ faLabel = faLabel
+ return faLabel
+
+def quadrant_search(row, col, map_length):
+ """
+ Get quadrant in quarter symmetry.
+ @ In, row, of the FA evaluated
+ @ In, col, of the FA evaluated
+ @ Out, quad, quadrant in which the FA is located
+ """
+ # print(map_length)
+ if row > (map_length // 2) and col > (map_length // 2 - 1):
+ quad = 1
+ elif row > (map_length // 2 - 1) and col < (map_length // 2):
+ quad = 2
+ elif row < (map_length // 2) and col < (map_length // 2 + 1):
+ quad = 3
+ elif row < (map_length // 2 + 1) and col > (map_length // 2):
+ quad = 4
+ else:
+ quad = 1
+ return quad
+
+def octant_search(row, col, map_length):
+ """
+ Get octant in octant symmetry.
+ @ In, row, of the FA evaluated
+ @ In, col, of the FA evaluated
+ @ Out, oct, quadrant in which the FA is located
+ """
+ # print(map_length)
+ x = col - map_length // 2
+ # print(x)
+ y = map_length // 2 - row
+ # print(y)
+ oct = 0
+ diff = (abs(x) - abs(y))
+ if x > 0:
+ if y < 0:
+ if diff < 0:
+ oct = 1
+ elif diff > 0:
+ oct = 8
+ elif y > 0:
+ if diff > 0:
+ oct = 7
+ elif diff < 0:
+ oct = 6
+ elif x < 0:
+ if y < 0:
+ if diff < 0:
+ oct = 2
+ elif diff > 0:
+ oct = 3
+ elif y > 0:
+ if diff > 0:
+ oct = 4
+ elif diff < 0:
+ oct = 5
+ # To check vertical and horizontal centerlines
+ if x == 0:
+ if y < 0:
+ oct = 1
+ else:
+ oct = 3
+ elif y == 0:
+ if x < 0:
+ oct = 2
+ else:
+ oct = 4
+ # To check for the diagonals
+ if diff == 0:
+ if x > 0 and y < 0:
+ oct = 1
+ elif x < 0 and y < 0:
+ oct = 2
+ elif x < 0 and y > 0:
+ oct = 3
+ elif x > 0 and y > 0:
+ oct = 4
+
+ return oct
+
+def getShufflingScheme(parameter, locationList):
+ """
+ Genrate Shuffling Scheme
+ @ In, parameter, DataParser Object Instance, Instance store the parameter data
+ @ In, locationList, list, Location list from PerturbedPaser class
+ @ Out, shufflingScheme, str, Shuffling Scheme
+ """
+ maxLabel = max([len(id['type1']) for id in parameter.faDict])
+ numberSpaces = maxLabel + 3
+ problemMap = getCoreMap(parameter.mapSize, parameter.symmetry,
+ parameter.numberAssemblies, parameter.reflectorFlag)
+ rowCount = 1
+ shufflingScheme = ""
+ faDict = parameter.faDict
+ Quarter_symmetries = ("QUARTER_ROTATIONAL","QUARTER_MIRROR")
+ # print(faDict)
+ for row in range(25): #max core 25x25
+ if row in problemMap:
+ if rowCount <= 9:
+ shufflingScheme += f"{rowCount} 1 "
+ else:
+ shufflingScheme += f"{rowCount} 1 "
+ for col in range(25):
+ if col in problemMap[row]:
+ if not problemMap[row][col]:
+ if isinstance(problemMap[row][col], int):
+ geneNumber = problemMap[row][col]
+ gene = locationList[geneNumber]
+ # if parameter.symmetry == 'quarter_rotational':
+ if parameter.symmetry.upper() in Quarter_symmetries:
+ # print("quarter_rotational")
+ quad = quadrant_search(row, col, len(problemMap))
+ value = findLabel(gene, faDict, quad)
+ elif parameter.symmetry.upper() == 'OCTANT':
+ oct = octant_search(row, col, len(problemMap))
+ value = findLabel(gene, faDict, oct)
+ else:
+ value = findType(gene,faDict)
+ str_ = f"{value}"
+ shufflingScheme += f"{str_.ljust(numberSpaces)}"
+ else:
+ shufflingScheme += f"{' '.ljust(numberSpaces)}"
+ else:
+ geneNumber = problemMap[row][col]
+ gene = locationList[geneNumber]
+ # if parameter.symmetry == 'quarter_rotational':
+ if parameter.symmetry.upper() in Quarter_symmetries:
+ # print("Quarter_rotational")
+ # print(f"This is the map length {len(problemMap)}")
+ quad = quadrant_search(row, col, len(problemMap))
+ # print(f"This is the current quadrant {quad}")
+ value = findLabel(gene, faDict, quad)
+ # print(f"This is the value: {value}")
+ elif parameter.symmetry.upper() == 'OCTANT':
+ oct = octant_search(row, col, len(problemMap))
+ value = findLabel(gene, faDict, oct)
+ else:
+ value = findType(gene,faDict)
+ str_ = f"{value}"
+ shufflingScheme += f"{str_.ljust(numberSpaces)}"
+ shufflingScheme += "\n"
+ rowCount += 1
+ shufflingScheme += "0 0"
+ shufflingScheme += "\n"
+
+ return shufflingScheme
def getCoreMap(mapSize, symmetry, numberAssemblies, reflectorFlag):
"""
@@ -198,7 +399,7 @@ def getCoreMap(mapSize, symmetry, numberAssemblies, reflectorFlag):
"""
if mapSize.lower() == "full_core" or mapSize.lower() == "full":
mapKey = "FULL"
- allowedSymmetries = ("OCTANT","QUARTER_ROTATIONAL","QUARTER_MIRROR")
+ allowedSymmetries = ("OCTANT","QUARTER_ROTATIONAL","QUARTER_MIRROR", "NO_SYMMETRY")
if symmetry.upper() in allowedSymmetries:
symmetryKey = symmetry.upper()
else:
@@ -244,20 +445,56 @@ def getCoreMap(mapSize, symmetry, numberAssemblies, reflectorFlag):
15:{0:None,1:None,2:None,3:None,4:31 ,5:30, 6:29,7:28,8:27,9:28,10:29,11:30, 12:31, 13:None,14:None, 15:None,16:None},
16:{0:None,1:None,2:None,3:None,4:None,5:None,6:34,7:33,8:32,9:33,10:34,11:None,12:None,13:None,14:None, 15:None,16:None}}
coreMaps['FULL']['OCTANT'][157]['WITHOUT_REFLECTOR'] = { 0:{0:None,1:None,2:None,3:None,4:None,5:None,6:25,7:24,8:25,9:None,10:None,11:None,12:None,13:None,14:None},
- 1:{0:None,1:None,2:None,3:None,4:23, 5:22, 6:21,7:20,8:21,9:22, 10:23, 11:None,12:None,13:None,14:None},
- 2:{0:None,1:None,2:None,3:19, 4:18, 5:17, 6:16,7:15,8:16,9:17, 10:18, 11:19, 12:None,13:None,14:None},
- 3:{0:None,1:None,2:19, 3:14, 4:13, 5:12, 6:11,7:10,8:11,9:12, 10:13, 11:14, 12:19, 13:None,14:None},
- 4:{0:None,1:23, 2:18, 3:13, 4:9, 5:8, 6:7, 7:6, 8:7, 9:8, 10:9, 11:13, 12:18, 13:23, 14:None},
- 5:{0:None,1:22, 2:17, 3:12, 4:8, 5:5, 6:4, 7:3, 8:4, 9:5, 10:8, 11:12, 12:17, 13:22, 14:None},
- 6:{0:25, 1:21, 2:16, 3:11, 4:7, 5:4, 6:2, 7:1, 8:2, 9:4, 10:7, 11:11, 12:16, 13:21, 14:25},
- 7:{0:24, 1:20, 2:15, 3:10, 4:6, 5:3, 6:1, 7:0, 8:1, 9:3, 10:6, 11:10, 12:15, 13:20, 14:24},
- 8:{0:25, 1:21, 2:16, 3:11, 4:7, 5:4, 6:2, 7:1, 8:2, 9:4, 10:7, 11:11, 12:16, 13:21, 14:25},
- 9:{0:None,1:22, 2:17, 3:12, 4:8, 5:5, 6:4, 7:3, 8:4, 9:5, 10:8, 11:12, 12:17, 13:22, 14:None},
- 10:{0:None,1:23, 2:18, 3:13, 4:9, 5:8, 6:7, 7:6, 8:7,9:8, 10:9 , 11:13, 12:18, 13:23, 14:None},
- 11:{0:None,1:None,2:19, 3:14, 4:13, 5:12, 6:11,7:10,8:11,9:12, 10:13, 11:14, 12:19, 13:None,14:None},
- 12:{0:None,1:None,2:None,3:19, 4:18, 5:17, 6:16,7:15,8:16,9:17, 10:18, 11:19, 12:None,13:None,14:None},
- 13:{0:None,1:None,2:None,3:None,4:23, 5:22, 6:21,7:20,8:21,9:22, 10:23, 11:None,12:None,13:None,14:None},
- 14:{0:None,1:None,2:None,3:None,4:None,5:None,6:25,7:24,8:25,9:None,10:None,11:None,12:None,13:None,14:None}}
+ 1:{0:None,1:None,2:None,3:None,4:23, 5:22, 6:21,7:20,8:21,9:22, 10:23, 11:None,12:None,13:None,14:None},
+ 2:{0:None,1:None,2:None,3:19, 4:18, 5:17, 6:16,7:15,8:16,9:17, 10:18, 11:19, 12:None,13:None,14:None},
+ 3:{0:None,1:None,2:19, 3:14, 4:13, 5:12, 6:11,7:10,8:11,9:12, 10:13, 11:14, 12:19, 13:None,14:None},
+ 4:{0:None,1:23, 2:18, 3:13, 4:9, 5:8, 6:7, 7:6, 8:7, 9:8, 10:9, 11:13, 12:18, 13:23, 14:None},
+ 5:{0:None,1:22, 2:17, 3:12, 4:8, 5:5, 6:4, 7:3, 8:4, 9:5, 10:8, 11:12, 12:17, 13:22, 14:None},
+ 6:{0:25, 1:21, 2:16, 3:11, 4:7, 5:4, 6:2, 7:1, 8:2, 9:4, 10:7, 11:11, 12:16, 13:21, 14:25},
+ 7:{0:24, 1:20, 2:15, 3:10, 4:6, 5:3, 6:1, 7:0, 8:1, 9:3, 10:6, 11:10, 12:15, 13:20, 14:24},
+ 8:{0:25, 1:21, 2:16, 3:11, 4:7, 5:4, 6:2, 7:1, 8:2, 9:4, 10:7, 11:11, 12:16, 13:21, 14:25},
+ 9:{0:None,1:22, 2:17, 3:12, 4:8, 5:5, 6:4, 7:3, 8:4, 9:5, 10:8, 11:12, 12:17, 13:22, 14:None},
+ 10:{0:None,1:23, 2:18, 3:13, 4:9, 5:8, 6:7, 7:6, 8:7, 9:8, 10:9 , 11:13, 12:18, 13:23, 14:None},
+ 11:{0:None,1:None,2:19, 3:14, 4:13, 5:12, 6:11,7:10,8:11,9:12, 10:13, 11:14, 12:19, 13:None,14:None},
+ 12:{0:None,1:None,2:None,3:19, 4:18, 5:17, 6:16,7:15,8:16,9:17, 10:18, 11:19, 12:None,13:None,14:None},
+ 13:{0:None,1:None,2:None,3:None,4:23, 5:22, 6:21,7:20,8:21,9:22, 10:23, 11:None,12:None,13:None,14:None},
+ 14:{0:None,1:None,2:None,3:None,4:None,5:None,6:25,7:24,8:25,9:None,10:None,11:None,12:None,13:None,14:None}}
+coreMaps['FULL']['NO_SYMMETRY'] = {}
+coreMaps['FULL']['NO_SYMMETRY'][157] = {}
+coreMaps['FULL']['NO_SYMMETRY'][157]['WITH_REFLECTOR'] = { 0:{0:None,1:None,2:None,3:None,4:None,5:None, 6:0, 7:1, 8:2, 9:3, 10:4, 11:None,12:None,13:None,14:None, 15:None,16:None},
+ 1:{0:None,1:None,2:None,3:None,4:5 ,5:6 , 6:7 , 7:8, 8:9, 9:10, 10:11, 11:12, 12:13, 13:None,14:None, 15:None,16:None},
+ 2:{0:None,1:None,2:None,3:14 ,4:15 ,5:16, 6:17, 7:18, 8:19, 9:20, 10:21, 11:22, 12:23, 13:24, 14:None, 15:None,16:None},
+ 3:{0:None,1:None,2:25 ,3:26 ,4:27 ,5:28, 6:29, 7:30, 8:31, 9:32, 10:33, 11:34, 12:35, 13:36, 14:37 , 15:None,16:None},
+ 4:{0:None,1:38, 2:39 ,3:40 ,4:41 ,5:42, 6:43, 7:44, 8:45, 9:46, 10:47, 11:48, 12:49, 13:50, 14:51 , 15:52, 16:None},
+ 5:{0:None,1:53, 2:54 ,3:55 ,4:56 ,5:57, 6:58, 7:59, 8:60, 9:61, 10:62, 11:63, 12:64, 13:65, 14:66 , 15:67, 16:None},
+ 6:{0:68, 1:69, 2:70 ,3:71 ,4:72 ,5:73, 6:74, 7:75, 8:76, 9:77, 10:78, 11:79, 12:80, 13:81, 14:82 , 15:83, 16:84},
+ 7:{0:85, 1:86, 2:87 ,3:88 ,4:89 ,5:90, 6:91, 7:92, 8:93, 9:94, 10:95, 11:96, 12:97, 13:98, 14:99 , 15:100, 16:101},
+ 8:{0:102, 1:103, 2:104 ,3:105 ,4:106 ,5:107, 6:108,7:109,8:110,9:111,10:112,11:113, 12:114, 13:115, 14:116 , 15:117, 16:118},
+ 9:{0:119, 1:120, 2:121 ,3:122 ,4:123 ,5:124, 6:125,7:126,8:127,9:128,10:129,11:130, 12:131, 13:132, 14:133 , 15:134, 16:135},
+ 10:{0:136, 1:137, 2:138 ,3:139 ,4:140 ,5:141, 6:142,7:143,8:144,9:145,10:146,11:147, 12:148, 13:149, 14:150 , 15:151, 16:152},
+ 11:{0:None,1:153, 2:154 ,3:155 ,4:156 ,5:157, 6:158,7:159,8:160,9:161,10:162,11:163, 12:164, 13:165, 14:166 , 15:167, 16:None},
+ 12:{0:None,1:168, 2:169 ,3:170 ,4:171 ,5:172, 6:173,7:174,8:175,9:176,10:177,11:178, 12:179, 13:180, 14:181 , 15:182, 16:None},
+ 13:{0:None,1:None,2:183 ,3:184 ,4:185 ,5:186, 6:187,7:188,8:189,9:190,10:191,11:192, 12:193, 13:194, 14:195 , 15:None,16:None},
+ 14:{0:None,1:None,2:None,3:196 ,4:197 ,5:198, 6:199,7:200,8:201,9:202,10:203,11:204, 12:205, 13:206, 14:None, 15:None,16:None},
+ 15:{0:None,1:None,2:None,3:None,4:207 ,5:208, 6:209,7:210,8:211,9:212,10:213,11:214, 12:215, 13:None,14:None, 15:None,16:None},
+ 16:{0:None,1:None,2:None,3:None,4:None,5:None,6:216,7:217,8:218,9:219,10:220,11:None,12:None,13:None,14:None, 15:None,16:None}}
+
+coreMaps['FULL']['NO_SYMMETRY'][157]['WITHOUT_REFLECTOR'] = { 0:{0:None,1:None,2:None,3:None,4:None,5:None, 6:0, 7:1, 8:2, 9:None,10:None,11:None,12:None,13:None,14:None},
+ 1:{0:None,1:None,2:None,3:None,4:3, 5:4, 6:5, 7:6, 8:7, 9:8, 10:9, 11:None,12:None,13:None,14:None},
+ 2:{0:None,1:None,2:None,3:10, 4:11, 5:12, 6:13, 7:14, 8:15, 9:16, 10:17, 11:18, 12:None,13:None,14:None},
+ 3:{0:None,1:None,2:19, 3:20, 4:21, 5:22, 6:23, 7:24, 8:25, 9:26, 10:27, 11:28, 12:29, 13:None,14:None},
+ 4:{0:None,1:30, 2:31, 3:32, 4:33, 5:34, 6:35, 7:36, 8:37, 9:38, 10:39, 11:40, 12:41, 13:42, 14:None},
+ 5:{0:None,1:43, 2:44, 3:45, 4:46, 5:47, 6:48, 7:49, 8:50, 9:51, 10:52, 11:53, 12:54, 13:55, 14:None},
+ 6:{0:56, 1:57, 2:58, 3:59, 4:60, 5:61, 6:62, 7:63, 8:64, 9:65, 10:66, 11:67, 12:68, 13:69, 14:70},
+ 7:{0:71, 1:72, 2:73, 3:74, 4:75, 5:76, 6:77, 7:78, 8:79, 9:80, 10:81, 11:82, 12:83, 13:84, 14:85},
+ 8:{0:86, 1:87, 2:88, 3:89, 4:90, 5:91, 6:92, 7:93, 8:94, 9:95, 10:96, 11:97, 12:98, 13:99, 14:100},
+ 9:{0:None,1:101, 2:102, 3:103, 4:104, 5:105, 6:106,7:107,8:108,9:109, 10:110, 11:111, 12:112, 13:113, 14:None},
+ 10:{0:None,1:114, 2:115, 3:116, 4:117, 5:118, 6:119,7:120,8:121,9:122, 10:123, 11:124, 12:125, 13:126, 14:None},
+ 11:{0:None,1:None,2:127, 3:128, 4:129, 5:130, 6:131,7:132,8:133,9:134, 10:135, 11:136, 12:137, 13:None,14:None},
+ 12:{0:None,1:None,2:None,3:138, 4:139, 5:140, 6:141,7:142,8:143,9:144, 10:145, 11:146, 12:None,13:None,14:None},
+ 13:{0:None,1:None,2:None,3:None,4:147, 5:148, 6:149,7:150,8:151,9:152, 10:153, 11:None,12:None,13:None,14:None},
+ 14:{0:None,1:None,2:None,3:None,4:None,5:None,6:154,7:155,8:156,9:None,10:None,11:None,12:None,13:None,14:None}}
+
coreMaps['FULL']['OCTANT'][193] = {}
coreMaps['FULL']['OCTANT'][193]['WITH_REFLECTOR'] = {0:{0:None,1:None,2:None,3:None,4:39,5:38,6:37,7:36,8:35,9:36,10:37,11:38,12:39,13:None,14:None,15:None,16:None},
1:{0:None,1:None,2:34, 3:33, 4:32,5:31,6:30,7:29,8:28,9:29,10:30,11:31,12:32,13:33, 14:34, 15:None,16:None},
@@ -438,20 +675,20 @@ def getCoreMap(mapSize, symmetry, numberAssemblies, reflectorFlag):
15:{0:None,1:None,2:None,3:None,4:34, 5:26, 6:17, 7:8, 8:48,9:49,10:50,11:51, 12:52, 13:None,14:None,15:None,16:None},
16:{0:None,1:None,2:None,3:None,4:None,5:None,6:18, 7:9, 8:53,9:54,10:55,11:None,12:None,13:None,14:None,15:None,16:None}}
coreMaps['FULL']['QUARTER_ROTATIONAL'][157]['WITHOUT_REFLECTOR'] = {0 :{0:None, 1:None, 2:None, 3:None, 4:None, 5:None, 6:39, 7:38, 8: 8, 9:None, 10:None , 11:None, 12:None, 13:None, 14:None},
- 1 :{0:None, 1:None, 2:None, 3:None, 4:37, 5:36, 6:35, 7:34, 8: 7, 9:15, 10:22, 11:None, 12:None, 13:None, 14:None},
- 2 :{0:None, 1:None, 2:None, 3:33, 4:32, 5:31, 6:30, 7:29, 8: 6, 9:14, 10:21, 11:28, 12:None, 13:None, 14:None},
- 3 :{0:None, 1:None, 2:28, 3:27, 4:26, 5:25, 6:24, 7:23, 8: 5, 9:13, 10:20, 11:27, 12:33, 13:None, 14:None},
- 4 :{0:None, 1:22, 2:21, 3:20, 4:19, 5:18, 6:17, 7:16, 8: 4, 9:12, 10:19, 11:26, 12:32, 13:37, 14:None},
- 5 :{0:None, 1:15, 2:14, 3:13, 4:12, 5:11, 6:10, 7: 9, 8: 3, 9:11, 10:18, 11:25, 12:31, 13:36, 14:None},
- 6 :{0: 8, 1: 7, 2: 6, 3: 5, 4: 4, 5: 3, 6:2, 7: 1, 8: 2, 9:10, 10:17, 11:24, 12:30, 13:35, 14:39},
- 7 :{0:38, 1:34, 2:29, 3:23, 4:16, 5: 9, 6:1, 7: 0, 8: 1, 9: 9, 10:16, 11:23, 12:29, 13:34, 14:38},
- 8 :{0:39, 1:35, 2:30, 3:24, 4:17, 5:10, 6:2, 7: 1, 8: 2, 9: 3, 10: 4, 11: 5, 12: 6, 13: 7, 14: 8},
- 9 :{0:None, 1:36, 2:31, 3:25, 4:18, 5:11, 6:4, 7:3, 8:4, 9:5, 10:8, 11:12, 12:17, 13:22, 14:None},
- 10:{0:None, 1:37, 2:32, 3:26, 4:19, 5:12, 6:4, 7:16, 8:17, 9:18, 10:19, 11:20, 12:21, 13:22, 14:None},
- 11:{0:None, 1:None, 2:33, 3:27, 4:20, 5:13, 6:5, 7:23, 8:24, 9:25, 10:26, 11:27, 12:28, 13:None, 14:None},
- 12:{0:None, 1:None, 2:None, 3:28, 4:21, 5:14, 6:6, 7:29, 8:30, 9:31, 10:32, 11:33, 12:None, 13:None, 14:None},
- 13:{0:None, 1:None, 2:None, 3:None, 4:22, 5:15, 6:7, 7:34, 8:35, 9:36, 10:37, 11:None, 12:None, 13:None, 14:None},
- 14:{0:None, 1:None, 2:None, 3:None, 4:None, 5:None, 6:8, 7:38, 8:39, 9:None, 10:None , 11:None, 12:None, 13:None, 14:None}}
+ 1 :{0:None, 1:None, 2:None, 3:None, 4:37, 5:36, 6:35, 7:34, 8: 7, 9:15, 10:22, 11:None, 12:None, 13:None, 14:None},
+ 2 :{0:None, 1:None, 2:None, 3:33, 4:32, 5:31, 6:30, 7:29, 8: 6, 9:14, 10:21, 11:28, 12:None, 13:None, 14:None},
+ 3 :{0:None, 1:None, 2:28, 3:27, 4:26, 5:25, 6:24, 7:23, 8: 5, 9:13, 10:20, 11:27, 12:33, 13:None, 14:None},
+ 4 :{0:None, 1:22, 2:21, 3:20, 4:19, 5:18, 6:17, 7:16, 8: 4, 9:12, 10:19, 11:26, 12:32, 13:37, 14:None},
+ 5 :{0:None, 1:15, 2:14, 3:13, 4:12, 5:11, 6:10, 7: 9, 8: 3, 9:11, 10:18, 11:25, 12:31, 13:36, 14:None},
+ 6 :{0: 8, 1: 7, 2: 6, 3: 5, 4: 4, 5: 3, 6:2, 7: 1, 8: 2, 9:10, 10:17, 11:24, 12:30, 13:35, 14:39},
+ 7 :{0:38, 1:34, 2:29, 3:23, 4:16, 5: 9, 6:1, 7: 0, 8: 1, 9: 9, 10:16, 11:23, 12:29, 13:34, 14:38},
+ 8 :{0:39, 1:35, 2:30, 3:24, 4:17, 5:10, 6:2, 7: 1, 8: 2, 9: 3, 10: 4, 11: 5, 12: 6, 13: 7, 14: 8},
+ 9 :{0:None, 1:36, 2:31, 3:25, 4:18, 5:11, 6:3, 7:9, 8:10, 9:11, 10:12, 11:13, 12:14, 13:15, 14:None},
+ 10:{0:None, 1:37, 2:32, 3:26, 4:19, 5:12, 6:4, 7:16, 8:17, 9:18, 10:19, 11:20, 12:21, 13:22, 14:None},
+ 11:{0:None, 1:None, 2:33, 3:27, 4:20, 5:13, 6:5, 7:23, 8:24, 9:25, 10:26, 11:27, 12:28, 13:None, 14:None},
+ 12:{0:None, 1:None, 2:None, 3:28, 4:21, 5:14, 6:6, 7:29, 8:30, 9:31, 10:32, 11:33, 12:None, 13:None, 14:None},
+ 13:{0:None, 1:None, 2:None, 3:None, 4:22, 5:15, 6:7, 7:34, 8:35, 9:36, 10:37, 11:None, 12:None, 13:None, 14:None},
+ 14:{0:None, 1:None, 2:None, 3:None, 4:None, 5:None, 6:8, 7:38, 8:39, 9:None, 10:None , 11:None, 12:None, 13:None, 14:None}}
coreMaps['FULL']['QUARTER_ROTATIONAL'][193] = {}
coreMaps['FULL']['QUARTER_ROTATIONAL'][193]['WITH_REFLECTOR'] = {0:{0:None,1:None,2:None,3:None,4:None,5:78,6:77,7:76,8:75,9:74,10:75,11:76,12:77,13:78,14:None,15:None,16:None,17:None,18:None},
@@ -642,3 +879,35 @@ def getCoreMap(mapSize, symmetry, numberAssemblies, reflectorFlag):
14:{8:21, 9:22, 10:23, 11:24, 12:25, 13:26, 14:27, 15:None,16:None},
15:{8:28, 9:29, 10:30, 11:31, 12:32, 13:33, 14:None,15:None,16:None},
16:{8:34, 9:35, 10:36, 11:37, 12:None,13:None,14:None,15:None,16:None}}
+
+### shuffleMaps value
+
+shuffleMap = {}
+shuffleMap['FULL'] = {}
+shuffleMap['FULL']['NO_SYMMETRY'] = {}
+shuffleMap['FULL']['NO_SYMMETRY'][157] = { 0:{0:None, 1:None, 2:None, 3:None, 4:None, 5:None, 6:"J-01",7:"H-01",8:"G-01",9:None, 10:None, 11:None, 12:None, 13:None, 14:None},
+ 1:{0:None, 1:None, 2:None, 3:None, 4:"L-02",5:"K-02",6:"J-02",7:"H-02",8:"G-02",9:"F-02",10:"E-02",11:None, 12:None, 13:None, 14:None},
+ 2:{0:None, 1:None, 2:None, 3:"M-03",4:"L-03",5:"K-03",6:"J-03",7:"H-03",8:"G-03",9:"F-03",10:"E-03",11:"D-03",12:None, 13:None, 14:None},
+ 3:{0:None, 1:None, 2:"N-04",3:"M-04",4:"L-04",5:"K-04",6:"J-04",7:"H-04",8:"G-04",9:"F-04",10:"E-04",11:"D-04",12:"C-04",13:None, 14:None},
+ 4:{0:None, 1:"P-05",2:"N-05",3:"M-05",4:"L-05",5:"K-05",6:"J-05",7:"H-05",8:"G-05",9:"F-05",10:"E-05",11:"D-05",12:"C-05",13:"B-05",14:None},
+ 5:{0:None, 1:"P-06",2:"N-06",3:"M-06",4:"L-06",5:"K-06",6:"J-06",7:"H-06",8:"G-06",9:"F-06",10:"E-06",11:"D-06",12:"C-06",13:"B-06",14:None},
+ 6:{0:"R-07",1:"P-07",2:"N-07",3:"M-07",4:"L-07",5:"K-07",6:"J-07",7:"H-07",8:"G-07",9:"F-07",10:"E-07",11:"D-07",12:"C-07",13:"B-07",14:"A-07"},
+ 7:{0:"R-08",1:"P-08",2:"N-08",3:"M-08",4:"L-08",5:"K-08",6:"J-08",7:"H-08",8:"G-08",9:"F-08",10:"E-08",11:"D-08",12:"C-08",13:"B-08",14:"A-08"},
+                                           8:{0:"R-09",1:"P-09",2:"N-09",3:"M-09",4:"L-09",5:"K-09",6:"J-09",7:"H-09",8:"G-09",9:"F-09",10:"E-09",11:"D-09",12:"C-09",13:"B-09",14:"A-09"},
+ 9:{0:None, 1:"P-10",2:"N-10",3:"M-10",4:"L-10",5:"K-10",6:"J-10",7:"H-10",8:"G-10",9:"F-10",10:"E-10",11:"D-10",12:"C-10",13:"B-10",14:None},
+ 10:{0:None, 1:"P-11",2:"N-11",3:"M-11",4:"L-11",5:"K-11",6:"J-11",7:"H-11",8:"G-11",9:"F-11",10:"E-11",11:"D-11",12:"C-11",13:"B-11",14:None},
+ 11:{0:None, 1:None, 2:"N-12",3:"M-12",4:"L-12",5:"K-12",6:"J-12",7:"H-12",8:"G-12",9:"F-12",10:"E-12",11:"D-12",12:"C-12",13:None, 14:None},
+ 12:{0:None, 1:None, 2:None, 3:"M-13",4:"L-13",5:"K-13",6:"J-13",7:"H-13",8:"G-13",9:"F-13",10:"E-13",11:"D-13",12:None, 13:None, 14:None},
+ 13:{0:None, 1:None, 2:None, 3:None, 4:"L-14",5:"K-14",6:"J-14",7:"H-14",8:"G-14",9:"F-14",10:"E-14",11:None, 12:None, 13:None, 14:None},
+ 14:{0:None, 1:None, 2:None, 3:None, 4:None, 5:None, 6:"J-15",7:"H-15",8:"G-15",9:None, 10:None, 11:None, 12:None, 13:None, 14:None}}
+
+shuffleMap['FULL']['QUARTER'] = {}
+shuffleMap['FULL']['QUARTER'][157] = {}
+shuffleMap['FULL']['QUARTER'][157] = {7 :{7: 0, 8: 1, 9: 9, 10:16, 11:23, 12:29, 13:34, 14:38},
+ 8 :{7: 1, 8: 2, 9: 3, 10: 4, 11: 5, 12: 6, 13: 7, 14: 8},
+ 9 :{7: 9, 8:10, 9:11, 10:12, 11:13, 12:14, 13:15, 14:None},
+ 10:{7:16, 8:17, 9:18, 10:19, 11:20, 12:21, 13:22, 14:None},
+ 11:{7:23, 8:24, 9:25, 10:26, 11:27, 12:28, 13:None,14:None},
+ 12:{7:29, 8:30, 9:31, 10:32, 11:33, 12:None, 13:None,14:None},
+ 13:{7:34, 8:35, 9:36, 10:37, 11:None,12:None, 13:None,14:None},
+ 14:{7:38, 8:39, 9:None,10:None,11:None,12:None, 13:None,14:None}}
\ No newline at end of file
diff --git a/ravenframework/Optimizers/BayesianOptimizer.py b/ravenframework/Optimizers/BayesianOptimizer.py
index 8378e334c6..ff1d743727 100644
--- a/ravenframework/Optimizers/BayesianOptimizer.py
+++ b/ravenframework/Optimizers/BayesianOptimizer.py
@@ -150,6 +150,7 @@ def __init__(self):
self._paramSelectionOptions = {'ftol':1e-10, 'maxiter':200, 'disp':False} # Optimizer options for hyperparameter selection
self._externalParamOptimizer = 'fmin_l_bfgs_b' # Optimizer for external hyperparameter selection
self._resetModel = False # Reset regression model if True
+    self._canHandleMultiObjective = False             # boolean indicator whether optimization is a single-objective problem or a multi-objective problem
def handleInput(self, paramInput):
"""
@@ -232,8 +233,8 @@ def initialize(self, externalSeeding=None, solutionExport=None):
elif len(self._model.supervisedContainer[0].target) != 1:
self.raiseAnError(RuntimeError, f'Only one target allowed when using GPR ROM for Bayesian Optimizer! '
f'Received {len(self._model.supervisedContainer[0].target)}')
- elif self._objectiveVar not in self._model.supervisedContainer[0].target:
- self.raiseAnError(RuntimeError, f'GPR ROM should be obective variable: {self._objectiveVar}, '
+ elif self._objectiveVar[0] not in self._model.supervisedContainer[0].target:
+      self.raiseAnError(RuntimeError, f'GPR ROM should be objective variable: {self._objectiveVar[0]}, '
f'Received {self._model.supervisedContainer[0].target}')
if self._resetModel:
@@ -265,8 +266,8 @@ def initialize(self, externalSeeding=None, solutionExport=None):
trainingData = self.normalizeData(trainingData)
for varName in self.toBeSampled.keys():
self._trainingInputs[0][varName] = list(trainingData[varName])
- self._trainingTargets.append(list(trainingData[self._objectiveVar]))
- self.raiseAMessage(f"{self._model.name} ROM has been already trained with {len(trainingData[self._objectiveVar])} samples!",
+ self._trainingTargets.append(list(trainingData[self._objectiveVar[0]]))
+ self.raiseAMessage(f"{self._model.name} ROM has been already trained with {len(trainingData[self._objectiveVar[0]])} samples!",
"This pre-trained ROM will be used by Optimizer to evaluate the next best point!")
# retrieving the best solution is based on the acqusition function's utility
# Constraints are considered in the following method.
@@ -333,7 +334,7 @@ def _useRealization(self, info, rlz):
# Add new inputs and model evaluations to the dataset
for varName in list(self.toBeSampled):
self._trainingInputs[traj][varName].extend(getattr(rlz, varName).values)
- self._trainingTargets[traj].extend(getattr(rlz, self._objectiveVar).values)
+ self._trainingTargets[traj].extend(getattr(rlz, self._objectiveVar[0]).values)
# Generate posterior with training data
self._generatePredictiveModel(traj)
self._resolveMultiSample(traj, rlz, info)
@@ -343,10 +344,10 @@ def _useRealization(self, info, rlz):
# Add new input and model evaluation to the dataset
for varName in list(self.toBeSampled):
self._trainingInputs[traj][varName].append(rlz[varName])
- self._trainingTargets[traj].append(rlz[self._objectiveVar])
+ self._trainingTargets[traj].append(rlz[self._objectiveVar[0]])
# Generate posterior with training data
self._generatePredictiveModel(traj)
- optVal = rlz[self._objectiveVar]
+ optVal = rlz[self._objectiveVar[0]]
self._resolveNewOptPoint(traj, rlz, optVal, info)
# Use acquisition to select next point
@@ -555,7 +556,7 @@ def _trainRegressionModel(self, traj):
for varName in list(self.toBeSampled):
trainingSet[varName] = np.asarray(self._trainingInputs[traj][varName])
- trainingSet[self._objectiveVar] = np.asarray(self._trainingTargets[traj])
+ trainingSet[self._objectiveVar[0]] = np.asarray(self._trainingTargets[traj])
self._model.train(trainingSet)
# NOTE It would be preferrable to use targetEvaluation;
# however, there does not appear a built in normalization method and as
@@ -596,8 +597,8 @@ def _evaluateRegressionModel(self, featurePoint):
# Evaluating the regression model
resultsDict = self._model.evaluate(featurePoint)
# NOTE only allowing single targets, needs to be fixed when multi-objective optimization is added
- mu = resultsDict[self._objectiveVar]
- std = resultsDict[self._objectiveVar+'_std']
+ mu = resultsDict[self._objectiveVar[0]]
+ std = resultsDict[self._objectiveVar[0]+'_std']
return mu, std
# * * * * * * * * * * * *
@@ -627,7 +628,7 @@ def _resolveMultiSample(self, traj, rlz, info):
for index in range(info['batchSize']):
for varName in rlzVars:
singleRlz[varName] = getattr(rlz, varName)[index].values
- optVal = singleRlz[self._objectiveVar]
+ optVal = singleRlz[self._objectiveVar[0]]
self._resolveNewOptPoint(traj, singleRlz, optVal, info)
singleRlz = {} # FIXME is this necessary?
self.raiseADebug(f'Multi-sample resolution completed')
@@ -664,7 +665,7 @@ def _resolveNewOptPoint(self, traj, rlz, optVal, info):
currentPoint = {}
for decisionVarName in list(self.toBeSampled):
currentPoint[decisionVarName] = rlz[decisionVarName]
- rlz[self._objectiveVar] = self._evaluateRegressionModel(currentPoint)[0][0]
+ rlz[self._objectiveVar[0]] = self._evaluateRegressionModel(currentPoint)[0][0]
self.raiseADebug('*' * 80)
if acceptable in ['accepted', 'first']:
# record history
@@ -675,13 +676,13 @@ def _resolveNewOptPoint(self, traj, rlz, optVal, info):
# If the last recommended solution point is the same, update the expected function value
if all(old[var] == xStar[var] for var in list(self.toBeSampled)):
newEstimate = copy.copy(old)
- newEstimate[self._objectiveVar] = muStar
+ newEstimate[self._objectiveVar[0]] = muStar
self._optPointHistory[traj].append((newEstimate, info))
else:
newRealization = copy.copy(old)
for var in list(self.toBeSampled):
newRealization[var] = xStar[var]
- newRealization[self._objectiveVar] = muStar
+ newRealization[self._objectiveVar[0]] = muStar
else:
self.raiseAnError(f'Unrecognized acceptability: "{acceptable}"')
diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py
index e7d9e3b941..243ed9e933 100644
--- a/ravenframework/Optimizers/GeneticAlgorithm.py
+++ b/ravenframework/Optimizers/GeneticAlgorithm.py
@@ -17,12 +17,14 @@
Genetic Algorithm-based optimization. Multiple strategies for
mutations, cross-overs, etc. are available.
Created June,3,2020
- @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi
+  Updated September 17, 2023
+ @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi, Junyung Kim
References
----------
.. [1] Holland, John H. "Genetic algorithms." Scientific American 267.1 (1992): 66-73.
- [2] Z. Michalewicz, "Genetic Algorithms. + Data Structures. = Evolution Programs," Third, Revised
- and Extended Edition, Springer (1996).
+ [2] Z. Michalewicz, "Genetic Algorithms. + Data Structures. = Evolution Programs," Third, Revised and Extended Edition, Springer (1996).
+ [3] Deb, Kalyanmoy, et al. "A fast and elitist multiobjective genetic algorithm: NSGA-II." IEEE transactions on evolutionary computation 6.2 (2002): 182-197.
+ [4] Deb, Kalyanmoy. "An efficient constraint handling method for genetic algorithms." Computer methods in applied mechanics and engineering 186.2-4 (2000): 311-338.
"""
# External Modules----------------------------------------------------------------------------------
from collections import deque, defaultdict
@@ -39,8 +41,10 @@
from .crossOverOperators.crossovers import returnInstance as crossoversReturnInstance
from .mutators.mutators import returnInstance as mutatorsReturnInstance
from .survivorSelectors.survivorSelectors import returnInstance as survivorSelectionReturnInstance
+from .survivorSelection import survivorSelection as survivorSelectionProcess
from .fitness.fitness import returnInstance as fitnessReturnInstance
from .repairOperators.repair import returnInstance as repairReturnInstance
+
# Internal Modules End------------------------------------------------------------------------------
class GeneticAlgorithm(RavenSampled):
@@ -49,7 +53,7 @@ class GeneticAlgorithm(RavenSampled):
"""
convergenceOptions = {'objective': r""" provides the desired value for the convergence criterion of the objective function
($\epsilon^{obj}$). In essence this is solving the inverse problem of finding the design variable
- at a given objective value, i.e., convergence is reached when: $$ Objective = \epsilon^{obj}$$.
+ at a given objective value, i.e., convergence is reached when: $$ Objective = \epsilon^{obj}$$
\default{1e-6}, if no criteria specified""",
'AHDp': r""" provides the desired value for the Average Hausdorff Distance between populations""",
'AHD': r""" provides the desired value for the Hausdorff Distance between populations""",
@@ -71,38 +75,50 @@ def __init__(self):
self._acceptRerun = {} # by traj, if True then override accept for point rerun
self._convergenceInfo = {} # by traj, the persistence and convergence information for most recent opt
self._requiredPersistence = 0 # consecutive persistence required to mark convergence
- self.needDenormalized() # the default in all optimizers is to normalize the data which is not the case here
+ self.needDenormalized() # the default in all optimizers is to normalize the data which is not the case here
self.batchId = 0
- self.population = None # panda Dataset container containing the population at the beginning of each generation iteration
- self.popAge = None # population age
- self.fitness = None # population fitness
- self.ahdp = np.NaN # p-Average Hausdorff Distance between populations
- self.ahd = np.NaN # Hausdorff Distance between populations
- self.hdsm = np.NaN # Hausdorff Distance Similarity metric between populations
- self.bestPoint = None
- self.bestFitness = None
- self.bestObjective = None
- self.objectiveVal = None
- self._populationSize = None
- self._parentSelectionType = None
- self._parentSelectionInstance = None
- self._nParents = None
- self._nChildren = None
- self._crossoverType = None
- self._crossoverPoints = None
- self._crossoverProb = None
- self._crossoverInstance = None
- self._mutationType = None
- self._mutationLocs = None
- self._mutationProb = None
- self._mutationInstance = None
- self._survivorSelectionType = None
- self._survivorSelectionInstance = None
- self._fitnessType = None
- self._objCoeff = None
- self._penaltyCoeff = None
- self._fitnessInstance = None
- self._repairInstance = None
+ self.population = None # panda Dataset container containing the population at the beginning of each generation iteration
+ self.popAge = None # population age
+ self.fitness = None # population fitness
+ self.rank = None # population rank (for Multi-objective optimization only)
+    self.constraintsV = None                          # calculated constraints value
+ self.crowdingDistance = None # population crowding distance (for Multi-objective optimization only)
+ self.ahdp = np.NaN # p-Average Hausdorff Distance between populations
+ self.ahd = np.NaN # Hausdorff Distance between populations
+ self.hdsm = np.NaN # Hausdorff Distance Similarity metric between populations
+ self.bestPoint = None # the best solution (chromosome) found among population in a specific batchId
+ self.bestFitness = None # fitness value of the best solution found
+ self.bestObjective = None # objective value of the best solution found
+ self.multiBestPoint = None # the best solutions (chromosomes) found among population in a specific batchId
+ self.multiBestFitness = None # fitness values of the best solutions found
+ self.multiBestObjective = None # objective values of the best solutions found
+ self.multiBestConstraint = None # constraint values of the best solutions found
+ self.multiBestRank = None # rank values of the best solutions found
+ self.multiBestCD = None # crowding distance (CD) values of the best solutions found
+ self.objectiveVal = None # objective values of solutions
+ self._populationSize = None # number of population size
+ self._parentSelectionType = None # type of the parent selection process chosen
+ self._parentSelectionInstance = None # instance of the parent selection process chosen
+ self._nParents = None # number of parents
+ self._kSelection = None # number of chromosomes selected for tournament selection
+ self._nChildren = None # number of children
+ self._crossoverType = None # type of the crossover process chosen
+ self._crossoverPoints = None # point where crossover process will happen
+ self._crossoverProb = None # probability of crossover process will happen
+ self._crossoverInstance = None # instance of the crossover process chosen
+ self._mutationType = None # type of the mutation process chosen
+ self._mutationLocs = None # point where mutation process will happen
+ self._mutationProb = None # probability of mutation process will happen
+ self._mutationInstance = None # instance of the mutation process chosen
+ self._survivorSelectionType = None # type of the survivor selection process chosen
+ self._survivorSelectionInstance = None # instance of the survivor selection process chosen
+ self._fitnessType = None # type of the fitness calculation chosen
+ self._objCoeff = None # weight coefficients of objectives for fitness calculation
+ self._objectiveVar = None # objective variable names
+ self._penaltyCoeff = None # weight coefficients corresponding to constraints and objectives for fitness calculation
+ self._fitnessInstance = None # instance of fitness
+ self._repairInstance = None # instance of repair
+    self._canHandleMultiObjective = True              # boolean indicator whether optimization is a single-objective problem or a multi-objective problem
##########################
# Initialization Methods #
@@ -116,86 +132,92 @@ def getInputSpecification(cls):
@ Out, specs, InputData.ParameterInput, class to use for specifying input of cls.
"""
specs = super(GeneticAlgorithm, cls).getInputSpecification()
- specs.description = r"""The \xmlNode{GeneticAlgorithm} optimizer is a metaheuristic approach
- to perform a global search in large design spaces. The methodology rose
- from the process of natural selection, and like others in the large class
- of the evolutionary algorithms, it utilizes genetic operations such as
- selection, crossover, and mutations to avoid being stuck in local minima
- and hence facilitates finding the global minima. More information can
- be found in:
- Holland, John H. "Genetic algorithms." Scientific American 267.1 (1992): 66-73."""
+ specs.description = r"""The \xmlNode{GeneticAlgorithm} is a metaheuristic optimization technique inspired by the principles
+ of natural selection and genetics. Introduced by John Holland in the 1960s, GA mimics the process of
+ biological evolution to solve complex optimization and search problems. They operate by maintaining a population of
+                            potential solutions represented as arrays of fixed length variables (genes), and each such array is called a chromosome.
+ These solutions undergo iterative refinement through processes such as mutation, crossover, and survivor selection. Mutation involves randomly altering certain genes within
+ individual solutions, introducing diversity into the population and enabling exploration of new regions in the solution space.
+ Crossover, on the other hand, mimics genetic recombination by exchanging genetic material between two parent solutions to create
+ offspring with combined traits. Survivor selection determines which solutions will advance to the next generation based on
+ their fitness—how well they perform in solving the problem at hand. Solutions with higher fitness scores are more likely to
+ survive and reproduce, passing their genetic material to subsequent generations. This iterative process continues
+ until a stopping criterion is met, typically when a satisfactory solution is found or after a predetermined number of generations.
+ More information can be found in:\\\\
+
+ Holland, John H. "Genetic algorithms." Scientific American 267.1 (1992): 66-73.\\\\
+
+ Non-dominated Sorting Genetic Algorithm II (NSGA-II) is a variant of GAs designed for multiobjective optimization problems.
+ NSGA-II extends traditional GAs by incorporating a ranking-based approach and crowding distance estimation to maintain a diverse set of
+ non-dominated (Pareto-optimal) solutions. This enables NSGA-II to efficiently explore trade-offs between conflicting objectives,
+ providing decision-makers with a comprehensive view of the problem's solution space. More information about NSGA-II can be found in:\\\\
+
+ Deb, Kalyanmoy, et al. "A fast and elitist multiobjective genetic algorithm: NSGA-II." IEEE transactions on evolutionary computation 6.2 (2002): 182-197.\\\\
+
+ GA in RAVEN supports for both single and multi-objective optimization problem."""
# GA Params
GAparams = InputData.parameterInputFactory('GAparams', strictMode=True,
printPriority=108,
- descr=r""" Genetic Algorithm Parameters:\begin{itemize}
- \item populationSize.
- \item parentSelectors:
- \begin{itemize}
- \item rouletteWheel.
- \item tournamentSelection.
- \item rankSelection.
- \end{itemize}
- \item Reproduction:
- \begin{itemize}
- \item crossover:
- \begin{itemize}
- \item onePointCrossover.
- \item twoPointsCrossover.
- \item uniformCrossover
- \end{itemize}
- \item mutators:
- \begin{itemize}
- \item swapMutator.
- \item scrambleMutator.
- \item inversionMutator.
- \item bitFlipMutator.
- \item randomMutator.
- \end{itemize}
- \end{itemize}
- \item survivorSelectors:
- \begin{itemize}
- \item ageBased.
- \item fitnessBased.
- \end{itemize}
- \end{itemize}""")
+ descr=r""" """)
# Population Size
populationSize = InputData.parameterInputFactory('populationSize', strictMode=True,
contentType=InputTypes.IntegerType,
printPriority=108,
descr=r"""The number of chromosomes in each population.""")
GAparams.addSub(populationSize)
+
+    #NOTE An indicator saying whether GA will handle constraints as hard or soft constraints will be added later @JunyungKim
+ # # Constraint Handling
+ # constraintHandling = InputData.parameterInputFactory('constraintHandling', strictMode=True,
+ # contentType=InputTypes.StringType,
+ # printPriority=108,
+ # descr=r"""a node indicating whether GA will handle constraints hardly or softly.""")
+ # GAparams.addSub(constraintHandling)
+
# Parent Selection
parentSelection = InputData.parameterInputFactory('parentSelection', strictMode=True,
contentType=InputTypes.StringType,
printPriority=108,
- descr=r"""A node containing the criterion based on which the parents are selected. This can be a
- fitness proportional selection such as:
- a. \textbf{\textit{rouletteWheel}},
- b. \textbf{\textit{tournamentSelection}},
- c. \textbf{\textit{rankSelection}}
- for all methods nParents is computed such that the population size is kept constant.
- $nChildren = 2 \times {nParents \choose 2} = nParents \times (nParents-1) = popSize$
- solving for nParents we get:
- $nParents = ceil(\frac{1 + \sqrt{1+4*popSize}}{2})$
- This will result in a popSize a little larger than the initial one, these excessive children will be later thrawn away and only the first popSize child will be kept""")
+ descr=r"""A node containing the criterion based on which the parents are selected. This can be a fitness proportional selection for all methods.
+ The number of parents (i.e., nParents) is computed such that the population size is kept constant. \\\\
+ $nParents = ceil(\frac{1 + \sqrt{1+4*popSize}}{2})$. \\\\
+ The number of children (i.e., nChildren) is computed by \\\\
+ $nChildren = 2 \times {nParents \choose 2} = nParents \times (nParents-1) = popSize$ \\\\
+                                                 This will result in a popSize a little larger than the initial one, and the excess children will later be thrown away and only the first popSize children will be kept. \\\\
+ You can choose three options for parentSelection:
+ \begin{itemize}
+ \item \textit{rouletteWheel} - It assigns probabilities to chromosomes based on their fitness,
+ allowing for selection proportionate to their likelihood of being chosen for reproduction.
+ \item \textit{tournamentSelection} - Chromosomes are randomly chosen from the population to compete in a tournament,
+ and the fittest individual among them is selected for reproduction.
+ \item \textit{rankSelection} - Chromosomes with higher fitness values are selected.
+ \end{itemize}
+ """)
GAparams.addSub(parentSelection)
# Reproduction
reproduction = InputData.parameterInputFactory('reproduction', strictMode=True,
printPriority=108,
- descr=r"""a node containing the reproduction methods.
- This accepts subnodes that specifies the types of crossover and mutation.""")
+ descr=r"""a node containing the reproduction methods. This accepts subnodes that specifies the types of crossover and mutation. """)
+ # 0. k-selectionNumber of Parents
+ kSelection = InputData.parameterInputFactory('kSelection', strictMode=True,
+ contentType=InputTypes.IntegerType,
+ printPriority=108,
+                                               descr=r"""Number of chromosomes selected for tournament selection""")
+ reproduction.addSub(kSelection)
# 1. Crossover
crossover = InputData.parameterInputFactory('crossover', strictMode=True,
contentType=InputTypes.StringType,
printPriority=108,
- descr=r"""a subnode containing the implemented crossover mechanisms.
- This includes: a. onePointCrossover,
- b. twoPointsCrossover,
- c. uniformCrossover.""")
+ descr=r"""a subnode containing the implemented crossover mechanisms. You can choose one of the crossover options listed below:
+ \begin{itemize}
+ \item \textit{onePointCrossover} - It selects a random crossover point along the chromosome of parent individuals and swapping the genetic material beyond that point to create offspring.
+ \item \textit{twoPointsCrossover} - It selects two random crossover points along the chromosome of parent individuals and swapping the genetic material beyond that point to create offspring.
+ \item \textit{uniformCrossover} - It randomly selects genes from two parent chromosomes with equal probability, creating offspring by exchanging genes at corresponding positions.
+ \end{itemize}""")
crossover.addParam("type", InputTypes.StringType, True,
- descr="type of crossover operation to be used (e.g., OnePoint, MultiPoint, or Uniform)")
+ descr="type of crossover operation to be used. See the list of options above.")
crossoverPoint = InputData.parameterInputFactory('points', strictMode=True,
contentType=InputTypes.IntegerListType,
printPriority=108,
@@ -211,14 +233,16 @@ def getInputSpecification(cls):
mutation = InputData.parameterInputFactory('mutation', strictMode=True,
contentType=InputTypes.StringType,
printPriority=108,
- descr=r"""a subnode containing the implemented mutation mechanisms.
- This includes: a. bitFlipMutation,
- b. swapMutation,
- c. scrambleMutation,
- d. inversionMutation, or
- e. randomMutator.""")
+ descr=r"""a subnode containing the implemented mutation mechanisms. You can choose one of the mutation options listed below:
+ \begin{itemize}
+ \item \textit{swapMutator} - It randomly selects two genes within an chromosome and swaps their positions.
+ \item \textit{scrambleMutator} - It randomly selects a subset of genes within an chromosome and shuffles their positions.
+ \item \textit{inversionMutator} - It selects a contiguous subset of genes within an chromosome and reverses their order.
+ \item \textit{bitFlipMutator} - It randomly selects genes within an chromosome and flips their values.
+ \item \textit{randomMutator} - It randomly selects a gene within an chromosome and mutates the gene.
+ \end{itemize} """)
mutation.addParam("type", InputTypes.StringType, True,
- descr="type of mutation operation to be used (e.g., bit, swap, or scramble)")
+ descr="type of mutation operation to be used. See the list of options above.")
mutationLocs = InputData.parameterInputFactory('locs', strictMode=True,
contentType=InputTypes.IntegerListType,
printPriority=108,
@@ -236,37 +260,45 @@ def getInputSpecification(cls):
survivorSelection = InputData.parameterInputFactory('survivorSelection', strictMode=True,
contentType=InputTypes.StringType,
printPriority=108,
- descr=r"""a subnode containing the implemented survivor selection mechanisms.
- This includes: a. ageBased, or
- b. fitnessBased.""")
+ descr=r"""a subnode containing the implemented survivor selection mechanisms. You can choose one of the survivor selection options listed below:
+ \begin{itemize}
+ \item \textit{fitnessBased} - Individuals with higher fitness scores are more likely to be selected to survive and
+                                                              proceed to the next generation. It supports only single-objective optimization problems.
+ \item \textit{ageBased} - Individuals are selected for survival based on their age or generation, with older individuals being prioritized
+                                                              for retention. It supports only single-objective optimization problems.
+ \item \textit{rankNcrowdingBased} - Individuals with low rank and crowding distance are more likely to be selected to survive and
+                                                              proceed to the next generation. It supports only multi-objective optimization problems.
+ \end{itemize}""")
GAparams.addSub(survivorSelection)
# Fitness
fitness = InputData.parameterInputFactory('fitness', strictMode=True,
contentType=InputTypes.StringType,
printPriority=108,
- descr=r"""a subnode containing the implemented fitness functions.
- This includes: \begin{itemize}
- \item invLinear:
- \[fitness = -a \times obj - b \times \sum\\_{j=1}^{nConstraint} max(0,-penalty\\_j) \].
-
- \item logistic:
- \[fitness = \frac{1}{1+e^{a\times(obj-b)}}\].
-
- \item
- feasibleFirst: \[fitness =
- -obj \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \text{for} \ \ g\\_j(x)\geq 0 \; \forall j\] and
- \[fitness = -obj\\_{worst} - \Sigma\\_{j=1}^{J} \ \ \ \ \ \ \ \ otherwise \]
- \end{itemize}.""")
+ descr=r"""a subnode containing the implemented fitness functions.You can choose one of the fitness options listed below:
+ \begin{itemize}
+ \item \textit{invLinear} - It assigns fitness values inversely proportional to the individual's objective function values,
+                                            prioritizing solutions with lower objective function values (i.e., minimization) for selection and reproduction. It supports only single-objective optimization problems.\\\\
+ $fitness = -a \times obj - b \times \sum_{j=1}^{nConstraint} max(0,-penalty_{j}) $\\
+                                            where j represents an index of constraints
+ \\
+
+                                            \item \textit{logistic} - It applies a logistic function to transform raw objective function values into fitness scores. It supports only single-objective optimization problems.\\\\
+ $fitness = \frac{1}{1+e^{a\times(obj-b)}}$\\
+ \item \textit{feasibleFirst} - It prioritizes solutions that meet constraints by assigning higher fitness scores to feasible solutions,
+
+                                            encouraging the evolution of individuals that satisfy the problem's constraints. It supports single- and multi-objective optimization problems.\\\\
+                                            $fitness = \left\{\begin{matrix} -obj & g_{j}(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right.$\\
+ \end{itemize} """)
fitness.addParam("type", InputTypes.StringType, True,
descr=r"""[invLin, logistic, feasibleFirst]""")
objCoeff = InputData.parameterInputFactory('a', strictMode=True,
- contentType=InputTypes.FloatType,
+ contentType=InputTypes.FloatListType,
printPriority=108,
descr=r""" a: coefficient of objective function.""")
fitness.addSub(objCoeff)
penaltyCoeff = InputData.parameterInputFactory('b', strictMode=True,
- contentType=InputTypes.FloatType,
+ contentType=InputTypes.FloatListType,
printPriority=108,
descr=r""" b: coefficient of constraint penalty.""")
fitness.addSub(penaltyCoeff)
@@ -304,6 +336,8 @@ def getSolutionExportVariableNames(cls):
new = {}
# new = {'': 'the size of step taken in the normalized input space to arrive at each optimal point'}
new['conv_{CONV}'] = 'status of each given convergence criteria'
+ new['rank'] = 'It refers to the sorting of solutions into non-dominated fronts based on their Pareto dominance relationships'
+ new['CD'] = 'It measures the density of solutions within each front to guide the selection of diverse individuals for the next generation'
new['fitness'] = 'fitness of the current chromosome'
new['age'] = 'age of current chromosome'
new['batchId'] = 'Id of the batch to whom the chromosome belongs'
@@ -311,6 +345,7 @@ def getSolutionExportVariableNames(cls):
new['AHD'] = 'Hausdorff Distance between populations'
new['HDSM'] = 'Hausdorff Distance Similarity Measure between populations'
new['ConstraintEvaluation_{CONSTRAINT}'] = 'Constraint function evaluation (negative if violating and positive otherwise)'
+ new['FitnessEvaluation_{OBJ}'] = 'Fitness evaluation of each objective'
ok.update(new)
return ok
@@ -322,56 +357,138 @@ def handleInput(self, paramInput):
@ Out, None
"""
RavenSampled.handleInput(self, paramInput)
- # GAparams
+ ####################################################################################
+ # GAparams #
+ ####################################################################################
gaParamsNode = paramInput.findFirst('GAparams')
- # populationSize
+
+ ####################################################################################
+ # populationSize #
+ ####################################################################################
populationSizeNode = gaParamsNode.findFirst('populationSize')
self._populationSize = populationSizeNode.value
- # parent selection
+
+ ####################################################################################
+ # parent selection node #
+ ####################################################################################
parentSelectionNode = gaParamsNode.findFirst('parentSelection')
self._parentSelectionType = parentSelectionNode.value
self._parentSelectionInstance = parentSelectionReturnInstance(self, name=parentSelectionNode.value)
- # reproduction node
+
+ if len(self._objectiveVar) >=2 and self._parentSelectionType != 'tournamentSelection':
+ self.raiseAnError(IOError, f'tournamentSelection in is a sole mechanism supportive in multi-objective optimization.')
+
+ ####################################################################################
+ # reproduction node #
+ ####################################################################################
reproductionNode = gaParamsNode.findFirst('reproduction')
self._nParents = int(np.ceil(1/2 + np.sqrt(1+4*self._populationSize)/2))
self._nChildren = int(2*comb(self._nParents,2))
- # crossover node
+
+ ####################################################################################
+ # k-Selection node #
+ ####################################################################################
+ if reproductionNode.findFirst('kSelection') is None:
+ self._kSelection = 3 # Default value is set to 3.
+ else:
+ self._kSelection = reproductionNode.findFirst('kSelection').value
+
+ ####################################################################################
+ # crossover node #
+ ####################################################################################
crossoverNode = reproductionNode.findFirst('crossover')
self._crossoverType = crossoverNode.parameterValues['type']
+ if self._crossoverType not in ['onePointCrossover','twoPointsCrossover','uniformCrossover', 'partiallyMappedCrossover']:
+ self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support onePointCrossover, twoPointsCrossover, uniformCrossover and partiallyMappedCrossover as a crossover, whereas provided crossover is {self._crossoverType}')
if crossoverNode.findFirst('points') is None:
self._crossoverPoints = None
else:
self._crossoverPoints = crossoverNode.findFirst('points').value
self._crossoverProb = crossoverNode.findFirst('crossoverProb').value
self._crossoverInstance = crossoversReturnInstance(self,name = self._crossoverType)
- # mutation node
+
+ ####################################################################################
+ # mutation node #
+ ####################################################################################
mutationNode = reproductionNode.findFirst('mutation')
self._mutationType = mutationNode.parameterValues['type']
+ if self._mutationType not in ['swapMutator','scrambleMutator','inversionMutator','bitFlipMutator','randomMutator']:
+ self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support swapMutator, scrambleMutator, inversionMutator, bitFlipMutator, and randomMutator as a mutator, whereas provided mutator is {self._mutationType}')
if mutationNode.findFirst('locs') is None:
self._mutationLocs = None
else:
self._mutationLocs = mutationNode.findFirst('locs').value
self._mutationProb = mutationNode.findFirst('mutationProb').value
self._mutationInstance = mutatorsReturnInstance(self,name = self._mutationType)
- # Survivor selection
+
+ ####################################################################################
+ # survivor selection node #
+ ####################################################################################
survivorSelectionNode = gaParamsNode.findFirst('survivorSelection')
self._survivorSelectionType = survivorSelectionNode.value
self._survivorSelectionInstance = survivorSelectionReturnInstance(self,name = self._survivorSelectionType)
- # Fitness
+ if self._survivorSelectionType not in ['ageBased','fitnessBased','rankNcrowdingBased']:
+ self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support ageBased, fitnessBased, and rankNcrowdingBased as a survivorSelector, whereas provided survivorSelector is {self._survivorSelectionType}')
+ if len(self._objectiveVar) == 1 and self._survivorSelectionType == 'rankNcrowdingBased':
+ self.raiseAnError(IOError, f'(rankNcrowdingBased) in only supports when the number of objective in is bigger than two. ')
+ if len(self._objectiveVar) > 1 and self._survivorSelectionType != 'rankNcrowdingBased':
+ self.raiseAnError(IOError, f'The only option supported in for Multi-objective Optimization is (rankNcrowdingBased).')
+
+ ####################################################################################
+ # fitness / constraint node #
+ ####################################################################################
fitnessNode = gaParamsNode.findFirst('fitness')
self._fitnessType = fitnessNode.parameterValues['type']
- # Check if the fitness requested is among the constrained optimization fitnesses
- # Currently, only InvLin and feasibleFirst Fitnesses deal with constrained optimization
+ ####################################################################################
+ # constraint node #
+ ####################################################################################
# TODO: @mandd, please explore the possibility to convert the logistic fitness into a constrained optimization fitness.
- if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','feasibleFirst']:
- self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear and feasibleFirst fitnesses, whereas provided fitness is {self._fitnessType}')
+ if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','logistic', 'feasibleFirst']:
+ self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear, logistic, and feasibleFirst as a fitness, whereas provided fitness is {self._fitnessType}')
+ self._expConstr = self.assemblerObjects['Constraint'] if 'Constraint' in self.assemblerObjects else None
+ self._impConstr = self.assemblerObjects['ImplicitConstraint'] if 'ImplicitConstraint' in self.assemblerObjects else None
+ if self._expConstr != None and self._impConstr != None:
+ self._numOfConst = len([ele for ele in self._expConstr if ele != 'Functions' if ele !='External']) + len([ele for ele in self._impConstr if ele != 'Functions' if ele !='External'])
+ elif self._expConstr == None and self._impConstr != None:
+ self._numOfConst = len([ele for ele in self._impConstr if ele != 'Functions' if ele !='External'])
+ elif self._expConstr != None and self._impConstr == None:
+ self._numOfConst = len([ele for ele in self._expConstr if ele != 'Functions' if ele !='External'])
+ else:
+ self._numOfConst = 0
+ if (self._expConstr != None) and (self._impConstr != None) and (self._penaltyCoeff != None):
+ if len(self._penaltyCoeff) != len(self._objectiveVar) * self._numOfConst:
+ self.raiseAnError(IOError, f'The number of penaltyCoeff. in should be identical with the number of objective in and the number of constraints (i.e., and )')
+ else:
+ pass
self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None
- self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None
+
+ # TODO: @JunyungKim, the code lines below are temporarily for 'feasibleFirst'. It should be generalized for invLinear as well.
+ if self._fitnessType == 'feasibleFirst':
+ # Case 1: There is constraint(s) and penaltyCoeff are given by users
+ if self._numOfConst != 0 and fitnessNode.findFirst('b') is not None:
+ self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else [1] * len(self._objectiveVar)
+ self._penaltyCoeff = fitnessNode.findFirst('b').value
+ # Case 2: There is NO constraint and penaltyCoeff are given by users
+ elif self._numOfConst == 0 and fitnessNode.findFirst('b') is not None:
+ self.raiseAnError(IOError, f'The number of constraints used are 0 but there are penalty coefficieints')
+ # Case 3: There is constraint(s) and penaltyCoeff is NOT given by users
+ elif self._numOfConst != 0 and fitnessNode.findFirst('b') is None:
+ self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else [1] * len(self._objectiveVar) #NOTE if objCoeff is not provided, then assume they are all 1.
+ self._penaltyCoeff = [1] * self._numOfConst * len(self._objectiveVar) #NOTE if penaltyCoeff is not provided, then assume they are all 1.
+ # Case 4: There is NO constraint and penaltyCoeff is NOT given by users
+ else:
+ self._objCoeff = [1] * len(self._objectiveVar)
+ self._penaltyCoeff = [0] * len(self._objectiveVar)
+ else:
+ self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None
+ self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None
self._fitnessInstance = fitnessReturnInstance(self,name = self._fitnessType)
self._repairInstance = repairReturnInstance(self,name='replacementRepair') # currently only replacement repair is implemented.
- # Convergence Criterion
+ ####################################################################################
+ # convergence criterion node #
+ ####################################################################################
convNode = paramInput.findFirst('convergence')
if convNode is not None:
for sub in convNode.subparts:
@@ -427,60 +544,89 @@ def needDenormalized(self):
# overload as needed in inheritors
return True
- ###############
- # Run Methods #
- ###############
-
- def _useRealization(self, info, rlz):
- """
- Used to feedback the collected runs into actionable items within the sampler.
- This is called by localFinalizeActualSampling, and hence should contain the main skeleton.
- @ In, info, dict, identifying information about the realization
- @ In, rlz, xr.Dataset, new batched realizations
- @ Out, None
- """
- # The whole skeleton should be here, this should be calling all classes and _private methods.
+ def singleConstraint(self, info, rlz):
traj = info['traj']
for t in self._activeTraj[1:]:
self._closeTrajectory(t, 'cancel', 'Currently GA is single trajectory', 0)
self.incrementIteration(traj)
- info['step'] = self.counter
-
- # Developer note: each algorithm step is indicated by a number followed by the generation number
- # e.g., '5 @ n-1' refers to step 5 for generation n-1 (i.e., previous generation)
- # for more details refer to GRP-Raven-development/Disceret_opt channel on MS Teams
- # 5 @ n-1: Survivor Selection from previous iteration (children+parents merging from previous generation)
-
- # 5.1 @ n-1: fitnessCalculation(rlz)
- # perform fitness calculation for newly obtained children (rlz)
+ if not self._canHandleMultiObjective or len(self._objectiveVar) == 1: # This is for a single-objective Optimization case.
+ offSprings = datasetToDataArray(rlz, list(self.toBeSampled))
+ objectiveVal = list(np.atleast_1d(rlz[self._objectiveVar[0]].data))
+
+ # Collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions)
+ constraintData = {}
+ if self._constraintFunctions or self._impConstraintFunctions:
+ params = []
+ for y in (self._constraintFunctions + self._impConstraintFunctions):
+ params += y.parameterNames()
+ for p in list(set(params) -set([self._objectiveVar[0]]) -set(list(self.toBeSampled.keys()))):
+ constraintData[p] = list(np.atleast_1d(rlz[p].data))
+ # Compute constraint function g_j(x) for all constraints (j = 1 .. J) and all x's (individuals) in the population
+ g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions)))
+
+ g = xr.DataArray(g0,
+ dims=['chromosome','Constraint'],
+ coords={'chromosome':np.arange(np.shape(offSprings)[0]),
+ 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]})
+ for index,individual in enumerate(offSprings):
+ newOpt = individual
+ opt = {self._objectiveVar[0]:objectiveVal[index]}
+ for p, v in constraintData.items():
+ opt[p] = v[index]
+
+ for constIndex, constraint in enumerate(self._constraintFunctions + self._impConstraintFunctions):
+ if constraint in self._constraintFunctions:
+ g.data[index, constIndex] = self._handleExplicitConstraints(newOpt, constraint)
+ else:
+ g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint)
+
+ offSpringFitness = self._fitnessInstance(rlz,
+ objVar=self._objectiveVar[0],
+ a=self._objCoeff,
+ b=self._penaltyCoeff,
+ penalty=None,
+ constraintFunction=g,
+ constraintNum = self._numOfConst,
+ type=self._minMax)
+
+ self._collectOptPoint(rlz, offSpringFitness, objectiveVal, g)
+ self._resolveNewGeneration(traj, rlz, objectiveVal, offSpringFitness, g, info)
+ return traj, g, objectiveVal, offSprings, offSpringFitness
+
+ def multiConstraint(self, info, rlz):
+ traj = info['traj']
+ for t in self._activeTraj[1:]:
+ self._closeTrajectory(t, 'cancel', 'Currently GA is single trajectory', 0)
+ self.incrementIteration(traj)
+ objectiveVal = []
offSprings = datasetToDataArray(rlz, list(self.toBeSampled))
- objectiveVal = list(np.atleast_1d(rlz[self._objectiveVar].data))
+ for i in range(len(self._objectiveVar)):
+ objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data)))
- # collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions)
+ # Collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions)
constraintData = {}
if self._constraintFunctions or self._impConstraintFunctions:
params = []
for y in (self._constraintFunctions + self._impConstraintFunctions):
params += y.parameterNames()
- for p in list(set(params) -set([self._objectiveVar]) -set(list(self.toBeSampled.keys()))):
+ for p in list(set(params) -set(self._objectiveVar) -set(list(self.toBeSampled.keys()))):
constraintData[p] = list(np.atleast_1d(rlz[p].data))
- # Compute constraint function g_j(x) for all constraints (j = 1 .. J)
- # and all x's (individuals) in the population
+ # Compute constraint function g_j(x) for all constraints (j = 1 .. J) and all x's (individuals) in the population
g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions)))
g = xr.DataArray(g0,
dims=['chromosome','Constraint'],
coords={'chromosome':np.arange(np.shape(offSprings)[0]),
'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]})
- # FIXME The constraint handling is following the structure of the RavenSampled.py,
- # there are many utility functions that can be simplified and/or merged together
- # _check, _handle, and _apply, for explicit and implicit constraints.
- # This can be simplified in the near future in GradientDescent, SimulatedAnnealing, and here in GA
+
for index,individual in enumerate(offSprings):
newOpt = individual
- opt = {self._objectiveVar:objectiveVal[index]}
+ objOpt = dict(zip(self._objectiveVar,
+ list(map(lambda x:-1 if x=="max" else 1 , self._minMax))))
+ opt = dict(zip(self._objectiveVar, [item[index] for item in objectiveVal]))
+ opt = {k: objOpt[k]*opt[k] for k in opt}
for p, v in constraintData.items():
opt[p] = v[index]
@@ -489,58 +635,96 @@ def _useRealization(self, info, rlz):
g.data[index, constIndex] = self._handleExplicitConstraints(newOpt, constraint)
else:
g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint)
+
offSpringFitness = self._fitnessInstance(rlz,
objVar=self._objectiveVar,
a=self._objCoeff,
b=self._penaltyCoeff,
- penalty=None,
constraintFunction=g,
- type=self._minMax)
+ constraintNum = self._numOfConst,
+ type = self._minMax)
+ return traj, g, objectiveVal, offSprings, offSpringFitness
- self._collectOptPoint(rlz, offSpringFitness, objectiveVal,g)
- self._resolveNewGeneration(traj, rlz, objectiveVal, offSpringFitness, g, info)
+ #########################################################################################################
+ # Run Methods #
+ #########################################################################################################
- if self._activeTraj:
- # 5.2@ n-1: Survivor selection(rlz)
- # update population container given obtained children
- if self.counter > 1:
- self.population,self.fitness,age,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge,
- variables=list(self.toBeSampled),
- population=self.population,
- fitness=self.fitness,
- newRlz=rlz,
- offSpringsFitness=offSpringFitness,
- popObjectiveVal=self.objectiveVal)
- self.popAge = age
- else:
- self.population = offSprings
- self.fitness = offSpringFitness
- self.objectiveVal = rlz[self._objectiveVar].data
+ #########################################################################################################
+ # Developer note:
+ # Each algorithm step is indicated by a number followed by the generation number
+ # e.g., '0 @ n-1' refers to step 0 for generation n-1 (i.e., previous generation)
+ # for more details refer to GRP-Raven-development/Disceret_opt channel on MS Teams.
+ #########################################################################################################
+
+ def _useRealization(self, info, rlz):
+ """
+ Used to feedback the collected runs into actionable items within the sampler.
+ This is called by localFinalizeActualSampling, and hence should contain the main skeleton.
+ @ In, info, dict, identifying information about the realization
+ @ In, rlz, xr.Dataset, new batched realizations
+ @ Out, None
+ """
- # 1 @ n: Parent selection from population
- # pair parents together by indexes
+ info['step'] = self.counter
+ objInd = int(len(self._objectiveVar)>1) + 1 #if len(self._objectiveVar) == 1 else 2
+ constraintFuncs: dict = {1: GeneticAlgorithm.singleConstraint, 2: GeneticAlgorithm.multiConstraint}
+ const = constraintFuncs.get(objInd, GeneticAlgorithm.singleConstraint)
+ traj, g, objectiveVal, offSprings, offSpringFitness = const(self, info, rlz)
+
+ if self._activeTraj:
+ # Step 0 @ n-1: Survivor selection(rlz): Update population container given obtained children
+ survivorSelectionFuncs: dict = {1: survivorSelectionProcess.singleObjSurvivorSelect, 2: survivorSelectionProcess.multiObjSurvivorSelect}
+ survivorSelection = survivorSelectionFuncs.get(objInd, survivorSelectionProcess.singleObjSurvivorSelect)
+ survivorSelection(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g)
+
+ # Step 1 @ n-1: Plot results
+ # ## TODO: remove all the plots and maybe design new plots in outstreams if our current cannot be used
+ # ## These are currently for debugging purposes @JunyungKim
+ # import matplotlib.pyplot as plt
+
+ # signChange = list(map(lambda x:-1 if x=="max" else 1 , self._minMax))
+ # for i in range(0, len(self.multiBestObjective)):
+ # newMultiBestObjective = self.multiBestObjective * signChange
+
+ # plt.title(str('BatchID = ' + str(self.batchId)))
+ # plt.plot(newMultiBestObjective[:,0],
+ # newMultiBestObjective[:,1],'*')
+
+ # for i in range(len(self.multiBestObjective[:,0])):
+ # plt.text(newMultiBestObjective[i,0],
+ # newMultiBestObjective[i,1], str(self.batchId))
+ # # plt.savefig('PF'+str(i)+'_'+str(self.batchId)+'.png')
+ # plt.savefig('PF_'+str(self.batchId)+'.png')
+
+ # Step 2 @ n: Parent selection from population
+ # Pair parents together by indexes
parents = self._parentSelectionInstance(self.population,
variables=list(self.toBeSampled),
- fitness=self.fitness,
- nParents=self._nParents)
-
- # 2 @ n: Crossover from set of parents
- # create childrenCoordinates (x1,...,xM)
+ fitness = self.fitness,
+ kSelection = self._kSelection,
+ nParents=self._nParents,
+ rank = self.rank,
+ crowdDistance = self.crowdingDistance,
+ objVal = self._objectiveVar
+ )
+
+ # Step 3 @ n: Crossover from set of parents
+ # Create childrenCoordinates (x1,...,xM)
childrenXover = self._crossoverInstance(parents=parents,
variables=list(self.toBeSampled),
crossoverProb=self._crossoverProb,
points=self._crossoverPoints)
- # 3 @ n: Mutation
- # perform random directly on childrenCoordinates
+ # Step 4 @ n: Mutation
+ # Perform random directly on childrenCoordinates
childrenMutated = self._mutationInstance(offSprings=childrenXover,
distDict=self.distDict,
locs=self._mutationLocs,
mutationProb=self._mutationProb,
variables=list(self.toBeSampled))
- # 4 @ n: repair/replacement
- # repair should only happen if multiple genes in a single chromosome have the same values (),
+ # Step 5 @ n: repair/replacement
+ # Repair should only happen if multiple genes in a single chromosome have the same values (),
# and at the same time the sampling of these genes should be with Out replacement.
needsRepair = False
for chrom in range(self._nChildren):
@@ -559,12 +743,12 @@ def _useRealization(self, info, rlz):
children = children[:self._populationSize, :]
daChildren = xr.DataArray(children,
- dims=['chromosome','Gene'],
- coords={'chromosome': np.arange(np.shape(children)[0]),
- 'Gene':list(self.toBeSampled)})
+ dims=['chromosome','Gene'],
+ coords={'chromosome': np.arange(np.shape(children)[0]),
+ 'Gene':list(self.toBeSampled)})
- # 5 @ n: Submit children batch
- # submit children coordinates (x1,...,xm), i.e., self.childrenCoordinates
+ # Step 6 @ n: Submit children batch
+ # Submit children coordinates (x1,...,xm), i.e., self.childrenCoordinates
for i in range(self.batch):
newRlz = {}
for _, var in enumerate(self.toBeSampled.keys()):
@@ -588,7 +772,6 @@ def _submitRun(self, point, traj, step, moreInfo=None):
})
# NOTE: Currently, GA treats explicit and implicit constraints similarly
# while box constraints (Boundary constraints) are automatically handled via limits of the distribution
- #
self.raiseADebug(f'Adding run to queue: {self.denormalizeData(point)} | {info}')
self._submissionQueue.append((point, info))
@@ -602,6 +785,8 @@ def flush(self):
self.population = None
self.popAge = None
self.fitness = None
+ self.rank = None
+ self.crowdingDistance = None
self.ahdp = np.NaN
self.ahd = np.NaN
self.hdsm = np.NaN
@@ -609,6 +794,12 @@ def flush(self):
self.bestFitness = None
self.bestObjective = None
self.objectiveVal = None
+ self.multiBestPoint = None
+ self.multiBestFitness = None
+ self.multiBestObjective = None
+ self.multiBestConstraint = None
+ self.multiBestRank = None
+ self.multiBestCD = None
# END queuing Runs
# * * * * * * * * * * * * * * * *
@@ -654,13 +845,19 @@ def _resolveNewGeneration(self, traj, rlz, objectiveVal, fitness, g, info):
# NOTE: the solution export needs to be updated BEFORE we run rejectOptPoint or extend the opt
# point history.
if self._writeSteps == 'every':
- self._solutionExportUtilityUpdate(traj, rlz, fitness, g, acceptable)
-
+ for i in range(rlz.sizes['RAVEN_sample_ID']):
+ varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys())
+ rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars)
+ rlzDict[self._objectiveVar[0]] = np.atleast_1d(rlz[self._objectiveVar[0]].data)[i]
+ rlzDict['fitness'] = np.atleast_1d(fitness.to_array()[:,i])
+ for ind, consName in enumerate(g['Constraint'].values):
+ rlzDict['ConstraintEvaluation_'+consName] = g[i,ind]
+ self._updateSolutionExport(traj, rlzDict, acceptable, None)
# decide what to do next
if acceptable in ['accepted', 'first']:
# record history
bestRlz = {}
- bestRlz[self._objectiveVar] = self.bestObjective
+ bestRlz[self._objectiveVar[0]] = self.bestObjective
bestRlz['fitness'] = self.bestFitness
bestRlz.update(self.bestPoint)
self._optPointHistory[traj].append((bestRlz, info))
@@ -669,6 +866,77 @@ def _resolveNewGeneration(self, traj, rlz, objectiveVal, fitness, g, info):
else: # e.g. rerun
pass # nothing to do, just keep moving
+ def _resolveNewGenerationMulti(self, traj, rlz, info):
+ """
+ Store a new Generation after checking convergence
+ @ In, traj, int, trajectory for this new point
+ @ In, rlz, dict, realized realization
+ @ In, info, dict, identifying information about the realization
+ @ Out, None
+ """
+ self.raiseADebug('*'*80)
+ self.raiseADebug(f'Trajectory {traj} iteration {info["step"]} resolving new state ...')
+ # note the collection of the opt point
+ self._stepTracker[traj]['opt'] = (rlz, info)
+ acceptable = 'accepted' if self.counter > 1 else 'first'
+ old = self.population
+ converged = self._updateConvergence(traj, rlz, old, acceptable)
+ if converged:
+ self._closeTrajectory(traj, 'converge', 'converged', self.bestObjective)
+ # NOTE: the solution export needs to be updated BEFORE we run rejectOptPoint or extend the opt
+ # point history.
+ objVal = [[] for x in range(len(self.objectiveVal[0]))]
+ for i in range(len(self.objectiveVal[0])):
+ objVal[i] = [item[i] for item in self.objectiveVal]
+
+ objVal = xr.DataArray(objVal,
+ dims=['chromosome','obj'],
+ coords={'chromosome':np.arange(np.shape(objVal)[0]),
+ 'obj': self._objectiveVar})
+ if self._writeSteps == 'every':
+ print("### rlz.sizes['RAVEN_sample_ID'] = {}".format(rlz.sizes['RAVEN_sample_ID']))
+ print("### self.population.shape is {}".format(self.population.shape))
+ for i in range(rlz.sizes['RAVEN_sample_ID']):
+ varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys())
+ # rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars)
+ rlzDict = dict((var,self.population.data[i][j]) for j, var in enumerate(self.population.Gene.data))
+ rlzDict.update(dict((var,objVal.data[i][j]) for j, var in enumerate(objVal.obj.data)))
+ rlzDict['batchId'] = rlz['batchId'].data[i]
+ for j in range(len(self._objectiveVar)):
+ rlzDict[self._objectiveVar[j]] = objVal.data[i][j]
+ rlzDict['rank'] = np.atleast_1d(self.rank.data)[i]
+ rlzDict['CD'] = np.atleast_1d(self.crowdingDistance.data)[i]
+ for ind, fitName in enumerate(list(self.fitness.keys())):
+ rlzDict['FitnessEvaluation_'+fitName] = self.fitness[fitName].data[i]
+ for ind, consName in enumerate([y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]):
+ rlzDict['ConstraintEvaluation_'+consName] = self.constraintsV.data[i,ind]
+ self._updateSolutionExport(traj, rlzDict, acceptable, None)
+
+ # decide what to do next
+ if acceptable in ['accepted', 'first']:
+ # record history
+ bestRlz = {}
+ varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys())
+ bestRlz = dict((var,np.atleast_1d(rlz[var].data)) for var in set(varList) if var in rlz.data_vars)
+ for i in range(len(self._objectiveVar)):
+ bestRlz[self._objectiveVar[i]] = [item[i] for item in self.multiBestObjective]
+
+ bestRlz['rank'] = self.multiBestRank
+ bestRlz['CD'] = self.multiBestCD
+ if len(self.multiBestConstraint) != 0: # No constraints
+ for ind, consName in enumerate(self.multiBestConstraint.Constraint):
+ bestRlz['ConstraintEvaluation_'+consName.values.tolist()] = self.multiBestConstraint[ind].values
+ for ind, fitName in enumerate(list(self.multiBestFitness.keys())):
+ bestRlz['FitnessEvaluation_'+ fitName] = self.multiBestFitness[fitName].data
+ bestRlz.update(self.multiBestPoint)
+ self._optPointHistory[traj].append((bestRlz, info))
+ elif acceptable == 'rejected':
+ self._rejectOptPoint(traj, info, old)
+ else: # e.g. rerun
+ pass # nothing to do, just keep moving
+
def _collectOptPoint(self, rlz, fitness, objectiveVal, g):
"""
Collects the point (dict) from a realization
@@ -677,12 +945,14 @@ def _collectOptPoint(self, rlz, fitness, objectiveVal, g):
@ In, fitness, xr.DataArray, fitness values at each chromosome of the realization
@ Out, point, dict, point used in this realization
"""
-
varList = list(self.toBeSampled.keys()) + self._solutionExport.getVars('input') + self._solutionExport.getVars('output')
varList = set(varList)
selVars = [var for var in varList if var in rlz.data_vars]
population = datasetToDataArray(rlz, selVars)
- optPoints,fit,obj,gOfBest = zip(*[[x,y,z,w] for x, y, z,w in sorted(zip(np.atleast_2d(population.data),np.atleast_1d(fitness.data),objectiveVal,np.atleast_2d(g.data)),reverse=True,key=lambda x: (x[1]))])
+ if self._fitnessType == 'hardConstraint':
+ optPoints,fit,obj,gOfBest = zip(*[[x,y,z,w] for x, y, z,w in sorted(zip(np.atleast_2d(population.data),datasetToDataArray(fitness, self._objectiveVar).data,objectiveVal,np.atleast_2d(g.data)),reverse=True,key=lambda x: (x[1],-x[2]))])
+ else:
+ optPoints,fit,obj,gOfBest = zip(*[[x,y,z,w] for x, y, z,w in sorted(zip(np.atleast_2d(population.data),datasetToDataArray(fitness, self._objectiveVar).data,objectiveVal,np.atleast_2d(g.data)),reverse=True,key=lambda x: (x[1]))])
point = dict((var,float(optPoints[0][i])) for i, var in enumerate(selVars) if var in rlz.data_vars)
gOfBest = dict(('ConstraintEvaluation_'+name,float(gOfBest[0][i])) for i, name in enumerate(g.coords['Constraint'].values))
if (self.counter > 1 and obj[0] <= self.bestObjective and fit[0] >= self.bestFitness) or self.counter == 1:
@@ -693,6 +963,51 @@ def _collectOptPoint(self, rlz, fitness, objectiveVal, g):
return point
+ def _collectOptPointMulti(self, population, rank, CD, objVal, fitness, constraintsV):
+ """
+ Collects the point (dict) from a realization
+ @ In, population, Dataset, container containing the population
+ @ In, rank, xr.DataArray, rank values at each chromosome of the realization
+ @ In, CD, xr.DataArray, crowding distance values at each chromosome of the realization
+ @ In, objVal, list, objective values at each chromosome of the realization
+ @ In, fitness, xr.Dataset, fitness values at each chromosome of the realization
+ @ In, constraintsV, xr.DataArray, constraint evaluations at each chromosome of the realization
+ @ Out, None
+ """
+ rankOneIDX = [i for i, rankValue in enumerate(rank.data) if rankValue == 1]
+ optPoints = population[rankOneIDX]
+ optObjVal = np.array([list(ele) for ele in list(zip(*objVal))])[rankOneIDX]
+ count = 0
+ for i in list(fitness.keys()):
+ data = fitness[i][rankOneIDX]
+ if count == 0:
+ fitSet = data.to_dataset(name = i)
+ else:
+ fitSet[i] = data
+ count = count + 1
+ optConstraintsV = constraintsV.data[rankOneIDX]
+ optRank = rank.data[rankOneIDX]
+ optCD = CD.data[rankOneIDX]
+
+ optPointsDic = dict((var,np.array(optPoints)[:,i]) for i, var in enumerate(population.Gene.data))
+ optConstNew = []
+ for i in range(len(optConstraintsV)):
+ optConstNew.append(optConstraintsV[i])
+ optConstNew = list(map(list, zip(*optConstNew)))
+ if (len(optConstNew)) != 0:
+ optConstNew = xr.DataArray(optConstNew,
+ dims=['Constraint','Evaluation'],
+ coords={'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)],
+ 'Evaluation':np.arange(np.shape(optConstNew)[1])})
+
+ self.multiBestPoint = optPointsDic
+ self.multiBestFitness = fitSet
+ self.multiBestObjective = optObjVal
+ self.multiBestConstraint = optConstNew
+ self.multiBestRank = optRank
+ self.multiBestCD = optCD
+
+ return
+
+
def _checkAcceptability(self, traj):
"""
This is an abstract method for all RavenSampled Optimizer, whereas for GA all children are accepted
@@ -709,16 +1024,26 @@ def checkConvergence(self, traj, new, old):
@ Out, any(convs.values()), bool, True of any of the convergence criteria was reached
@ Out, convs, dict, on the form convs[conv] = bool, where conv is in self._convergenceCriteria
"""
- convs = {}
- for conv in self._convergenceCriteria:
- fName = conv[:1].upper() + conv[1:]
- # get function from lookup
- f = getattr(self, f'_checkConv{fName}')
- # check convergence function
- okay = f(traj, new=new, old=old)
- # store and update
- convs[conv] = okay
-
+ if len(self._objectiveVar) == 1:
+ convs = {}
+ for conv in self._convergenceCriteria:
+ fName = conv[:1].upper() + conv[1:]
+ # get function from lookup
+ f = getattr(self, f'_checkConv{fName}')
+ # check convergence function
+ okay = f(traj, new=new, old=old)
+ # store and update
+ convs[conv] = okay
+ else:
+ convs = {}
+ for conv in self._convergenceCriteria:
+ fName = conv[:1].upper() + conv[1:]
+ # get function from lookup
+ f = getattr(self, f'_checkConv{fName}')
+ # check convergence function
+ okay = f(traj, new=new, old=old)
+ # store and update
+ convs[conv] = okay
return any(convs.values()), convs
def _checkConvObjective(self, traj, **kwargs):
@@ -728,16 +1053,23 @@ def _checkConvObjective(self, traj, **kwargs):
@ In, kwargs, dict, dictionary of parameters for convergence criteria
@ Out, converged, bool, convergence state
"""
- if len(self._optPointHistory[traj]) < 2:
- return False
- o1, _ = self._optPointHistory[traj][-1]
- obj = o1[self._objectiveVar]
- converged = (obj == self._convergenceCriteria['objective'])
- self.raiseADebug(self.convFormat.format(name='objective',
- conv=str(converged),
- got=obj,
- req=self._convergenceCriteria['objective']))
-
+ if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case.
+ if len(self._optPointHistory[traj]) < 2:
+ return False
+ o1, _ = self._optPointHistory[traj][-1]
+ obj = o1[self._objectiveVar[0]]
+ converged = (obj == self._convergenceCriteria['objective'])
+ self.raiseADebug(self.convFormat.format(name='objective',
+ conv=str(converged),
+ got=obj,
+ req=self._convergenceCriteria['objective']))
+ else: # This is for a multi-objective Optimization case.
+ if len(self._optPointHistory[traj]) < 2:
+ return False
+ o1, _ = self._optPointHistory[traj][-1]
+ obj1 = o1[self._objectiveVar[0]]
+ obj2 = o1[self._objectiveVar[1]]
+ converged = (obj1 == self._convergenceCriteria['objective'] and obj2 == self._convergenceCriteria['objective'])
return converged
def _checkConvAHDp(self, traj, **kwargs):
@@ -909,14 +1241,24 @@ def _updateConvergence(self, traj, new, old, acceptable):
@ Out, converged, bool, True if converged on ANY criteria
"""
# NOTE we have multiple "if acceptable" trees here, as we need to update soln export regardless
- if acceptable == 'accepted':
- self.raiseADebug(f'Convergence Check for Trajectory {traj}:')
- # check convergence
- converged, convDict = self.checkConvergence(traj, new, old)
- else:
- converged = False
- convDict = dict((var, False) for var in self._convergenceInfo[traj])
- self._convergenceInfo[traj].update(convDict)
+ if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case.
+ if acceptable == 'accepted':
+ self.raiseADebug(f'Convergence Check for Trajectory {traj}:')
+ # check convergence
+ converged, convDict = self.checkConvergence(traj, new, old)
+ else:
+ converged = False
+ convDict = dict((var, False) for var in self._convergenceInfo[traj])
+ self._convergenceInfo[traj].update(convDict)
+ else: # This is for a multi-objective Optimization case.
+ if acceptable == 'accepted':
+ self.raiseADebug(f'Convergence Check for Trajectory {traj}:')
+ # check convergence
+ converged, convDict = self.checkConvergence(traj, new, old)
+ else:
+ converged = False
+ convDict = dict((var, False) for var in self._convergenceInfo[traj])
+ self._convergenceInfo[traj].update(convDict)
return converged
@@ -952,8 +1294,9 @@ def _rejectOptPoint(self, traj, info, old):
"""
return
- # * * * * * * * * * * * *
- # Constraint Handling
+ ###############################
+ # Constraint Handling #
+ ###############################
def _handleExplicitConstraints(self, point, constraint):
"""
Computes explicit (i.e. input-based) constraints
@@ -1016,9 +1359,9 @@ def _checkImpFunctionalConstraints(self, point, opt, impConstraint):
g = impConstraint.evaluate('implicitConstraint', inputs)
return g
-
- # END constraint handling
- # * * * * * * * * * * * *
+ ###############################
+ # END constraint handling #
+ ###############################
def _addToSolutionExport(self, traj, rlz, acceptable):
"""
Contributes additional entries to the solution export.
@@ -1030,10 +1373,14 @@ def _addToSolutionExport(self, traj, rlz, acceptable):
# meta variables
toAdd = {'age': 0 if self.popAge is None else self.popAge,
'batchId': self.batchId,
- 'fitness': rlz['fitness'],
+ # 'fitness': rlz['fitness'],
'AHDp': self.ahdp,
'AHD': self.ahd,
- 'HDSM': self.hdsm}
+ 'rank': 0 if ((type(self._objectiveVar) == list and len(self._objectiveVar) == 1) or type(self._objectiveVar) == str) else rlz['rank'],
+ 'CD': 0 if ((type(self._objectiveVar) == list and len(self._objectiveVar) == 1) or type(self._objectiveVar) == str) else rlz['CD'],
+ 'HDSM': self.hdsm
+ }
+
for var, val in self.constants.items():
toAdd[var] = val
@@ -1060,6 +1407,8 @@ def _formatSolutionExportVariableNames(self, acceptable):
new.extend([template.format(CONV=conv) for conv in self._convergenceCriteria])
elif '{VAR}' in template:
new.extend([template.format(VAR=var) for var in self.toBeSampled])
+ elif '{OBJ}' in template:
+ new.extend([template.format(OBJ=obj) for obj in self._objectiveVar])
elif '{CONSTRAINT}' in template:
new.extend([template.format(CONSTRAINT=constraint.name) for constraint in self._constraintFunctions + self._impConstraintFunctions])
else:
diff --git a/ravenframework/Optimizers/GradientDescent.py b/ravenframework/Optimizers/GradientDescent.py
index 452f579f4e..7a37205c28 100644
--- a/ravenframework/Optimizers/GradientDescent.py
+++ b/ravenframework/Optimizers/GradientDescent.py
@@ -212,6 +212,7 @@ def __init__(self):
self._followerProximity = 1e-2 # distance at which annihilation can start occurring, in ?normalized? space
self._trajectoryFollowers = defaultdict(list) # map of trajectories to the trajectories following them
self._functionalConstraintExplorationLimit = 500 # number of input-space explorations allowable for functional constraints
+ self._canHandleMultiObjective = False # Currently Gradient Descent cannot handle multiobjective optimization
# __private
# additional methods
# register adaptive sample identification criteria
@@ -338,7 +339,11 @@ def _useRealization(self, info, rlz):
@ Out, None
"""
traj = info['traj']
- optVal = rlz[self._objectiveVar]
+ # if not self._canHandleMultiObjective and len(self._objectiveVar) == 1:
+ # self._objectiveVar = self._objectiveVar[0]
+ if len(self._objectiveVar) > 1 and type(self._objectiveVar)==list:
+ self.raiseAnError(IOError, 'Gradient Descent does not support multiObjective optimization yet! objective variable must be a single variable for now!')
+ optVal = rlz[self._objectiveVar[0]]
info['optVal'] = optVal
purpose = info['purpose']
if purpose.startswith('opt'):
@@ -353,13 +358,13 @@ def _useRealization(self, info, rlz):
gradMag, gradVersor, _ = self._gradientInstance.evaluate(opt,
grads,
gradInfos,
- self._objectiveVar)
+ self._objectiveVar[0])
self.raiseADebug(' ... gradient calculated ...')
self._gradHistory[traj].append((gradMag, gradVersor))
# get new step information
try:
newOpt, stepSize, stepInfo = self._stepInstance.step(opt,
- objVar=self._objectiveVar,
+ objVar=self._objectiveVar[0],
optHist=self._optPointHistory[traj],
gradientHist=self._gradHistory[traj],
prevStepSize=self._stepHistory[traj],
@@ -378,7 +383,7 @@ def _useRealization(self, info, rlz):
except NoConstraintResolutionFound:
# we've tried everything, but we just can't hack it
self.raiseAMessage(f'Optimizer "{self.name}" trajectory {traj} was unable to continue due to functional or boundary constraints.')
- self._closeTrajectory(traj, 'converge', 'no constraint resolution', opt[self._objectiveVar])
+ self._closeTrajectory(traj, 'converge', 'no constraint resolution', opt[self._objectiveVar[0]])
return
# update values if modified by constraint handling
@@ -598,7 +603,7 @@ def _checkAcceptability(self, traj, opt, optVal, info):
# Check acceptability
if self._optPointHistory[traj]:
old, _ = self._optPointHistory[traj][-1]
- oldVal = old[self._objectiveVar]
+ oldVal = old[self._objectiveVar[0]]
# check if following another trajectory
if self._terminateFollowers:
following = self._stepInstance.trajIsFollowing(traj, self.denormalizeData(opt), info,
@@ -815,7 +820,7 @@ def _checkConvObjective(self, traj):
return False
o1, _ = self._optPointHistory[traj][-1]
o2, _ = self._optPointHistory[traj][-2]
- delta = mathUtils.relativeDiff(o2[self._objectiveVar], o1[self._objectiveVar])
+ delta = mathUtils.relativeDiff(o2[self._objectiveVar[0]], o1[self._objectiveVar[0]])
converged = abs(delta) < self._convergenceCriteria['objective']
self.raiseADebug(self.convFormat.format(name='objective',
conv=str(converged),
diff --git a/ravenframework/Optimizers/Optimizer.py b/ravenframework/Optimizers/Optimizer.py
index e731bb9dc1..9606a6394c 100644
--- a/ravenframework/Optimizers/Optimizer.py
+++ b/ravenframework/Optimizers/Optimizer.py
@@ -78,10 +78,10 @@ def getInputSpecification(cls):
specs.description = 'Optimizers'
# objective variable
- specs.addSub(InputData.parameterInputFactory('objective', contentType=InputTypes.StringType, strictMode=True,
+ specs.addSub(InputData.parameterInputFactory('objective', contentType=InputTypes.StringListType, strictMode=True,
printPriority=90, # more important than
- descr=r"""Name of the response variable (or ``objective function'') that should be optimized
- (minimized or maximized)."""))
+ descr=r"""Name of the objective variable(s) (or ``objective function'') that should be optimized
+ (minimized or maximized). It can be a single string or a list of strings if it is a multi-objective problem. """))
# modify Sampler variable nodes
variable = specs.getSub('variable')
@@ -103,7 +103,8 @@ def getInputSpecification(cls):
descr=r"""seed for random number generation. Note that by default RAVEN uses an internal seed,
so this seed must be changed to observe changed behavior. \default{RAVEN-determined}""")
minMaxEnum = InputTypes.makeEnumType('MinMax', 'MinMaxType', ['min', 'max'])
- minMax = InputData.parameterInputFactory('type', contentType=minMaxEnum,
+ minMaxList = InputTypes.StringListType()
+ minMax = InputData.parameterInputFactory('type', contentType=minMaxList,
descr=r"""the type of optimization to perform. \xmlString{min} will search for the lowest
\xmlNode{objective} value, while \xmlString{max} will search for the highest value.""")
init.addSub(seed)
@@ -160,7 +161,7 @@ def __init__(self):
# public
# _protected
self._seed = None # random seed to apply
- self._minMax = 'min' # maximization or minimization?
+ self._minMax = ['min'] # maximization or minimization?
self._activeTraj = [] # tracks live trajectories
self._cancelledTraj = {} # tracks cancelled trajectories, and reasons
self._convergedTraj = {} # tracks converged trajectories, and values obtained
@@ -248,7 +249,6 @@ def handleInput(self, paramInput):
@ Out, None
"""
# the reading of variables (dist or func) and constants already happened in _readMoreXMLbase in Sampler
- # objective var
self._objectiveVar = paramInput.findFirst('objective').value
# sampler init
@@ -263,6 +263,10 @@ def handleInput(self, paramInput):
minMax = init.findFirst('type')
if minMax is not None:
self._minMax = minMax.value
+ if len(self._minMax) != len(self._objectiveVar):
+        self.raiseAnError(IOError, 'The number of entries in <type> under <init> must match the number of objectives in <objective>!')
+ if list(set(self._minMax)-set(['min','max'])) != []:
+        self.raiseAnError(IOError, "Entries in <type> under <init> must be either 'min' or 'max'!")
# variables additional reading
for varNode in paramInput.findAll('variable'):
diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py
index 61bc4c9841..fc1647dbf6 100644
--- a/ravenframework/Optimizers/RavenSampled.py
+++ b/ravenframework/Optimizers/RavenSampled.py
@@ -301,8 +301,16 @@ def localFinalizeActualSampling(self, jobObject, model, myInput):
# # testing suggests no big deal on smaller problem
# the sign of the objective function is flipped in case we do maximization
# so get the correct-signed value into the realization
- if self._minMax == 'max':
- rlz[self._objectiveVar] *= -1
+
+ if 'max' in self._minMax:
+ if not self._canHandleMultiObjective and len(self._objectiveVar) == 1:
+ rlz[self._objectiveVar[0]] *= -1
+ elif type(self._objectiveVar) == list:
+ for i in range(len(self._objectiveVar)):
+ if self._minMax[i] == 'max':
+ rlz[self._objectiveVar[i]] *= -1
+ else:
+ rlz[self._objectiveVar] *= -1
# TODO FIXME let normalizeData work on an xr.DataSet (batch) not just a dictionary!
rlz = self.normalizeData(rlz)
self._useRealization(info, rlz)
@@ -313,57 +321,127 @@ def finalizeSampler(self, failedRuns):
@ In, failedRuns, list, runs that failed as part of this sampling
@ Out, None
"""
- # get and print the best trajectory obtained
- bestValue = None
- bestTraj = None
- bestPoint = None
- s = -1 if self._minMax == 'max' else 1
- # check converged trajectories
- self.raiseAMessage('*' * 80)
- self.raiseAMessage('Optimizer Final Results:')
- self.raiseADebug('')
- self.raiseADebug(' - Trajectory Results:')
- self.raiseADebug(' TRAJ STATUS VALUE')
- statusTemplate = ' {traj:2d} {status:^11s} {val: 1.3e}'
- # print cancelled traj
- for traj, info in self._cancelledTraj.items():
- val = info['value']
- status = info['reason']
- self.raiseADebug(statusTemplate.format(status=status, traj=traj, val=s * val))
- # check converged traj
- for traj, info in self._convergedTraj.items():
+ if not self._canHandleMultiObjective or len(self._objectiveVar) == 1:
+ # get and print the best trajectory obtained
+ bestValue = None
+ bestTraj = None
+ bestPoint = None
+ s = -1 if 'max' in self._minMax else 1
+ # check converged trajectories
+ self.raiseAMessage('*' * 80)
+ self.raiseAMessage('Optimizer Final Results:')
+ self.raiseADebug('')
+ self.raiseADebug(' - Trajectory Results:')
+ self.raiseADebug(' TRAJ STATUS VALUE')
+ statusTemplate = ' {traj:2d} {status:^11s} {val: 1.3e}'
+ # print cancelled traj
+ for traj, info in self._cancelledTraj.items():
+ val = info['value']
+ status = info['reason']
+ self.raiseADebug(statusTemplate.format(status=status, traj=traj, val=s * val))
+ # check converged traj
+ for traj, info in self._convergedTraj.items():
+ opt = self._optPointHistory[traj][-1][0]
+ val = info['value']
+ self.raiseADebug(statusTemplate.format(status='converged', traj=traj, val=s * val))
+ if bestValue is None or val < bestValue:
+ bestTraj = traj
+ bestValue = val
+ # further check active unfinished trajectories
+ # FIXME why should there be any active, unfinished trajectories when we're cleaning up sampler?
+ traj = 0 # FIXME why only 0?? what if it's other trajectories that are active and unfinished?
+ # sanity check: if there's no history (we never got any answers) then report rather than crash
+ if len(self._optPointHistory[traj]) == 0:
+ self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' +
+ 'Perhaps the Model failed?')
opt = self._optPointHistory[traj][-1][0]
- val = info['value']
- self.raiseADebug(statusTemplate.format(status='converged', traj=traj, val=s * val))
+ val = opt[self._objectiveVar[0]]
+ self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val))
if bestValue is None or val < bestValue:
- bestTraj = traj
bestValue = val
- # further check active unfinished trajectories
- # FIXME why should there be any active, unfinished trajectories when we're cleaning up sampler?
- traj = 0 # FIXME why only 0?? what if it's other trajectories that are active and unfinished?
- # sanity check: if there's no history (we never got any answers) then report than rather than crash
- if len(self._optPointHistory[traj]) == 0:
- self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' +
- 'Perhaps the Model failed?')
- opt = self._optPointHistory[traj][-1][0]
- val = opt[self._objectiveVar]
- self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val))
- if bestValue is None or val < bestValue:
- bestValue = val
- bestTraj = traj
- bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0])
- bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled)
- self.raiseADebug('')
- self.raiseAMessage(' - Final Optimal Point:')
- finalTemplate = ' {name:^20s} {value: 1.3e}'
- finalTemplateInt = ' {name:^20s} {value: 3d}'
- self.raiseAMessage(finalTemplate.format(name=self._objectiveVar, value=s * bestValue))
- self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj))
- for var, val in bestPoint.items():
- self.raiseAMessage(finalTemplate.format(name=var, value=val))
- self.raiseAMessage('*' * 80)
- # write final best solution to soln export
- self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None')
+ bestTraj = traj
+ bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0])
+ bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled)
+ self.raiseADebug('')
+ self.raiseAMessage(' - Final Optimal Point:')
+ finalTemplate = ' {name:^20s} {value: 1.3e}'
+ finalTemplateInt = ' {name:^20s} {value: 3d}'
+ self.raiseAMessage(finalTemplate.format(name=self._objectiveVar[0], value=s * bestValue))
+ self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj))
+ for var, val in bestPoint.items():
+ self.raiseAMessage(finalTemplate.format(name=var, value=val))
+ self.raiseAMessage('*' * 80)
+ # write final best solution to soln export
+ self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None')
+ else:
+ # get and print the best trajectory obtained
+ bestValue = None
+ bestTraj = None
+ bestPoint = None
+      s = -1 if 'max' in self._minMax else 1
+ # check converged trajectories
+ self.raiseAMessage('*' * 80)
+ self.raiseAMessage('Optimizer Final Results:')
+ self.raiseADebug('')
+ self.raiseADebug(' - Trajectory Results:')
+ self.raiseADebug(' TRAJ STATUS VALUE')
+ statusTemplate = ' {traj:2d} {status:^11s} {val: 1.3e}'
+ statusTemplate_multi = ' {traj:2d} {status:^11s} {val1: ^11s} {val2: ^11s}'
+
+ # print cancelled traj
+ for traj, info in self._cancelledTraj.items():
+ val = info['value']
+ status = info['reason']
+ self.raiseADebug(statusTemplate.format(status=status, traj=traj, val=s * val))
+ # check converged traj
+ for traj, info in self._convergedTraj.items():
+ opt = self._optPointHistory[traj][-1][0]
+ val = info['value']
+ self.raiseADebug(statusTemplate.format(status='converged', traj=traj, val=s * val))
+ if bestValue is None or val < bestValue:
+ bestTraj = traj
+ bestValue = val
+ # further check active unfinished trajectories
+ # FIXME why should there be any active, unfinished trajectories when we're cleaning up sampler?
+ traj = 0 # FIXME why only 0?? what if it's other trajectories that are active and unfinished?
+ # sanity check: if there's no history (we never got any answers) then report rather than crash
+ if len(self._optPointHistory[traj]) == 0:
+ self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' +
+ 'Perhaps the Model failed?')
+
+ if len(self._objectiveVar) == 1:
+ opt = self._optPointHistory[traj][-1][0]
+        val = opt[self._objectiveVar[0]]
+ self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val))
+ if bestValue is None or val < bestValue:
+ bestValue = val
+ bestTraj = traj
+ bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0])
+ bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled)
+ self.raiseADebug('')
+ self.raiseAMessage(' - Final Optimal Point:')
+ finalTemplate = ' {name:^20s} {value: 1.3e}'
+ finalTemplateInt = ' {name:^20s} {value: 3d}'
+ # self.raiseAMessage(finalTemplate.format(name=self._objectiveVar, value=s * bestValue))
+ self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj))
+ for var, val in bestPoint.items():
+ self.raiseAMessage(finalTemplate.format(name=var, value=val))
+ self.raiseAMessage('*' * 80)
+ # write final best solution to soln export
+ self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None')
+ else:
+ for i in range(len(self._optPointHistory[traj][-1][0][self._objectiveVar[0]])):
+ opt = self._optPointHistory[traj][-1][0]
+ key = list(opt.keys())
+ val = [item[i] for item in opt.values()]
+ optElm = {key[a]: val[a] for a in range(len(key))}
+ optVal = [(-1*(self._minMax[b]=='max')+(self._minMax[b]=='min'))*optElm[self._objectiveVar[b]] for b in range(len(self._objectiveVar))]
+
+ bestTraj = traj
+ bestOpt = self.denormalizeData(optElm)
+ bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled)
+
+ self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None')
def flush(self):
"""
@@ -499,10 +577,10 @@ def _handleImplicitConstraints(self, previous):
@ Out, accept, bool, whether point was satisfied implicit constraints
"""
normed = copy.deepcopy(previous)
- oldVal = normed[self._objectiveVar]
- normed.pop(self._objectiveVar, oldVal)
+ oldVal = normed[self._objectiveVar[0]]
+ normed.pop(self._objectiveVar[0], oldVal)
denormed = self.denormalizeData(normed)
- denormed[self._objectiveVar] = oldVal
+ denormed[self._objectiveVar[0]] = oldVal
accept = self._checkImpFunctionalConstraints(denormed)
return accept
@@ -570,9 +648,12 @@ def _resolveNewOptPoint(self, traj, rlz, optVal, info):
# TODO could we ever use old rerun gradients to inform the gradient direction as well?
self._rerunsSinceAccept[traj] += 1
N = self._rerunsSinceAccept[traj] + 1
- oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar]
+ if len(self._objectiveVar) == 1:
+ oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar[0]]
+ else:
+ oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar[0]]
newAvg = ((N-1)*oldVal + optVal) / N
- self._optPointHistory[traj][-1][0][self._objectiveVar] = newAvg
+ self._optPointHistory[traj][-1][0][self._objectiveVar[0]] = newAvg
else:
self.raiseAnError(f'Unrecognized acceptability: "{acceptable}"')
@@ -637,15 +718,22 @@ def _updateSolutionExport(self, traj, rlz, acceptable, rejectReason):
'modelRuns': self.counter
})
# optimal point input and output spaces
- objValue = rlz[self._objectiveVar]
- if self._minMax == 'max':
- objValue *= -1
- toExport[self._objectiveVar] = objValue
+ if len(self._objectiveVar) == 1: # Single Objective Optimization
+ objValue = rlz[self._objectiveVar[0]]
+ if 'max' in self._minMax:
+ objValue *= -1
+ toExport[self._objectiveVar[0]] = objValue
+ else: # Multi Objective Optimization
+ for i in range(len(self._objectiveVar)):
+ objValue = rlz[self._objectiveVar[i]]
+ if self._minMax[i] == 'max':
+ objValue *= -1
+ toExport[self._objectiveVar[i]] = objValue
toExport.update(self.denormalizeData(dict((var, rlz[var]) for var in self.toBeSampled)))
# constants and functions
toExport.update(self.constants)
toExport.update(dict((var, rlz[var]) for var in self.dependentSample if var in rlz))
- # additional from from inheritors
+ # additional from inheritors
toExport.update(self._addToSolutionExport(traj, rlz, acceptable))
# check for anything else that solution export wants that rlz might provide
for var in self._solutionExport.getVars():
diff --git a/ravenframework/Optimizers/SimulatedAnnealing.py b/ravenframework/Optimizers/SimulatedAnnealing.py
index 81df017856..123868498b 100644
--- a/ravenframework/Optimizers/SimulatedAnnealing.py
+++ b/ravenframework/Optimizers/SimulatedAnnealing.py
@@ -191,6 +191,7 @@ def __init__(self):
self._coolingMethod = None # initializing cooling method
self._coolingParameters = {} # initializing the cooling schedule parameters
self.info = {}
+ self._canHandleMultiObjective = False # Currently Simulated Annealing can only handle single objective
def handleInput(self, paramInput):
"""
@@ -319,9 +320,11 @@ def _useRealization(self, info, rlz):
@ Out, None
"""
traj = info['traj']
- info['optVal'] = rlz[self._objectiveVar]
+    if len(self._objectiveVar) > 1 and type(self._objectiveVar) == list:
+ self.raiseAnError(IOError, 'Simulated Annealing does not support multiObjective yet! objective variable must be a single variable for now!')
+ info['optVal'] = rlz[self._objectiveVar[0]]
self.incrementIteration(traj)
- self._resolveNewOptPoint(traj, rlz, rlz[self._objectiveVar], info)
+ self._resolveNewOptPoint(traj, rlz, rlz[self._objectiveVar[0]], info)
if self._stepTracker[traj]['opt'] is None:
# revert to the last accepted point
rlz = self._optPointHistory[traj][-1][0]
@@ -340,7 +343,7 @@ def _useRealization(self, info, rlz):
except NoConstraintResolutionFound:
# we've tried everything, but we just can't hack it
self.raiseAMessage(f'Optimizer "{self.name}" trajectory {traj} was unable to continue due to functional or boundary constraints.')
- self._closeTrajectory(traj, 'converge', 'no constraint resolution', newPoint[self._objectiveVar])
+ self._closeTrajectory(traj, 'converge', 'no constraint resolution', newPoint[self._objectiveVar[0]])
return
suggested = self.denormalizeData(suggested)
@@ -420,7 +423,7 @@ def _checkConvObjective(self, traj):
return False
o1, _ = self._optPointHistory[traj][-1]
o2, _ = self._optPointHistory[traj][-2]
- delta = o2[self._objectiveVar]-o1[self._objectiveVar]
+ delta = o2[self._objectiveVar[0]]-o1[self._objectiveVar[0]]
converged = abs(delta) < self._convergenceCriteria['objective']
self.raiseADebug(self.convFormat.format(name='objective',
conv=str(converged),
@@ -469,9 +472,9 @@ def _checkAcceptability(self, traj, opt, optVal, info):
# NOTE: if self._optPointHistory[traj]: -> faster to use "try" for all but the first time
try:
old, _ = self._optPointHistory[traj][-1]
- oldVal = old[self._objectiveVar]
+ oldVal = old[self._objectiveVar[0]]
# check if same point
- self.raiseADebug(f' ... change: {opt[self._objectiveVar]-oldVal:1.3e} new objective: {opt[self._objectiveVar]:1.6e} old objective: {oldVal:1.6e}')
+ self.raiseADebug(f' ... change: {opt[self._objectiveVar[0]]-oldVal:1.3e} new objective: {opt[self._objectiveVar[0]]:1.6e} old objective: {oldVal:1.6e}')
# if this is an opt point rerun, accept it without checking.
if self._acceptRerun[traj]:
acceptable = 'rerun'
@@ -480,7 +483,7 @@ def _checkAcceptability(self, traj, opt, optVal, info):
# this is the classic "same point" trap; we accept the same point, and check convergence later
acceptable = 'accepted'
else:
- if self._acceptabilityCriterion(oldVal,opt[self._objectiveVar])>randomUtils.random(dim=1, samples=1): # TODO replace it back
+ if self._acceptabilityCriterion(oldVal,opt[self._objectiveVar[0]])>randomUtils.random(dim=1, samples=1): # TODO replace it back
acceptable = 'accepted'
else:
acceptable = 'rejected'
diff --git a/ravenframework/Optimizers/acquisitionFunctions/ExpectedImprovement.py b/ravenframework/Optimizers/acquisitionFunctions/ExpectedImprovement.py
index eac639237a..360765d3ae 100644
--- a/ravenframework/Optimizers/acquisitionFunctions/ExpectedImprovement.py
+++ b/ravenframework/Optimizers/acquisitionFunctions/ExpectedImprovement.py
@@ -67,7 +67,7 @@ def evaluate(self, var, bayesianOptimizer, vectorized=False):
"""
# Need to retrieve current optimum point
best = bayesianOptimizer._optPointHistory[0][-1][0]
- fopt = best[bayesianOptimizer._objectiveVar]
+ fopt = best[bayesianOptimizer._objectiveVar[0]]
# Need to convert array input "x" into dict point
featurePoint = bayesianOptimizer.arrayToFeaturePoint(var)
@@ -112,7 +112,7 @@ def gradient(self, var, bayesianOptimizer):
# Need to retrieve current optimum point
best = bayesianOptimizer._optPointHistory[0][-1][0]
- fopt = best[bayesianOptimizer._objectiveVar]
+ fopt = best[bayesianOptimizer._objectiveVar[0]]
# Other common quantities
beta = (fopt - mu)/s
phi = norm.pdf(beta)
diff --git a/ravenframework/Optimizers/acquisitionFunctions/LowerConfidenceBound.py b/ravenframework/Optimizers/acquisitionFunctions/LowerConfidenceBound.py
index b3b8e1311a..787c204d5f 100644
--- a/ravenframework/Optimizers/acquisitionFunctions/LowerConfidenceBound.py
+++ b/ravenframework/Optimizers/acquisitionFunctions/LowerConfidenceBound.py
@@ -197,8 +197,8 @@ def _converged(self, bayesianOptimizer):
if self._optValue is None:
converged = False
return converged
- optDiff = np.absolute(-1*self._optValue - bayesianOptimizer._optPointHistory[0][-1][0][bayesianOptimizer._objectiveVar])
- optDiff /= np.absolute(bayesianOptimizer._optPointHistory[0][-1][0][bayesianOptimizer._objectiveVar])
+ optDiff = np.absolute(-1*self._optValue - bayesianOptimizer._optPointHistory[0][-1][0][bayesianOptimizer._objectiveVar[0]])
+ optDiff /= np.absolute(bayesianOptimizer._optPointHistory[0][-1][0][bayesianOptimizer._objectiveVar[0]])
if optDiff <= bayesianOptimizer._acquisitionConv:
converged = True
else:
diff --git a/ravenframework/Optimizers/acquisitionFunctions/ProbabilityOfImprovement.py b/ravenframework/Optimizers/acquisitionFunctions/ProbabilityOfImprovement.py
index 7f66d31dc5..04742a2b1a 100644
--- a/ravenframework/Optimizers/acquisitionFunctions/ProbabilityOfImprovement.py
+++ b/ravenframework/Optimizers/acquisitionFunctions/ProbabilityOfImprovement.py
@@ -129,7 +129,7 @@ def evaluate(self, var, bayesianOptimizer, vectorized=False):
"""
# Need to retrieve current optimum point
best = bayesianOptimizer._optPointHistory[0][-1][0]
- fopt = best[bayesianOptimizer._objectiveVar]
+ fopt = best[bayesianOptimizer._objectiveVar[0]]
# Need to convert array input "x" into dict point
featurePoint = bayesianOptimizer.arrayToFeaturePoint(var)
diff --git a/ravenframework/Optimizers/crossOverOperators/crossovers.py b/ravenframework/Optimizers/crossOverOperators/crossovers.py
index f03d03ef07..51867a1b5b 100644
--- a/ravenframework/Optimizers/crossOverOperators/crossovers.py
+++ b/ravenframework/Optimizers/crossOverOperators/crossovers.py
@@ -15,9 +15,13 @@
Implementation of crossovers for crossover process of Genetic Algorithm
currently the implemented crossover algorithms are:
1. OnePoint Crossover
+ 2. TwoPoints Crossover
+ 3. Uniform Crossover
+ 4. TwoPoints Partially Mapped Crossover (PMX)
Created June,16,2020
- @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi
+ Last update July,8,2024
+ @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi, Juan Luque-Gutierrez
"""
import numpy as np
@@ -151,11 +155,57 @@ def twoPointsCrossover(parents, **kwargs):
return children
+def partiallyMappedCrossover(parents, **kwargs):
+ """
+    Method designed to perform a two point partially mapped crossover (PMX) on 2 parents:
+ Partition each parents in three sequences (A,B,C):
+ parent1 = A1 B1 C1
+ parent2 = A2 B2 C2
+ Then:
+ children1 = A1* B2 C1*
+ children2 = A2* B1 C2*
+ Children should have the same elements as their parents, but in different order.
+ This crossover preserves the genes in a chromosome.
+ @ In, parents, xr.DataArray, parents involved in the mating process
+ @ In, kwargs, dict, dictionary of parameters for this mutation method:
+ parents, 2D array, parents in the current mating process.
+ Shape is nParents x len(chromosome) i.e, number of Genes/Vars
+ crossoverProb, float, crossoverProb determines when child takes genes from a specific parent, default is random
+ points, integer, point at which the cross over happens, default is random
+ @ Out, children, xr.DataArray, children resulting from the crossover. Shape is nParents x len(chromosome) i.e, number of Genes/Vars
+ """
+ nParents, nGenes = np.shape(parents)
+ children = xr.DataArray(np.zeros((int(2*comb(nParents,2)), np.shape(parents)[1])),
+ dims = ['chromosome', 'Gene'],
+ coords = {'chromosome': np.arange(int(2*comb(nParents, 2))),
+ 'Gene':parents.coords['Gene'].values})
+ parentPairs = list(combinations(parents, 2))
+ index = 0
+ if nGenes <= 2:
+    raise ValueError('The number of genes should be >= 3')
+ for couples in parentPairs:
+ [loc1, loc2] = randomUtils.randomChoice(list(range(1, nGenes)), size = 2, replace=False, engine=None)
+ if loc1 > loc2:
+ locL = loc2
+ locU = loc1
+ else:
+ locL = loc1
+ locU = loc2
+ parent1 = couples[0]
+ parent2 = couples[1]
+ children1, children2 = twoPointsPMXMethod(parent1, parent2, locL, locU)
+
+ children[index] = children1
+ children[index + 1] = children2
+ index = index + 2
+
+ return children
+
__crossovers = {}
__crossovers['onePointCrossover'] = onePointCrossover
__crossovers['twoPointsCrossover'] = twoPointsCrossover
__crossovers['uniformCrossover'] = uniformCrossover
-
+__crossovers['partiallyMappedCrossover'] = partiallyMappedCrossover
def returnInstance(cls, name):
"""
@@ -215,3 +265,51 @@ def uniformCrossoverMethod(parent1,parent2,crossoverProb):
children2[pos] = parent2[pos]
return children1,children2
+
+def twoPointsPMXMethod(parent1, parent2, locL, locU):
+ """
+ Method designed to perform a two point Partially Mapped Crossover (PMX) on 2 arrays:
+ Partition each array into three sequences (A, B, C):
+ parent1 = A1 B1 C1
+ parent2 = A2 B2 C2
+ We map the values contained in B1 to B2.
+ Then:
+ children1 = X B2 X
+ children2 = X B1 X
+ We verify if the values in A and C are found in B for each children. If so, we
+ replace such values for the ones in the map.
+ children1 = A1* B2 C1*
+ children2 = A2* B1 C2*
+ Children should have the same elements as their parents, but in different order.
+ @ In, parent1, first array
+ @ In, parent2, second array
+ @ In, locL: first location
+  @ In, locU: second location
+ @ Out, children1: first generated array
+ @ Out, children2: second generated array
+ """
+
+ size = len(parent1)
+
+ children1 = parent1.copy(deep=True)
+ children2 = parent2.copy(deep=True)
+
+ seqB1 = parent1.values[locL:locU]
+ seqB2 = parent2.values[locL:locU]
+
+ children1[locL:locU] = seqB2
+ children2[locL:locU] = seqB1
+
+ # Determine mapping relationship
+ mapping1 = {parent2.values[i]: parent1.values[i] for i in range(locL, locU)}
+ mapping2 = {parent1.values[i]: parent2.values[i] for i in range(locL, locU)}
+
+ for i in list(range(locL)) + list(range(locU, size)):
+ if children1.values[i] in mapping1:
+ while children1.values[i] in mapping1:
+ children1.values[i] = mapping1[children1.values[i]]
+ if children2.values[i] in mapping2:
+ while children2.values[i] in mapping2:
+ children2.values[i] = mapping2[children2.values[i]]
+
+ return children1, children2
\ No newline at end of file
diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py
index 53a27ff1c6..db58da1754 100644
--- a/ravenframework/Optimizers/fitness/fitness.py
+++ b/ravenframework/Optimizers/fitness/fitness.py
@@ -16,11 +16,17 @@
currently the implemented fitness function is a linear combination of the objective function and penalty function for constraint violation:
Created June,16,2020
- @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi
+ Updated September,17,2023
+ @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi, Junyung Kim
"""
+# Internal Modules----------------------------------------------------------------------------------
+from ...utils import frontUtils
+from ..parentSelectors.parentSelectors import countConstViolation
+
# External Imports
import numpy as np
import xarray as xr
+import sys
# Internal Imports
# [MANDD] Note: the fitness function are bounded by 2 parameters: a and b
@@ -53,27 +59,25 @@ def invLinear(rlz,**kwargs):
the farthest from violating the constraint it is, The highest negative value it have the largest the violation is.
@ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome.
"""
- if kwargs['a'] == None:
- a = 1.0
- else:
- a = kwargs['a']
- if kwargs['b'] == None:
- b = 10.0
- else:
- b = kwargs['b']
- if kwargs['constraintFunction'].all() == None:
- penalty = 0.0
- else:
- penalty = kwargs['constraintFunction'].data
-
- objVar = kwargs['objVar']
- data = np.atleast_1d(rlz[objVar].data)
+  #NOTE invLinear does not yet support multi-objective optimization problem solving. Further literature review on applying the invLinear method to multi-objective optimization
+ # needs to be involved. Potentially, applying obj_Worst in fitness function (i.e., -a[j] * (rlz[objVar][objVar[j]].data).reshape(-1,1) - b[j] * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1))
+  # should be considered.
+ a = [1.0] if kwargs['a'] == None else kwargs['a']
+ b = [10.0] if kwargs['b'] == None else kwargs['b']
+ penalty = 0.0 if kwargs['constraintFunction'].all() == None else kwargs['constraintFunction'].data
+ objVar = [kwargs['objVar']] if isinstance(kwargs['objVar'], str) == True else kwargs['objVar']
+ for j in range(len(objVar)):
+ data = np.atleast_1d(rlz[objVar][objVar[j]].data)
+ fitness = -a[j] * (rlz[objVar][objVar[j]].data).reshape(-1,1) - b[j] * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1)
+ fitness = xr.DataArray(np.squeeze(fitness),
+ dims=['chromosome'],
+ coords={'chromosome': np.arange(len(data))})
+ if j == 0:
+ fitnessSet = fitness.to_dataset(name = objVar[j])
+ else:
+ fitnessSet[objVar[j]] = fitness
+ return fitnessSet
- fitness = -a * (rlz[objVar].data).reshape(-1,1) - b * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1)
- fitness = xr.DataArray(np.squeeze(fitness),
- dims=['chromosome'],
- coords={'chromosome': np.arange(len(data))})
- return fitness
def feasibleFirst(rlz,**kwargs):
r"""
@@ -83,12 +87,14 @@ def feasibleFirst(rlz,**kwargs):
1. As the objective function decreases (comes closer to the min value), the fitness value increases
2. As the objective function increases (away from the min value), the fitness value decreases
3. As the solution violates the constraints the fitness should decrease and hence the solution is less favored by the algorithm.
- 4. For the violating solutions, the fitness is starts from the worst solution in the population
+ 4. For the violating solutions, the fitness starts from the worst solution in the population
(i.e., max objective in minimization problems and min objective in maximization problems)
For maximization problems the objective value is multiplied by -1 and hence the previous trends are inverted.
A great quality of this fitness is that if the objective value is equal for multiple solutions it selects the furthest from constraint violation.
+ Reference: Deb, Kalyanmoy. "An efficient constraint handling method for genetic algorithms." Computer methods in applied mechanics and engineering 186.2-4 (2000): 311-338.
+
.. math::
fitness = \[ \\begin{cases}
@@ -105,23 +111,41 @@ def feasibleFirst(rlz,**kwargs):
'constraintFunction', xr.Dataarray, containing all constraint functions (explicit and implicit) evaluations for the whole population
@ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome.
"""
- objVar = kwargs['objVar']
- g = kwargs['constraintFunction']
- data = np.atleast_1d(rlz[objVar].data)
- worstObj = max(data)
- fitness = []
- for ind in range(data.size):
- if np.all(g.data[ind, :]>=0):
- fit=(data[ind])
- else:
- fit = worstObj
- for constInd,_ in enumerate(g['Constraint'].data):
- fit+=(max(0,-1 * g.data[ind, constInd]))
- fitness.append(-1 * fit)
- fitness = xr.DataArray(np.array(fitness),
+ objVar = [kwargs['objVar']] if isinstance(kwargs['objVar'], str) == True else kwargs['objVar']
+ a = [1.0]*len(objVar) if kwargs['a'] == None else kwargs['a']
+ if kwargs['constraintNum'] == 0:
+ pen = kwargs['b']
+ else:
+ g = kwargs['constraintFunction']
+ penalty = kwargs['b']
+ pen = [penalty[i:i+len(g['Constraint'].data)] for i in range(0, len(penalty), len(g['Constraint'].data))]
+ objPen = dict(map(lambda i,j : (i,j), objVar, pen))
+
+ for i in range(len(objVar)):
+ data = np.atleast_1d(rlz[objVar][objVar[i]].data)
+ worstObj = max(data)
+ fitness = []
+ for ind in range(data.size):
+ if kwargs['constraintNum'] == 0 or np.all(g.data[ind, :]>=0):
+ fit=(a[i]*data[ind])
+ else:
+ fit = a[i]*worstObj
+ for constInd,_ in enumerate(g['Constraint'].data):
+ fit = a[i]*fit + objPen[objVar[i]][constInd]*(max(0,-1*g.data[ind, constInd])) #NOTE: objPen[objVar[i]][constInd] is "objective & Constraint specific penalty."
+ if len(kwargs['type']) == 1:
+ fitness.append(-1*fit)
+ else:
+ fitness.append(fit)
+
+ fitness = xr.DataArray(np.array(fitness),
dims=['chromosome'],
coords={'chromosome': np.arange(len(data))})
- return fitness
+ if i == 0:
+ fitnessSet = fitness.to_dataset(name = objVar[i])
+ else:
+ fitnessSet[objVar[i]] = fitness
+
+ return fitnessSet
def logistic(rlz,**kwargs):
"""
@@ -141,31 +165,39 @@ def logistic(rlz,**kwargs):
@ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome.
"""
if kwargs['a'] == None:
- a = 1.0
+ a = [1.0]
else:
a = kwargs['a']
-
if kwargs['b'] == None:
- b = 0.0
+ b = [0.0]
else:
b = kwargs['b']
+ if isinstance(kwargs['objVar'], str) == True:
+ objVar = [kwargs['objVar']]
+ else:
+ objVar = kwargs['objVar']
+ for i in range(len(objVar)):
+ val = rlz[objVar][objVar[i]].data
+ data = np.atleast_1d(rlz[objVar][objVar[i]].data)
+ denom = 1.0 + np.exp(-a[0] * (val - b[0]))
+ fitness = 1.0 / denom
+ fitness = xr.DataArray(fitness.data,
+ dims=['chromosome'],
+ coords={'chromosome': np.arange(len(data))})
+ if i == 0:
+ fitnessSet = fitness.to_dataset(name = objVar[i])
+ else:
+ fitnessSet[objVar[i]] = fitness
- objVar = kwargs['objVar']
- val = rlz[objVar]
- data = np.atleast_1d(rlz[objVar].data)
- denom = 1.0 + np.exp(-a * (val - b))
- fitness = 1.0 / denom
- fitness = xr.DataArray(np.array(fitness),
- dims=['chromosome'],
- coords={'chromosome': np.arange(len(data))})
-
- return fitness
+ return fitnessSet
__fitness = {}
__fitness['invLinear'] = invLinear
__fitness['logistic'] = logistic
__fitness['feasibleFirst'] = feasibleFirst
+#NOTE hardConstraint method will be used later once constraintHandling is realized. Until then, it will be commented. @JunyungKim
+# __fitness['hardConstraint'] = hardConstraint
def returnInstance(cls, name):
@@ -176,5 +208,5 @@ def returnInstance(cls, name):
@ Out, __crossovers[name], instance of class
"""
if name not in __fitness:
- cls.raiseAnError (IOError, "{} FITNESS FUNCTION NOT IMPLEMENTED!!!!!".format(name))
+ cls.raiseAnError (IOError, "{} is not a supported fitness function. ".format(name))
return __fitness[name]
diff --git a/ravenframework/Optimizers/mutators/mutators.py b/ravenframework/Optimizers/mutators/mutators.py
index 1b541ce508..eb4f1e2b71 100644
--- a/ravenframework/Optimizers/mutators/mutators.py
+++ b/ravenframework/Optimizers/mutators/mutators.py
@@ -21,7 +21,7 @@
5. randomMutator
Created June,16,2020
- @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi
+ @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi, Junyung Kim
"""
import numpy as np
import xarray as xr
@@ -34,14 +34,19 @@ def swapMutator(offSprings, distDict, **kwargs):
E.g.:
child=[a,b,c,d,e] --> b and d are selected --> child = [a,d,c,b,e]
@ In, offSprings, xr.DataArray, children resulting from the crossover process
- @ In, distDict, dict, dictionary containing distribution associated with each gene
@ In, kwargs, dict, dictionary of parameters for this mutation method:
locs, list, the 2 locations of the genes to be swapped
mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur
variables, list, variables names.
@ Out, children, xr.DataArray, the mutated chromosome, i.e., the child.
"""
- loc1,loc2 = locationsGenerator(offSprings, kwargs['locs'])
+ if kwargs['locs'] == None:
+ locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False)))
+ loc1 = np.minimum(locs[0], locs[1])
+ loc2 = np.maximum(locs[0], locs[1])
+ else:
+ loc1 = np.minimum(kwargs['locs'][0], kwargs['locs'][1])
+ loc2 = np.maximum(kwargs['locs'][0], kwargs['locs'][1])
# initializing children
children = xr.DataArray(np.zeros((np.shape(offSprings))),
@@ -65,7 +70,6 @@ def scrambleMutator(offSprings, distDict, **kwargs):
This method performs the scramble mutator. For each child, a subset of genes is chosen
and their values are shuffled randomly.
@ In, offSprings, xr.DataArray, offsprings after crossover
- @ In, distDict, dict, dictionary containing distribution associated with each gene
@ In, kwargs, dict, dictionary of parameters for this mutation method:
chromosome, numpy.array, the chromosome that will mutate to the new child
locs, list, the locations of the genes to be randomly scrambled
@@ -73,7 +77,12 @@ def scrambleMutator(offSprings, distDict, **kwargs):
variables, list, variables names.
@ Out, child, np.array, the mutated chromosome, i.e., the child.
"""
- loc1,loc2 = locationsGenerator(offSprings, kwargs['locs'])
+ if kwargs['locs'] == None:
+ locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False)))
+ locs.sort()
+ else:
+ locs = [kwargs['locs'][0], kwargs['locs'][1]]
+ locs.sort()
# initializing children
children = xr.DataArray(np.zeros((np.shape(offSprings))),
@@ -86,9 +95,9 @@ def scrambleMutator(offSprings, distDict, **kwargs):
children[i,j] = distDict[offSprings[i].coords['Gene'].values[j]].cdf(float(offSprings[i,j].values))
for i in range(np.shape(offSprings)[0]):
- for ind,element in enumerate([loc1,loc2]):
+ for ind,element in enumerate(locs):
if randomUtils.random(dim=1,samples=1)< kwargs['mutationProb']:
- children[i,loc1:loc2+1] = randomUtils.randomPermutation(list(children.data[i,loc1:loc2+1]),None)
+ children[i,locs[0]:locs[-1]+1] = randomUtils.randomPermutation(list(children.data[i,locs[0]:locs[-1]+1]),None)
for i in range(np.shape(offSprings)[0]):
for j in range(np.shape(offSprings)[1]):
@@ -103,7 +112,6 @@ def bitFlipMutator(offSprings, distDict, **kwargs):
The gene to be flipped is completely random.
The new value of the flipped gene is is completely random.
@ In, offSprings, xr.DataArray, children resulting from the crossover process
- @ In, distDict, dict, dictionary containing distribution associated with each gene
@ In, kwargs, dict, dictionary of parameters for this mutation method:
mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur
@ Out, offSprings, xr.DataArray, children resulting from the crossover process
@@ -129,7 +137,6 @@ def randomMutator(offSprings, distDict, **kwargs):
"""
This method is designed to randomly mutate a single gene in each chromosome with probability = mutationProb.
@ In, offSprings, xr.DataArray, children resulting from the crossover process
- @ In, distDict, dict, dictionary containing distribution associated with each gene
@ In, kwargs, dict, dictionary of parameters for this mutation method:
mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur
@ Out, offSprings, xr.DataArray, children resulting from the crossover process
@@ -156,13 +163,18 @@ def inversionMutator(offSprings, distDict, **kwargs):
E.g. given chromosome C = [0,1,2,3,4,5,6,7,8,9] and sampled locL=2 locU=6;
New chromosome C' = [0,1,6,5,4,3,2,7,8,9]
@ In, offSprings, xr.DataArray, children resulting from the crossover process
- @ In, distDict, dict, dictionary containing distribution associated with each gene
@ In, kwargs, dict, dictionary of parameters for this mutation method:
mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur
@ Out, offSprings, xr.DataArray, children resulting from the crossover process
"""
# sample gene locations: i.e., determine locL and locU
- locL,locU = locationsGenerator(offSprings, kwargs['locs'])
+ if kwargs['locs'] == None:
+ locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False)))
+ locL = np.minimum(locs[0], locs[1])
+ locU = np.maximum(locs[0], locs[1])
+ else:
+ locL = np.minimum(kwargs['locs'][0], kwargs['locs'][1])
+ locU = np.maximum(kwargs['locs'][0], kwargs['locs'][1])
for child in offSprings:
# the mutation is performed for each child independently
@@ -171,7 +183,7 @@ def inversionMutator(offSprings, distDict, **kwargs):
seq = np.arange(locL,locU+1)
allElems = []
for i,elem in enumerate(seq):
- allElems.append(distDict[child.coords['Gene'].values[i]].cdf(float(child[elem].values)))
+ allElems.append(distDict[child.coords['Gene'].values[i]].cdf(float(child[elem].values)))
mirrSeq = allElems[::-1]
mirrElems = []
@@ -182,23 +194,6 @@ def inversionMutator(offSprings, distDict, **kwargs):
return offSprings
-def locationsGenerator(offSprings,locs):
- """
- Methods designed to process the locations for the mutators. These locations can be either user specified or
- randomly generated.
- @ In, offSprings, xr.DataArray, children resulting from the crossover process
- @ In, locs, list, the two locations of the genes to be swapped
- @ Out, loc1, loc2, int, the two ordered processed locations required by the mutators
- """
- if locs == None:
- locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False)))
- loc1 = np.minimum(locs[0], locs[1])
- loc2 = np.maximum(locs[0], locs[1])
- else:
- loc1 = np.minimum(locs[0], locs[1])
- loc2 = np.maximum(locs[0], locs[1])
- return loc1, loc2
-
__mutators = {}
__mutators['swapMutator'] = swapMutator
__mutators['scrambleMutator'] = scrambleMutator
diff --git a/ravenframework/Optimizers/parentSelectors/parentSelectors.py b/ravenframework/Optimizers/parentSelectors/parentSelectors.py
index fc82522271..34fc2c2725 100644
--- a/ravenframework/Optimizers/parentSelectors/parentSelectors.py
+++ b/ravenframework/Optimizers/parentSelectors/parentSelectors.py
@@ -21,10 +21,15 @@
Created June,16,2020
@authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi
"""
-
+# External Modules----------------------------------------------------------------------------------
import numpy as np
import xarray as xr
from ...utils import randomUtils
+# External Modules----------------------------------------------------------------------------------
+
+# Internal Modules----------------------------------------------------------------------------------
+from ...utils.gaUtils import dataArrayToDict, datasetToDataArray
+# Internal Modules End------------------------------------------------------------------------------
# For mandd: to be updated with RAVEN official tools
from itertools import combinations
@@ -42,7 +47,8 @@ def rouletteWheel(population,**kwargs):
"""
# Arguments
pop = population
- fitness = kwargs['fitness']
+ fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist])
+ # fitness = kwargs['fitness'].data
nParents= kwargs['nParents']
# if nparents = population size then do nothing (whole population are parents)
if nParents == pop.shape[0]:
@@ -62,15 +68,15 @@ def rouletteWheel(population,**kwargs):
roulettePointer = randomUtils.random(dim=1, samples=1)
# initialize Probability
counter = 0
- if np.all(fitness.data>=0) or np.all(fitness.data<=0):
- selectionProb = fitness.data/np.sum(fitness.data) # Share of the pie (rouletteWheel)
+ if np.all(fitness>=0) or np.all(fitness<=0):
+ selectionProb = fitness/np.sum(fitness) # Share of the pie (rouletteWheel)
else:
# shift the fitness to be all positive
- shiftedFitness = fitness.data + abs(min(fitness.data))
+ shiftedFitness = fitness + abs(min(fitness))
selectionProb = shiftedFitness/np.sum(shiftedFitness) # Share of the pie (rouletteWheel)
sumProb = selectionProb[counter]
- while sumProb < roulettePointer :
+ while sumProb <= roulettePointer :
counter += 1
sumProb += selectionProb[counter]
selectedParent[i,:] = pop.values[counter,:]
@@ -78,6 +84,9 @@ def rouletteWheel(population,**kwargs):
fitness = np.delete(fitness,counter,axis=0)
return selectedParent
+def countConstViolation(const):
+ return sum(1 for i in const if i < 0)
+
def tournamentSelection(population,**kwargs):
"""
Tournament Selection mechanism for parent selection
@@ -88,65 +97,50 @@ def tournamentSelection(population,**kwargs):
variables, list, variable names
@ Out, newPopulation, xr.DataArray, selected parents,
"""
- fitness = kwargs['fitness']
- nParents= kwargs['nParents']
+
+ nParents = kwargs['nParents']
+ nObjVal = len(kwargs['objVal'])
+ kSelect = kwargs['kSelection']
pop = population
popSize = population.values.shape[0]
- if 'rank' in kwargs:
- # the key rank is used in multi-objective optimization where rank identifies which front the point belongs to
- rank = kwargs['rank']
- multiObjectiveRanking = True
- matrixOperationRaw = np.zeros((popSize,3))
- matrixOperationRaw[:,0] = np.transpose(np.arange(popSize))
- matrixOperationRaw[:,1] = np.transpose(fitness.data)
- matrixOperationRaw[:,2] = np.transpose(rank.data)
- matrixOperation = np.zeros((popSize,3))
- else:
- multiObjectiveRanking = False
- matrixOperationRaw = np.zeros((popSize,2))
- matrixOperationRaw[:,0] = np.transpose(np.arange(popSize))
- matrixOperationRaw[:,1] = np.transpose(fitness.data)
- matrixOperation = np.zeros((popSize,2))
-
- indexes = list(np.arange(popSize))
- indexesShuffled = randomUtils.randomChoice(indexes, size=popSize, replace=False, engine=None)
-
- if popSize<2*nParents:
- raise ValueError('In tournamentSelection the number of parents cannot be larger than half of the population size.')
-
- for idx, val in enumerate(indexesShuffled):
- matrixOperation[idx,:] = matrixOperationRaw[val,:]
-
- selectedParent = xr.DataArray(
- np.zeros((nParents,np.shape(pop)[1])),
- dims=['chromosome','Gene'],
- coords={'chromosome':np.arange(nParents),
- 'Gene': kwargs['variables']})
+ selectedParent = xr.DataArray(np.zeros((nParents,np.shape(pop)[1])),
+ dims=['chromosome','Gene'],
+ coords={'chromosome':np.arange(nParents),
+ 'Gene': kwargs['variables']})
- if not multiObjectiveRanking: # single-objective implementation of tournamentSelection
+ if nObjVal == 1: # single-objective Case
+ fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist])
for i in range(nParents):
- if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]:
- index = int(matrixOperation[2*i,0])
+ matrixOperationRaw = np.zeros((kSelect,2))
+ selectChromoIndexes = list(np.arange(len(pop))) #NOTE: JYK - selectChromoIndexes should cover all chromosomes in population.
+ selectedChromo = randomUtils.randomChoice(selectChromoIndexes, size=kSelect, replace=False, engine=None) #NOTE: JYK - randomly select several indices with size of kSelect.
+ matrixOperationRaw[:,0] = selectedChromo
+ matrixOperationRaw[:,1] = np.transpose(fitness[selectedChromo])
+ tournamentWinnerIndex = int(matrixOperationRaw[np.argmax(matrixOperationRaw[:,1]),0])
+ selectedParent[i,:] = pop.values[tournamentWinnerIndex,:]
+
+ else: # multi-objective Case
+ # the key rank is used in multi-objective optimization where rank identifies which front the point belongs to.
+ rank = kwargs['rank']
+ crowdDistance = kwargs['crowdDistance']
+ for i in range(nParents):
+ matrixOperationRaw = np.zeros((kSelect,3))
+ selectChromoIndexes = list(np.arange(kSelect))
+ selectedChromo = randomUtils.randomChoice(selectChromoIndexes, size=kSelect, replace=False, engine=None)
+ matrixOperationRaw[:,0] = selectedChromo
+ matrixOperationRaw[:,1] = np.transpose(rank.data[selectedChromo])
+ matrixOperationRaw[:,2] = np.transpose(crowdDistance.data[selectedChromo])
+ minRankIndex = list(np.where(matrixOperationRaw[:,1] == matrixOperationRaw[:,1].min())[0])
+      if len(minRankIndex) != 1: # More than one chromosome having same rank.
+ minRankNmaxCDIndex = list(np.where(matrixOperationRaw[minRankIndex,2] == matrixOperationRaw[minRankIndex,2].max())[0])
else:
- index = int(matrixOperation[2*i+1,0])
- selectedParent[i,:] = pop.values[index,:]
- else: # multi-objective implementation of tournamentSelection
- for i in range(nParents-1):
- if matrixOperation[2*i,2] > matrixOperation[2*i+1,2]:
- index = int(matrixOperation[i,0])
- elif matrixOperation[2*i,2] < matrixOperation[2*i+1,2]:
- index = int(matrixOperation[i+1,0])
- else: # same rank case
- if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]:
- index = int(matrixOperation[i,0])
- else:
- index = int(matrixOperation[i+1,0])
- selectedParent[i,:] = pop.values[index,:]
+ minRankNmaxCDIndex = minRankIndex
+ tournamentWinnerIndex = minRankNmaxCDIndex[0]
+ selectedParent[i,:] = pop.values[tournamentWinnerIndex,:]
return selectedParent
-
def rankSelection(population,**kwargs):
"""
Rank Selection mechanism for parent selection
@@ -163,7 +157,7 @@ def rankSelection(population,**kwargs):
index = np.arange(0,pop.shape[0])
rank = np.arange(0,pop.shape[0])
- data = np.vstack((fitness,index))
+ data = np.vstack((np.array(fitness.variables['test_RankSelection']),index))
dataOrderedByDecreasingFitness = data[:,(-data[0]).argsort()]
dataOrderedByDecreasingFitness[0,:] = rank
dataOrderedByIncreasingPos = dataOrderedByDecreasingFitness[:,dataOrderedByDecreasingFitness[1].argsort()]
@@ -173,6 +167,7 @@ def rankSelection(population,**kwargs):
dims=['chromosome'],
coords={'chromosome': np.arange(np.shape(orderedRank)[0])})
+ rank = rank.to_dataset(name = 'test_RankSelection')
selectedParent = rouletteWheel(population, fitness=rank , nParents=kwargs['nParents'],variables=kwargs['variables'])
return selectedParent
diff --git a/ravenframework/Optimizers/survivorSelection/survivorSelection.py b/ravenframework/Optimizers/survivorSelection/survivorSelection.py
new file mode 100644
index 0000000000..bb0a738ea4
--- /dev/null
+++ b/ravenframework/Optimizers/survivorSelection/survivorSelection.py
@@ -0,0 +1,96 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ Implementation of survivorSelection step for new generation
+ selection process in Genetic Algorithm.
+
+ Created Apr,3,2024
+ @authors: Mohammad Abdo, Junyung Kim
+"""
+# External Modules----------------------------------------------------------------------------------
+import numpy as np
+import xarray as xr
+from ravenframework.utils import frontUtils
+# External Modules End------------------------------------------------------------------------------
+
+# Internal Modules----------------------------------------------------------------------------------
+from ...utils.gaUtils import dataArrayToDict, datasetToDataArray
+# Internal Modules End------------------------------------------------------------------------------
+
+# @profile
+
+def singleObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g):
+ if self.counter == 1:
+ self.population = offSprings
+ self.fitness = offSpringFitness
+ self.objectiveVal = rlz[self._objectiveVar[0]].data
+ else:
+ self.population, self.fitness,\
+ self.popAge,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge,
+ variables=list(self.toBeSampled),
+ population=self.population,
+ fitness=self.fitness,
+ newRlz=rlz,
+ offSpringsFitness=offSpringFitness,
+ popObjectiveVal=self.objectiveVal)
+
+def multiObjSurvivorSelect(self, info, rlz, traj, offSprings, offSpringFitness, objectiveVal, g):
+ if self.counter == 1:
+ self.population = offSprings
+ self.fitness = offSpringFitness
+ self.constraintsV = g
+ # offspringObjsVals for Rank and CD calculation
+ offObjVal = []
+ for i in range(len(self._objectiveVar)):
+ offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data)))
+
+ # offspringFitVals for Rank and CD calculation
+ fitVal = datasetToDataArray(self.fitness, self._objectiveVar).data
+ offspringFitVals = fitVal.tolist()
+ offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringFitVals))
+ self.rank = xr.DataArray(offSpringRank,
+ dims=['rank'],
+ coords={'rank': np.arange(np.shape(offSpringRank)[0])})
+ offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank,
+ popSize=len(offSpringRank),
+ objectives=np.array(offspringFitVals))
+
+ self.crowdingDistance = xr.DataArray(offSpringCD,
+ dims=['CrowdingDistance'],
+ coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])})
+ self.objectiveVal = []
+ for i in range(len(self._objectiveVar)):
+ self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data)))
+ else:
+ self.population,self.rank, \
+ self.popAge,self.crowdingDistance, \
+ self.objectiveVal,self.fitness, \
+ self.constraintsV = self._survivorSelectionInstance(age=self.popAge,
+ variables=list(self.toBeSampled),
+ population=self.population,
+ offsprings=rlz,
+ popObjectiveVal=self.objectiveVal,
+ offObjectiveVal=objectiveVal,
+ popFit = self.fitness,
+ offFit = offSpringFitness,
+ popConstV = self.constraintsV,
+ offConstV = g)
+
+ self._collectOptPointMulti(self.population,
+ self.rank,
+ self.crowdingDistance,
+ self.objectiveVal,
+ self.fitness,
+ self.constraintsV)
+ self._resolveNewGenerationMulti(traj, rlz, info)
\ No newline at end of file
diff --git a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py
index 1b754af494..d5649880a4 100644
--- a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py
+++ b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py
@@ -19,13 +19,20 @@
2. fitnessBased
Created June,16,2020
- @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi
+ @authors: Mohammad Abdo, Junyung Kim, Diego Mandelli, Andrea Alfonsi
"""
-
+# External Modules----------------------------------------------------------------------------------
import numpy as np
import xarray as xr
+from ravenframework.utils import frontUtils
+# External Modules End------------------------------------------------------------------------------
+
+# Internal Modules----------------------------------------------------------------------------------
+from ...utils.gaUtils import dataArrayToDict, datasetToDataArray
+# Internal Modules End------------------------------------------------------------------------------
# @profile
+
def ageBased(newRlz,**kwargs):
"""
ageBased survivorSelection mechanism for new generation selection.
@@ -80,7 +87,7 @@ def fitnessBased(newRlz,**kwargs):
It combines the parents and children/offsprings then keeps the fittest individuals
to revert to the same population size.
@ In, newRlz, xr.DataSet, containing either a single realization, or a batch of realizations.
- @ In, kwargs, dict, dictionary of parameters for this mutation method:
+ @ In, kwargs, dict, dictionary of parameters for this survivor selection method:
age, list, ages of each chromosome in the population of the previous generation
offSpringsFitness, xr.DataArray, fitness of each new child, i.e., np.shape(offSpringsFitness) = nChildren x nGenes
variables
@@ -96,11 +103,12 @@ def fitnessBased(newRlz,**kwargs):
else:
popAge = kwargs['age']
- offSpringsFitness = np.atleast_1d(kwargs['offSpringsFitness'])
+ offSpringsFitness = datasetToDataArray(kwargs['offSpringsFitness'], list(kwargs['offSpringsFitness'].keys())).data
+ offSpringsFitness = np.array([item for sublist in offSpringsFitness for item in sublist])
offSprings = np.atleast_2d(newRlz[kwargs['variables']].to_array().transpose().data)
population = np.atleast_2d(kwargs['population'].data)
- popFitness = np.atleast_1d(kwargs['fitness'].data)
-
+ popFitness = datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data
+ popFitness = np.array([item for sublist in popFitness for item in sublist])
newPopulation = population
newFitness = popFitness
newAge = list(map(lambda x:x+1, popAge))
@@ -116,19 +124,122 @@ def fitnessBased(newRlz,**kwargs):
newAge = sortedAgeT[:-len(offSprings)]
newPopulationArray = xr.DataArray(newPopulationSorted,
- dims=['chromosome','Gene'],
- coords={'chromosome':np.arange(np.shape(newPopulationSorted)[0]),
- 'Gene': kwargs['variables']})
+ dims=['chromosome','Gene'],
+ coords={'chromosome':np.arange(np.shape(newPopulationSorted)[0]),
+ 'Gene': kwargs['variables']})
newFitness = xr.DataArray(newFitness,
dims=['chromosome'],
coords={'chromosome':np.arange(np.shape(newFitness)[0])})
+ # newFitness = newFitness.to_dataset(name = list(kwargs['fitness'].keys())[0])
+ newFitness = newFitness.to_dataset(name = list(kwargs['variables'])[0])
#return newPopulationArray,newFitness,newAge
return newPopulationArray,newFitness,newAge,kwargs['popObjectiveVal']
+# @profile
+def rankNcrowdingBased(offsprings, **kwargs):
+ """
+ rankNcrowdingBased survivorSelection mechanism for new generation selection
+ It combines the parents and children/offsprings then calculates their rank and crowding distance.
+ After having ranks and crowding distances, it keeps the lowest ranks (and the highest crowding distance if individuals have the same rank).
+ @ In, offsprings, xr.DataSet, containing either a single realization, or a batch of realizations.
+ @ In, kwargs, dict, dictionary of parameters for this survivor selection method:
+ variables
+ population
+ @ Out, newPopulation, xr.DataArray, newPopulation for the new generation, i.e. np.shape(newPopulation) = populationSize x nGenes.
+ @ Out, newRank, xr.DataArray, rank of each chromosome in the new population
+ @ Out, newCD, xr.DataArray, crowding distance of each chromosome in the new population.
+ """
+ popSize = np.shape(kwargs['population'])[0]
+ if ('age' not in kwargs.keys() or kwargs['age'] == None):
+ popAge = [0]*popSize
+ else:
+ popAge = kwargs['age']
+
+ population = np.atleast_2d(kwargs['population'].data)
+ offSprings = np.atleast_2d(offsprings[kwargs['variables']].to_array().transpose().data)
+ popObjectiveVal = kwargs['popObjectiveVal']
+ offObjectiveVal = kwargs['offObjectiveVal']
+ popFit = kwargs['popFit']
+ popFitArray = []
+ offFit = kwargs['offFit']
+ offFitArray = []
+ for i in list(popFit.keys()): #NOTE popFit.keys() and offFit.keys() must be same.
+ popFitArray.append(popFit[i].data.tolist())
+ offFitArray.append(offFit[i].data.tolist())
+
+ newFitMerged = np.array([i + j for i, j in zip(popFitArray, offFitArray)])
+ newFitMerged_pair = [list(ele) for ele in list(zip(*newFitMerged))]
+
+ popConstV = kwargs['popConstV'].data
+ offConstV = kwargs['offConstV'].data
+ newConstVMerged = np.array(popConstV.tolist() + offConstV.tolist())
+
+ newObjectivesMerged = np.array([i + j for i, j in zip(popObjectiveVal, offObjectiveVal)])
+ newObjectivesMerged_pair = [list(ele) for ele in list(zip(*newObjectivesMerged))]
+
+ newPopRank = frontUtils.rankNonDominatedFrontiers(np.array(newFitMerged_pair))
+ newPopRank = xr.DataArray(newPopRank,
+ dims=['rank'],
+ coords={'rank': np.arange(np.shape(newPopRank)[0])})
+
+ newPopCD = frontUtils.crowdingDistance(rank=newPopRank, popSize=len(newPopRank), objectives=np.array(newFitMerged_pair))
+ newPopCD = xr.DataArray(newPopCD,
+ dims=['CrowdingDistance'],
+ coords={'CrowdingDistance': np.arange(np.shape(newPopCD)[0])})
+
+ newAge = list(map(lambda x:x+1, popAge))
+ newPopulationMerged = np.concatenate([population,offSprings])
+ newAge.extend([0]*len(offSprings))
+
+ sortedRank,sortedCD,sortedAge,sortedPopulation,sortedFit,sortedObjectives,sortedConstV = \
+ zip(*[(x,y,z,i,j,k,a) for x,y,z,i,j,k,a in \
+ sorted(zip(newPopRank.data, newPopCD.data, newAge, newPopulationMerged.tolist(), newFitMerged_pair, newObjectivesMerged_pair, newConstVMerged),reverse=False,key=lambda x: (x[0], -x[1]))])
+ sortedRankT, sortedCDT, sortedAgeT, sortedPopulationT, sortedFitT, sortedObjectivesT, sortedConstVT = \
+ np.atleast_1d(list(sortedRank)), list(sortedCD), list(sortedAge),np.atleast_1d(list(sortedPopulation)),np.atleast_1d(list(sortedFit)),np.atleast_1d(list(sortedObjectives)),np.atleast_1d(list(sortedConstV))
+
+ newPopulation = sortedPopulationT[:-len(offSprings)]
+ newObjectives = sortedObjectivesT[:-len(offSprings)]
+ newFit = sortedFitT[:-len(offSprings)]
+
+ newRank = frontUtils.rankNonDominatedFrontiers(newObjectives)
+ newRank = xr.DataArray(newRank,
+ dims=['rank'],
+ coords={'rank': np.arange(np.shape(newRank)[0])})
+
+ newObjectivesP = [list(ele) for ele in list(zip(*newObjectives))]
+ newCD = frontUtils.crowdingDistance(rank=newRank, popSize=len(newRank), objectives=newObjectives)
+ newCD = xr.DataArray(newCD,
+ dims=['CrowdingDistance'],
+ coords={'CrowdingDistance': np.arange(np.shape(newCD)[0])})
+
+ newAge = sortedAgeT[:-len(offSprings)]
+ newConstV = sortedConstVT[:-len(offSprings)]
+
+ for i in range(len(list(popFit.keys()))):
+ fitness = xr.DataArray(newFit[:,i],
+ dims=['chromosome'],
+ coords={'chromosome': np.arange(len(newFit[:,i]))})
+ if i == 0:
+ newFitnessSet = fitness.to_dataset(name = list(popFit.keys())[i])
+ else:
+ newFitnessSet[list(popFit.keys())[i]] = fitness
+
+ newPopulationArray = xr.DataArray(newPopulation,
+ dims=['chromosome','Gene'],
+ coords={'chromosome':np.arange(np.shape(newPopulation)[0]),
+ 'Gene': kwargs['variables']})
+ newConstV = xr.DataArray(newConstV,
+ dims=['chromosome','ConstEvaluation'],
+ coords={'chromosome':np.arange(np.shape(newPopulation)[0]),
+ 'ConstEvaluation':np.arange(np.shape(newConstV)[1])})
+
+ return newPopulationArray,newRank,newAge,newCD,newObjectivesP,newFitnessSet,newConstV
+
__survivorSelectors = {}
__survivorSelectors['ageBased'] = ageBased
__survivorSelectors['fitnessBased'] = fitnessBased
+__survivorSelectors['rankNcrowdingBased'] = rankNcrowdingBased
def returnInstance(cls, name):
"""
@@ -138,5 +249,5 @@ def returnInstance(cls, name):
@ Out, __crossovers[name], instance of class
"""
if name not in __survivorSelectors:
- cls.raiseAnError (IOError, "{} MECHANISM NOT IMPLEMENTED!!!!!".format(name))
+ cls.raiseAnError (IOError, "{} is not an valid option for survivor selector. Please review the spelling of the survivor selector. ".format(name))
return __survivorSelectors[name]
diff --git a/ravenframework/utils/frontUtils.py b/ravenframework/utils/frontUtils.py
index c63eca57fc..ca999e02be 100644
--- a/ravenframework/utils/frontUtils.py
+++ b/ravenframework/utils/frontUtils.py
@@ -44,6 +44,7 @@ def nonDominatedFrontier(data, returnMask, minMask=None):
Reference: the following code has been adapted from https://stackoverflow.com/questions/32791911/fast-calculation-of-pareto-front-in-python
"""
+
if minMask is None:
pass
elif minMask is not None and minMask.shape[0] != data.shape[1]:
@@ -56,8 +57,8 @@ def nonDominatedFrontier(data, returnMask, minMask=None):
isEfficient = np.arange(data.shape[0])
nPoints = data.shape[0]
nextPointIndex = 0
- while nextPointIndex= 0,
+ so if:
+ 1) f(x,y) >= 0 then g = f
+ 2) f(x,y) >= a then g = f - a
+ 3) f(x,y) <= b then g = b - f
+ 4) f(x,y) = c then g = 1e-6 - abs(f(x,y) - c) (equality constraint)
+ """
+ g = eval(Input.name)(Input)
+ return g
+
+
+def expConstr1(Input):#You are free to pick this name but it has to be similar to the one in the xml#
+ """
+ Let's assume that the constraint is:
+ $ x3+x4 < 8 $
+ then g the constraint evaluation function (which has to be > 0) is taken to be:
+ g = 8 - (x3+x4)
+ in this case if g(\vec(x)) < 0 then this x violates the constraint and vice versa
+ @ In, Input, object, RAVEN container
+ @ out, g, float, explicit constraint 1 evaluation function
+ """
+ g = 8 - Input.x3 - Input.x4
+ return g
+
+def expConstr2(Input):
+ """
+ Explicit Equality Constraint:
+ let's consider the constraint x1**2 + x2**2 = 25
+ The way to write g is to use a very small number for instance, epsilon = 1e-12
+ and then g = epsilon - abs(constraint)
+ @ In, Input, object, RAVEN container
+ @ out, g, float, explicit constraint 2 evaluation function
+ """
+ g = 1e-12 - abs(Input.x1**2 + Input.x2**2 - 25)
+ return g
+
+def expConstr3(Input):
+ """
+ @ In, Input, object, RAVEN container
+ @ out, g, float, explicit constraint 3 evaluation function
+ """
+ g = 10 - Input.x3 - Input.x4
+ return g
+
+def impConstr1(Input):
+ """
+ The implicit constraint involves variables from the output space, for example the objective variable or
+ a dependent variable that is not in the optimization search space
+ @ In, Input, object, RAVEN container
+ @ out, g, float, implicit constraint 1 evaluation function
+ """
+ g = 10 - Input.x1**2 - Input.obj
+ return g
+
+def impConstr2(Input):
+ """
+ The implicit constraint involves variables from the output space, for example the objective variable or
+ a dependent variable that is not in the optimization search space
+ @ In, Input, object, RAVEN container
+ @ out, g, float, implicit constraint 2 evaluation function
+ """
+ g = Input.x1**2 + Input.obj1 - 10
+ return g
+
+def impConstr3(Input):
+ """
+ The implicit constraint involves variables from the output space, for example the objective variable or
+ a dependent variable that is not in the optimization search space
+ @ In, Input, object, RAVEN container
+ @ out, g, float, implicit constraint #3 evaluation function
+ """
+ g = 100 - Input.obj1
+ return g
diff --git a/tests/framework/AnalyticModels/optimizing/myLocalSum.py b/tests/framework/AnalyticModels/optimizing/myLocalSum.py
index faec353eb6..d4b6fec246 100644
--- a/tests/framework/AnalyticModels/optimizing/myLocalSum.py
+++ b/tests/framework/AnalyticModels/optimizing/myLocalSum.py
@@ -81,5 +81,4 @@ def constrain(self):
and negative if violated.
"""
explicitConstrain = constraint(self)
- return explicitConstrain
-
+ return explicitConstrain
\ No newline at end of file
diff --git a/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py b/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py
new file mode 100644
index 0000000000..2bad9f6b44
--- /dev/null
+++ b/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py
@@ -0,0 +1,37 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# @author: Mohammad Abdo (@Jimmy-INL)
+
+def evaluate(Inputs):
+ Sum = 0
+ LocalSum1 = 0
+ LocalSum2 = 0
+ for ind,var in enumerate(Inputs.keys()):
+ # write the objective function here
+ Sum += (ind + 1) * Inputs[var]
+ if (ind == 0) or (ind == 1):
+ LocalSum1 += (ind + 1) * Inputs[var]
+ if (ind == 2) or (ind == 3):
+ LocalSum2 += (ind + 1) * Inputs[var]
+ return Sum[:], LocalSum1[:], LocalSum2[:]
+
+def run(self,Inputs):
+ """
+ RAVEN API
+ @ In, self, object, RAVEN container
+ @ In, Inputs, dict, additional inputs
+ @ Out, None
+ """
+ self.obj1,self.obj2,self.obj3 = evaluate(Inputs)
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/MultiObjectiveBeale-Bealeflipped.xml b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/MultiObjectiveBeale-Bealeflipped.xml
new file mode 100644
index 0000000000..06dbd7d901
--- /dev/null
+++ b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/MultiObjectiveBeale-Bealeflipped.xml
@@ -0,0 +1,125 @@
+
+
+
+ raven/tests/framework/Optimizers/GeneticAlgorithms.NSGAII
+ Mohammad Abdo
+ 2024-02-18
+
+ NSGA-II min-max test
+
+
+
+ Multi_beale_bealeFlipped
+ optimize,print
+ 4
+
+
+
+
+ placeholder
+ beale
+ GAopt
+ opt_export
+
+
+
+
+ opt_export
+ optOut
+
+
+
+
+
+
+
+ x,y,obj1,obj2
+
+
+
+
+
+ 0
+ 5
+
+
+
+
+
+
+ 5
+ 42
+ every
+ min, max
+
+
+ 50
+ tournamentSelection
+
+
+ 0.8
+
+
+ 0.8
+
+
+
+
+
+ rankNcrowdingBased
+
+
+ 0.0
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+ obj1, obj2
+ optOut
+ MC_samp
+
+
+
+
+
+
+
+ 50
+ 050877
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+
+
+
+
+
+ x,y
+
+
+
+ trajID
+
+
+
+
+
+
+ csv
+
+
+
+ csv
+
+ trajID
+
+
+
\ No newline at end of file
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml
new file mode 100644
index 0000000000..7db423e7b7
--- /dev/null
+++ b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml
@@ -0,0 +1,132 @@
+
+
+
+ raven/tests/framework/Optimizers/GA.MultiObjZDT1
+ Junyung Kim
+ 2023-02-21
+
+ ZDT1 test using NSGA-II
+
+
+
+ ZDT1
+ optimize,print
+ 1
+
+
+
+
+ placeholder
+ ZDT
+ GAopt
+ opt_export
+
+
+
+
+ opt_export
+ optOut
+
+
+
+
+
+
+
+ x1,x2,x3,obj1,obj2
+
+
+
+
+
+ 0
+ 1
+
+
+
+
+
+
+ 15
+ 42
+ every
+ min,min
+
+
+
+ 10
+ tournamentSelection
+
+
+ 1.0
+
+
+ 1.0
+
+
+
+
+ rankNcrowdingBased
+
+
+
+ 0.0
+
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+ obj1, obj2
+ optOut
+ MC_samp
+
+
+
+
+
+
+ 10
+ 050877
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+
+
+
+
+
+ x1,x2,x3
+
+
+
+ trajID
+
+
+
+
+
+
+ csv
+
+
+
+ csv
+
+ trajID
+
+
+
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml
new file mode 100644
index 0000000000..1f67af2daf
--- /dev/null
+++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml
@@ -0,0 +1,164 @@
+
+
+
+ raven/tests/framework/Optimizers/GeneticAlgorithms.NSGAII
+ Junyung Kim
+ 2022-12-21
+
+ NSGA-II min-min test
+
+
+
+ Multi_MinwoReplacement/
+ optimize,print
+ 1
+
+
+
+
+ placeholder
+ myLocalSum
+ GAopt
+ opt_export
+
+
+
+
+ opt_export
+ optOut
+
+
+
+
+
+
+
+ x1,x2,x3,x4,x5,x6,obj1,obj2,obj3
+
+
+
+
+
+ x1,x2,x3,x4,x5,x6
+
+
+ x1,x2,x3,x4,x5,x6,obj1
+
+
+ x1,x2,x3,x4,x5,x6,obj1
+
+
+
+
+
+ 2
+ 7
+ withoutReplacement
+
+
+
+
+
+
+ 5
+ 42
+ every
+ min, max, min
+
+
+ 50
+ tournamentSelection
+
+
+ 0.8
+
+
+ 0.8
+
+
+
+
+
+ rankNcrowdingBased
+
+
+ 0.0
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+ obj1, obj2, obj3
+ optOut
+ MC_samp
+ expConstr3
+ impConstr2
+ impConstr3
+
+
+
+
+
+
+ 50
+ 050877
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+
+
+
+
+
+ x1,x2,x3,x4,x5,x6
+
+
+
+ trajID
+
+
+
+
+
+
+ csv
+
+
+
+ csv
+
+ trajID
+
+
+
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml
index 4da8636350..08267e5c89 100644
--- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml
+++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml
@@ -55,14 +55,14 @@
- 20
+ 542every20
- rouletteWheel
+ tournamentSelection0.8
@@ -71,7 +71,7 @@
0.9
-
+ fitnessBased
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv
new file mode 100644
index 0000000000..422ae2a2a6
--- /dev/null
+++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv
@@ -0,0 +1,161 @@
+x1,x2,x3,obj1,obj2,age,batchId,rank,CD,FitnessEvaluation_obj1,FitnessEvaluation_obj2,accepted
+0.902940987587,0.947612243227,0.840374259707,0.902940987587,3.96681957049,0.0,1.0,3.0,inf,0.902940987587,3.96681957049,first
+0.227236453264,0.847510234417,0.362760231915,0.227236453264,3.60499993579,0.0,1.0,2.0,inf,0.227236453264,3.60499993579,first
+0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,1.0,1.0,inf,0.766431005617,1.3169883176,first
+0.633202111729,0.793545654927,0.564774226762,0.633202111729,3.28234279694,0.0,1.0,2.0,2.0,0.633202111729,3.28234279694,first
+0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,1.0,1.0,1.32735741676,0.306377726911,1.93224686343,first
+0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,1.0,1.0,inf,0.110044764846,4.28628616584,first
+0.331692186261,0.571854743308,0.965348788995,0.331692186261,4.24730587019,0.0,1.0,3.0,inf,0.331692186261,4.24730587019,first
+0.267873673297,0.166777967281,0.847808119107,0.267873673297,3.00298144409,0.0,1.0,1.0,0.749061564967,0.267873673297,3.00298144409,first
+0.713407223745,0.641621648018,0.334449647072,0.713407223745,2.25417202135,0.0,1.0,2.0,inf,0.713407223745,2.25417202135,first
+0.13264102096,0.630711117673,0.405532905694,0.13264102096,3.37050011696,0.0,1.0,1.0,0.672642583243,0.13264102096,3.37050011696,first
+0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,2.0,1.0,inf,0.766431005617,1.3169883176,accepted
+0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,2.0,1.0,inf,0.110044764846,4.28628616584,accepted
+0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,0.0,2.0,1.0,1.65715637093,0.227236453264,1.41712807233,accepted
+0.13264102096,0.630711117673,0.405532905694,0.13264102096,3.37050011696,0.0,2.0,1.0,1.14481568274,0.13264102096,3.37050011696,accepted
+0.227236453264,0.192211290866,0.291229140081,0.227236453264,1.70412941407,0.0,2.0,2.0,inf,0.227236453264,1.70412941407,accepted
+0.227236453264,0.304242241034,0.362760231915,0.227236453264,2.17521187389,0.0,2.0,3.0,inf,0.227236453264,2.17521187389,accepted
+0.766431005617,0.399860977754,0.39961784645,0.766431005617,1.78453837074,0.0,2.0,3.0,inf,0.766431005617,1.78453837074,accepted
+0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,2.0,3.0,2.0,0.306377726911,1.93224686343,accepted
+0.713407223745,0.641621648018,0.334449647072,0.713407223745,2.25417202135,0.0,2.0,4.0,inf,0.713407223745,2.25417202135,accepted
+0.227236453264,0.192211290866,0.785175960228,0.227236453264,2.98689433584,0.0,2.0,4.0,inf,0.227236453264,2.98689433584,accepted
+0.110044764846,0.207941663733,0.738899003886,0.110044764846,3.19042251763,0.0,3.0,1.0,inf,0.110044764846,3.19042251763,accepted
+0.766431005617,0.184854460225,0.39961784645,0.766431005617,1.300728344,0.0,3.0,1.0,inf,0.766431005617,1.300728344,accepted
+0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,0.0,3.0,1.0,1.2940861679,0.227236453264,1.41712807233,accepted
+0.182236086852,0.192211290866,0.39961784645,0.182236086852,2.0642954793,0.0,3.0,1.0,1.11694361992,0.182236086852,2.0642954793,accepted
+0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,3.0,2.0,inf,0.766431005617,1.3169883176,accepted
+0.110044764846,0.192211290866,0.926658862253,0.110044764846,3.66420727224,0.0,3.0,2.0,inf,0.110044764846,3.66420727224,accepted
+0.227236453264,0.192211290866,0.291229140081,0.227236453264,1.70412941407,0.0,3.0,2.0,1.84044490677,0.227236453264,1.70412941407,accepted
+0.13264102096,0.630711117673,0.405532905694,0.13264102096,3.37050011696,0.0,3.0,2.0,1.01360466361,0.13264102096,3.37050011696,accepted
+0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,3.0,3.0,inf,0.110044764846,4.28628616584,accepted
+0.766431005617,0.192211290866,0.570443976105,0.766431005617,1.70051534965,0.0,3.0,3.0,inf,0.766431005617,1.70051534965,accepted
+0.766431005617,0.184854460225,0.39961784645,0.766431005617,1.300728344,0.0,4.0,1.0,inf,0.766431005617,1.300728344,accepted
+0.110044764846,0.207941663733,0.37081825509,0.110044764846,2.18754187519,0.0,4.0,1.0,inf,0.110044764846,2.18754187519,accepted
+0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,0.0,4.0,1.0,1.75104033403,0.227236453264,1.41712807233,accepted
+0.182236086852,0.192211290866,0.39961784645,0.182236086852,2.0642954793,0.0,4.0,1.0,1.04728459672,0.182236086852,2.0642954793,accepted
+0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,4.0,2.0,inf,0.766431005617,1.3169883176,accepted
+0.110044764846,0.115869054598,0.738899003886,0.110044764846,2.93801908476,0.0,4.0,2.0,inf,0.110044764846,2.93801908476,accepted
+0.227236453264,0.192211290866,0.291229140081,0.227236453264,1.70412941407,0.0,4.0,2.0,2.0,0.227236453264,1.70412941407,accepted
+0.110044764846,0.207941663733,0.738899003886,0.110044764846,3.19042251763,0.0,4.0,3.0,inf,0.110044764846,3.19042251763,accepted
+0.766431005617,0.192211290866,0.570443976105,0.766431005617,1.70051534965,0.0,4.0,3.0,inf,0.766431005617,1.70051534965,accepted
+0.330898024452,0.207941663733,0.738899003886,0.330898024452,2.71321497121,0.0,4.0,3.0,2.0,0.330898024452,2.71321497121,accepted
+0.110044764846,0.184854460225,0.139331453512,0.110044764846,1.50665039766,0.0,5.0,1.0,inf,0.110044764846,1.50665039766,accepted
+0.766431005617,0.184854460225,0.0769799126026,0.766431005617,0.61568905179,0.0,5.0,1.0,inf,0.766431005617,0.61568905179,accepted
+0.694784936191,0.184854460225,0.39961784645,0.694784936191,1.37029270323,0.0,5.0,1.0,1.72098090572,0.694784936191,1.37029270323,accepted
+0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,0.0,5.0,1.0,1.04389327594,0.227236453264,1.41712807233,accepted
+0.110044764846,0.207941663733,0.37081825509,0.110044764846,2.18754187519,0.0,5.0,2.0,inf,0.110044764846,2.18754187519,accepted
+0.766431005617,0.184854460225,0.203061229597,0.766431005617,0.875972693903,0.0,5.0,2.0,inf,0.766431005617,0.875972693903,accepted
+0.227236453264,0.192211290866,0.291229140081,0.227236453264,1.70412941407,0.0,5.0,2.0,1.79604835097,0.227236453264,1.70412941407,accepted
+0.182236086852,0.192211290866,0.39961784645,0.182236086852,2.0642954793,0.0,5.0,2.0,0.547116393146,0.182236086852,2.0642954793,accepted
+0.110044764846,0.115869054598,0.738899003886,0.110044764846,2.93801908476,0.0,5.0,3.0,inf,0.110044764846,2.93801908476,accepted
+0.766431005617,0.207941663733,0.218440438439,0.766431005617,0.957477514551,0.0,5.0,3.0,inf,0.766431005617,0.957477514551,accepted
+0.766431005617,0.184854460225,0.0769799126026,0.766431005617,0.61568905179,0.0,6.0,1.0,inf,0.766431005617,0.61568905179,accepted
+0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,0.0,6.0,1.0,inf,0.110044764846,1.2371480565,accepted
+0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,0.0,6.0,1.0,2.0,0.337615172224,1.00909282563,accepted
+0.110044764846,0.184854460225,0.139331453512,0.110044764846,1.50665039766,0.0,6.0,2.0,inf,0.110044764846,1.50665039766,accepted
+0.766431005617,0.184854460225,0.203061229597,0.766431005617,0.875972693903,0.0,6.0,2.0,inf,0.766431005617,0.875972693903,accepted
+0.694784936191,0.184854460225,0.39961784645,0.694784936191,1.37029270323,0.0,6.0,2.0,1.67951302037,0.694784936191,1.37029270323,accepted
+0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,0.0,6.0,2.0,0.930869456237,0.227236453264,1.41712807233,accepted
+0.148086928797,0.184854460225,0.139331453512,0.148086928797,1.43208570884,0.0,6.0,2.0,0.32048697963,0.148086928797,1.43208570884,accepted
+0.766431005617,0.207941663733,0.218440438439,0.766431005617,0.957477514551,0.0,6.0,3.0,inf,0.766431005617,0.957477514551,accepted
+0.110044764846,0.184854460225,0.168291045858,0.110044764846,1.58337958114,0.0,6.0,3.0,inf,0.110044764846,1.58337958114,accepted
+0.766431005617,0.184854460225,0.0769799126026,0.766431005617,0.61568905179,2.0,7.0,1.0,inf,0.766431005617,0.61568905179,accepted
+0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,2.0,7.0,1.0,inf,0.110044764846,1.2371480565,accepted
+0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,2.0,7.0,1.0,1.47967201149,0.337615172224,1.00909282563,accepted
+0.535774680445,0.184854460225,0.0368869491007,0.535774680445,0.720668788305,2.0,7.0,1.0,1.28633056549,0.535774680445,0.720668788305,accepted
+0.110044764846,0.184854460225,0.139331453512,0.110044764846,1.50665039766,2.0,7.0,2.0,inf,0.110044764846,1.50665039766,accepted
+0.766431005617,0.220241048192,0.0769799126026,0.766431005617,0.687574395089,2.0,7.0,2.0,inf,0.766431005617,0.687574395089,accepted
+0.694784936191,0.184854460225,0.39961784645,0.694784936191,1.37029270323,2.0,7.0,2.0,1.71216252914,0.694784936191,1.37029270323,accepted
+0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,2.0,7.0,2.0,0.908333052367,0.227236453264,1.41712807233,accepted
+0.148086928797,0.184854460225,0.139331453512,0.148086928797,1.43208570884,2.0,7.0,2.0,0.287837470856,0.148086928797,1.43208570884,accepted
+0.766431005617,0.184854460225,0.203061229597,0.766431005617,0.875972693903,2.0,7.0,3.0,inf,0.766431005617,0.875972693903,accepted
+0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,6.0,8.0,1.0,inf,0.110044764846,1.2371480565,accepted
+0.766431005617,0.00919704931071,0.0769799126026,0.766431005617,0.276402028431,6.0,8.0,1.0,inf,0.766431005617,0.276402028431,accepted
+0.535774680445,0.184854460225,0.0368869491007,0.535774680445,0.720668788305,6.0,8.0,1.0,1.41592495639,0.535774680445,0.720668788305,accepted
+0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,6.0,8.0,1.0,0.765262284562,0.337615172224,1.00909282563,accepted
+0.278871351918,0.184854460225,0.0769799126026,0.278871351918,1.07986463561,6.0,8.0,1.0,0.584075043611,0.278871351918,1.07986463561,accepted
+0.110044764846,0.184854460225,0.139331453512,0.110044764846,1.50665039766,6.0,8.0,2.0,inf,0.110044764846,1.50665039766,accepted
+0.77987554478,0.184854460225,0.0769799126026,0.77987554478,0.605473365792,6.0,8.0,2.0,inf,0.77987554478,0.605473365792,accepted
+0.694784936191,0.184854460225,0.39961784645,0.694784936191,1.37029270323,6.0,8.0,2.0,1.69429602798,0.694784936191,1.37029270323,accepted
+0.766431005617,0.184854460225,0.0769799126026,0.766431005617,0.61568905179,6.0,8.0,2.0,0.97572232943,0.766431005617,0.61568905179,accepted
+0.227236453264,0.00706630852238,0.362760231915,0.227236453264,1.41712807233,6.0,8.0,2.0,1.02427767057,0.227236453264,1.41712807233,accepted
+0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,1.0,9.0,1.0,inf,0.110044764846,1.2371480565,accepted
+0.766431005617,0.00919704931071,0.0769799126026,0.766431005617,0.276402028431,1.0,9.0,1.0,inf,0.766431005617,0.276402028431,accepted
+0.57200419986,0.184854460225,0.0368869491007,0.57200419986,0.689255349318,1.0,9.0,1.0,0.813821875556,0.57200419986,0.689255349318,accepted
+0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,1.0,9.0,1.0,0.624852683938,0.337615172224,1.00909282563,accepted
+0.486742150617,0.184854460225,0.0368869491007,0.486742150617,0.764927153681,1.0,9.0,1.0,0.602103080833,0.486742150617,0.764927153681,accepted
+0.250461821735,0.184854460225,0.0368869491007,0.250461821735,1.01941071485,1.0,9.0,1.0,0.584075043611,0.250461821735,1.01941071485,accepted
+0.535774680445,0.184854460225,0.0368869491007,0.535774680445,0.720668788305,1.0,9.0,1.0,0.208659743602,0.535774680445,0.720668788305,accepted
+0.110044764846,0.184854460225,0.139331453512,0.110044764846,1.50665039766,1.0,9.0,2.0,inf,0.110044764846,1.50665039766,accepted
+0.864167563818,0.184854460225,0.0368869491007,0.864167563818,0.465627310549,1.0,9.0,2.0,inf,0.864167563818,0.465627310549,accepted
+0.278871351918,0.184854460225,0.0769799126026,0.278871351918,1.07986463561,1.0,9.0,2.0,2.0,0.278871351918,1.07986463561,accepted
+0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,5.0,10.0,1.0,inf,0.110044764846,1.2371480565,accepted
+0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,5.0,10.0,1.0,inf,0.740768615329,0.220002925316,accepted
+0.57200419986,0.184854460225,0.0368869491007,0.57200419986,0.689255349318,5.0,10.0,1.0,0.817240341286,0.57200419986,0.689255349318,accepted
+0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,5.0,10.0,1.0,0.624811669954,0.337615172224,1.00909282563,accepted
+0.486742150617,0.184854460225,0.0368869491007,0.486742150617,0.764927153681,5.0,10.0,1.0,0.597740245314,0.486742150617,0.764927153681,accepted
+0.250461821735,0.184854460225,0.0368869491007,0.250461821735,1.01941071485,5.0,10.0,1.0,0.483412783551,0.250461821735,1.01941071485,accepted
+0.140084013608,0.184854460225,0.0368869491007,0.140084013608,1.18224255812,5.0,10.0,1.0,0.436695556704,0.140084013608,1.18224255812,accepted
+0.535774680445,0.184854460225,0.0368869491007,0.535774680445,0.720668788305,5.0,10.0,1.0,0.209577537556,0.535774680445,0.720668788305,accepted
+0.766431005617,0.00919704931071,0.0769799126026,0.766431005617,0.276402028431,5.0,10.0,2.0,inf,0.766431005617,0.276402028431,accepted
+0.110044764846,0.184854460225,0.139331453512,0.110044764846,1.50665039766,5.0,10.0,2.0,inf,0.110044764846,1.50665039766,accepted
+0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,0.0,11.0,1.0,inf,0.110044764846,1.2371480565,accepted
+0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,0.0,11.0,1.0,inf,0.740768615329,0.220002925316,accepted
+0.465598022674,0.00919704931071,0.0368869491007,0.465598022674,0.410263067925,0.0,11.0,1.0,1.4149805866,0.465598022674,0.410263067925,accepted
+0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,0.0,11.0,1.0,0.939973901275,0.337615172224,1.00909282563,accepted
+0.250461821735,0.184854460225,0.0368869491007,0.250461821735,1.01941071485,0.0,11.0,1.0,0.483412783551,0.250461821735,1.01941071485,accepted
+0.140084013608,0.184854460225,0.0368869491007,0.140084013608,1.18224255812,0.0,11.0,1.0,0.436695556704,0.140084013608,1.18224255812,accepted
+0.766431005617,0.00919704931071,0.0769799126026,0.766431005617,0.276402028431,0.0,11.0,2.0,inf,0.766431005617,0.276402028431,accepted
+0.110044764846,0.00919704931071,0.250251364021,0.110044764846,1.33596804376,0.0,11.0,2.0,inf,0.110044764846,1.33596804376,accepted
+0.486742150617,0.184854460225,0.0368869491007,0.486742150617,0.764927153681,0.0,11.0,2.0,1.86602707654,0.486742150617,0.764927153681,accepted
+0.740768615329,0.0939819368287,0.0368869491007,0.740768615329,0.376929885588,0.0,11.0,2.0,0.887165606871,0.740768615329,0.376929885588,accepted
+0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,2.0,12.0,1.0,inf,0.740768615329,0.220002925316,accepted
+0.110044764846,0.0938732896684,0.0368869491007,0.110044764846,1.0008561335,2.0,12.0,1.0,inf,0.110044764846,1.0008561335,accepted
+0.465598022674,0.00919704931071,0.0368869491007,0.465598022674,0.410263067925,2.0,12.0,1.0,2.0,0.465598022674,0.410263067925,accepted
+0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,2.0,12.0,2.0,inf,0.110044764846,1.2371480565,accepted
+0.766431005617,0.00919704931071,0.0769799126026,0.766431005617,0.276402028431,2.0,12.0,2.0,inf,0.766431005617,0.276402028431,accepted
+0.486742150617,0.184854460225,0.0368869491007,0.486742150617,0.764927153681,2.0,12.0,2.0,0.848182252541,0.486742150617,0.764927153681,accepted
+0.638270586878,0.184854460225,0.0368869491007,0.638270586878,0.634271442795,2.0,12.0,2.0,0.790857585549,0.638270586878,0.634271442795,accepted
+0.337615172224,0.184854460225,0.0769799126026,0.337615172224,1.00909282563,2.0,12.0,2.0,0.962497323004,0.337615172224,1.00909282563,accepted
+0.740768615329,0.0939819368287,0.0368869491007,0.740768615329,0.376929885588,2.0,12.0,2.0,0.567742703848,0.740768615329,0.376929885588,accepted
+0.140084013608,0.184854460225,0.0368869491007,0.140084013608,1.18224255812,2.0,12.0,2.0,0.584075043611,0.140084013608,1.18224255812,accepted
+0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,7.0,13.0,1.0,inf,0.740768615329,0.220002925316,accepted
+0.0480589254405,0.00919704931071,0.0368869491007,0.0480589254405,0.904364911855,7.0,13.0,1.0,inf,0.0480589254405,0.904364911855,accepted
+0.43385165125,0.00919704931071,0.0368869491007,0.43385165125,0.435519821735,7.0,13.0,1.0,1.32475105339,0.43385165125,0.435519821735,accepted
+0.465598022674,0.00919704931071,0.0368869491007,0.465598022674,0.410263067925,7.0,13.0,1.0,0.757983752596,0.465598022674,0.410263067925,accepted
+0.110044764846,0.0938732896684,0.0368869491007,0.110044764846,1.0008561335,7.0,13.0,2.0,inf,0.110044764846,1.0008561335,accepted
+0.766431005617,0.00919704931071,0.0769799126026,0.766431005617,0.276402028431,7.0,13.0,2.0,inf,0.766431005617,0.276402028431,accepted
+0.486742150617,0.184854460225,0.0368869491007,0.486742150617,0.764927153681,7.0,13.0,2.0,1.31076353304,0.486742150617,0.764927153681,accepted
+0.638270586878,0.184854460225,0.0368869491007,0.638270586878,0.634271442795,7.0,13.0,2.0,0.9225795106,0.638270586878,0.634271442795,accepted
+0.740768615329,0.0939819368287,0.0368869491007,0.740768615329,0.376929885588,7.0,13.0,2.0,0.689236466957,0.740768615329,0.376929885588,accepted
+0.110044764846,0.184854460225,0.0368869491007,0.110044764846,1.2371480565,7.0,13.0,3.0,inf,0.110044764846,1.2371480565,accepted
+0.0480589254405,0.00919704931071,0.0368869491007,0.0480589254405,0.904364911855,2.0,14.0,1.0,inf,0.0480589254405,0.904364911855,accepted
+0.844875311909,0.00919704931071,0.0368869491007,0.844875311909,0.157598636751,2.0,14.0,1.0,inf,0.844875311909,0.157598636751,accepted
+0.281854773006,0.00919704931071,0.0368869491007,0.281854773006,0.571840697907,2.0,14.0,1.0,1.11200143643,0.281854773006,0.571840697907,accepted
+0.43385165125,0.00919704931071,0.0368869491007,0.43385165125,0.435519821735,2.0,14.0,1.0,0.446966472614,0.43385165125,0.435519821735,accepted
+0.628942842742,0.00919704931071,0.0368869491007,0.628942842742,0.292145946539,2.0,14.0,1.0,0.414011590831,0.628942842742,0.292145946539,accepted
+0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,2.0,14.0,1.0,0.366643659721,0.740768615329,0.220002925316,accepted
+0.547971882752,0.00919704931071,0.0368869491007,0.547971882752,0.348486150901,2.0,14.0,1.0,0.363168285924,0.547971882752,0.348486150901,accepted
+0.465598022674,0.00919704931071,0.0368869491007,0.465598022674,0.410263067925,2.0,14.0,1.0,0.259767640834,0.465598022674,0.410263067925,accepted
+0.803480928019,0.00919704931071,0.0368869491007,0.803480928019,0.181923720294,2.0,14.0,1.0,0.214219331903,0.803480928019,0.181923720294,accepted
+0.110044764846,0.0938732896684,0.0368869491007,0.110044764846,1.0008561335,2.0,14.0,2.0,inf,0.110044764846,1.0008561335,accepted
+0.0480589254405,0.00919704931071,0.0368869491007,0.0480589254405,0.904364911855,4.0,15.0,1.0,inf,0.0480589254405,0.904364911855,accepted
+0.956800884557,0.00919704931071,0.0368869491007,0.956800884557,0.094661790274,4.0,15.0,1.0,inf,0.956800884557,0.094661790274,accepted
+0.281854773006,0.00919704931071,0.0368869491007,0.281854773006,0.571840697907,4.0,15.0,1.0,0.704629371176,0.281854773006,0.571840697907,accepted
+0.43385165125,0.00919704931071,0.0368869491007,0.43385165125,0.435519821735,4.0,15.0,1.0,0.36506593516,0.43385165125,0.435519821735,accepted
+0.2976351707,0.00919704931071,0.0368869491007,0.2976351707,0.556200599822,4.0,15.0,1.0,0.335619852656,0.2976351707,0.556200599822,accepted
+0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,4.0,15.0,1.0,0.403785617585,0.740768615329,0.220002925316,accepted
+0.547971882752,0.00919704931071,0.0368869491007,0.547971882752,0.348486150901,4.0,15.0,1.0,0.325625370568,0.547971882752,0.348486150901,accepted
+0.628942842742,0.00919704931071,0.0368869491007,0.628942842742,0.292145946539,4.0,15.0,1.0,0.370837266347,0.628942842742,0.292145946539,accepted
+0.844875311909,0.00919704931071,0.0368869491007,0.844875311909,0.157598636751,4.0,15.0,1.0,0.392525624748,0.844875311909,0.157598636751,accepted
+0.465598022674,0.00919704931071,0.0368869491007,0.465598022674,0.410263067925,4.0,15.0,1.0,0.233068830687,0.465598022674,0.410263067925,accepted
+0.0480589254405,0.00919704931071,0.0368869491007,0.0480589254405,0.904364911855,4.0,15.0,1.0,inf,0.0480589254405,0.904364911855,final
+0.956800884557,0.00919704931071,0.0368869491007,0.956800884557,0.094661790274,4.0,15.0,1.0,inf,0.956800884557,0.094661790274,final
+0.281854773006,0.00919704931071,0.0368869491007,0.281854773006,0.571840697907,4.0,15.0,1.0,0.704629371176,0.281854773006,0.571840697907,final
+0.43385165125,0.00919704931071,0.0368869491007,0.43385165125,0.435519821735,4.0,15.0,1.0,0.36506593516,0.43385165125,0.435519821735,final
+0.2976351707,0.00919704931071,0.0368869491007,0.2976351707,0.556200599822,4.0,15.0,1.0,0.335619852656,0.2976351707,0.556200599822,final
+0.740768615329,0.00919704931071,0.0368869491007,0.740768615329,0.220002925316,4.0,15.0,1.0,0.403785617585,0.740768615329,0.220002925316,final
+0.547971882752,0.00919704931071,0.0368869491007,0.547971882752,0.348486150901,4.0,15.0,1.0,0.325625370568,0.547971882752,0.348486150901,final
+0.628942842742,0.00919704931071,0.0368869491007,0.628942842742,0.292145946539,4.0,15.0,1.0,0.370837266347,0.628942842742,0.292145946539,final
+0.844875311909,0.00919704931071,0.0368869491007,0.844875311909,0.157598636751,4.0,15.0,1.0,0.392525624748,0.844875311909,0.157598636751,final
+0.465598022674,0.00919704931071,0.0368869491007,0.465598022674,0.410263067925,4.0,15.0,1.0,0.233068830687,0.465598022674,0.410263067925,final
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv
new file mode 100644
index 0000000000..ff6c528364
--- /dev/null
+++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv
@@ -0,0 +1,255 @@
+x1,x2,x3,x4,x5,x6,obj1,obj2,obj3,age,batchId,rank,CD,FitnessEvaluation_obj1,FitnessEvaluation_obj2,ConstraintEvaluation_expConstr3,ConstraintEvaluation_impConstr3,accepted
+4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,1.0,14.0,inf,112.0,-5.0,-2.0,1.0,first
+7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,23.0,0.0,1.0,3.0,1.6,89.0,-15.0,3.0,11.0,first
+4.0,3.0,6.0,7.0,2.0,5.0,96.0,10.0,46.0,0.0,1.0,18.0,inf,113.0,-4.0,-3.0,4.0,first
+7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,29.0,0.0,1.0,3.0,inf,86.0,-15.0,2.0,14.0,first
+4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,29.0,0.0,1.0,4.0,inf,90.0,-16.0,1.0,10.0,first
+4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,26.0,0.0,1.0,4.0,0.899122807018,95.0,-16.0,3.0,5.0,first
+4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,26.0,0.0,1.0,3.0,inf,95.0,-18.0,3.0,5.0,first
+6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,23.0,0.0,1.0,4.0,inf,97.0,-12.0,3.0,3.0,first
+4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,33.0,0.0,1.0,5.0,1.59523809524,94.0,-14.0,1.0,6.0,first
+2.0,4.0,6.0,5.0,3.0,7.0,105.0,10.0,38.0,0.0,1.0,25.0,inf,116.0,-1.0,-1.0,-5.0,first
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,1.0,1.0,inf,85.0,-19.0,5.0,15.0,first
+6.0,2.0,7.0,5.0,3.0,4.0,90.0,10.0,41.0,0.0,1.0,15.0,inf,112.0,-5.0,-2.0,10.0,first
+3.0,6.0,4.0,7.0,5.0,2.0,92.0,15.0,40.0,0.0,1.0,8.0,inf,111.0,-6.0,-1.0,8.0,first
+2.0,3.0,6.0,7.0,4.0,5.0,104.0,8.0,46.0,0.0,1.0,26.0,inf,117.0,0.0,-3.0,-4.0,first
+7.0,3.0,5.0,4.0,2.0,6.0,90.0,13.0,31.0,0.0,1.0,5.0,inf,90.0,-13.0,1.0,10.0,first
+5.0,3.0,2.0,6.0,4.0,7.0,103.0,11.0,30.0,0.0,1.0,19.0,inf,113.0,-4.0,2.0,-3.0,first
+6.0,3.0,4.0,7.0,2.0,5.0,92.0,12.0,40.0,0.0,1.0,9.0,inf,111.0,-6.0,-1.0,8.0,first
+2.0,5.0,4.0,6.0,7.0,3.0,101.0,12.0,36.0,0.0,1.0,10.0,inf,111.0,-6.0,0.0,-1.0,first
+4.0,6.0,2.0,7.0,5.0,3.0,93.0,16.0,34.0,0.0,1.0,5.0,inf,93.0,-16.0,1.0,7.0,first
+5.0,6.0,3.0,2.0,7.0,4.0,93.0,17.0,17.0,0.0,1.0,2.0,inf,93.0,-17.0,5.0,7.0,first
+3.0,6.0,7.0,4.0,5.0,2.0,89.0,15.0,37.0,0.0,1.0,11.0,inf,111.0,-6.0,-1.0,11.0,first
+3.0,6.0,4.0,2.0,5.0,7.0,102.0,15.0,20.0,0.0,1.0,16.0,inf,112.0,-5.0,4.0,-2.0,first
+6.0,2.0,5.0,3.0,4.0,7.0,99.0,10.0,27.0,0.0,1.0,5.0,inf,99.0,-10.0,2.0,1.0,first
+4.0,3.0,2.0,6.0,5.0,7.0,107.0,10.0,30.0,0.0,1.0,27.0,inf,117.0,0.0,2.0,-7.0,first
+7.0,2.0,6.0,5.0,3.0,4.0,88.0,11.0,38.0,0.0,1.0,12.0,inf,111.0,-6.0,-1.0,12.0,first
+5.0,3.0,6.0,7.0,2.0,4.0,91.0,11.0,46.0,0.0,1.0,21.0,inf,113.0,-4.0,-3.0,9.0,first
+4.0,2.0,3.0,5.0,6.0,7.0,109.0,8.0,29.0,0.0,1.0,29.0,inf,119.0,2.0,2.0,-9.0,first
+5.0,3.0,6.0,7.0,4.0,2.0,89.0,11.0,46.0,0.0,1.0,22.0,inf,113.0,-4.0,-3.0,11.0,first
+2.0,3.0,5.0,6.0,4.0,7.0,109.0,8.0,39.0,0.0,1.0,31.0,inf,120.0,3.0,-1.0,-9.0,first
+3.0,2.0,7.0,4.0,6.0,5.0,104.0,7.0,37.0,0.0,1.0,23.0,inf,115.0,-2.0,-1.0,-4.0,first
+5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,1.0,1.0,0.907894736842,83.0,-19.0,0.0,17.0,first
+3.0,5.0,6.0,4.0,7.0,2.0,94.0,13.0,34.0,0.0,1.0,6.0,inf,94.0,-13.0,0.0,6.0,first
+7.0,6.0,2.0,4.0,3.0,5.0,86.0,19.0,22.0,0.0,1.0,2.0,inf,86.0,-19.0,4.0,14.0,first
+7.0,2.0,5.0,4.0,6.0,3.0,90.0,11.0,31.0,0.0,1.0,6.0,inf,90.0,-11.0,1.0,10.0,first
+3.0,4.0,2.0,6.0,7.0,5.0,106.0,11.0,30.0,0.0,1.0,24.0,inf,116.0,-1.0,2.0,-6.0,first
+4.0,6.0,3.0,2.0,7.0,5.0,98.0,16.0,17.0,0.0,1.0,4.0,inf,98.0,-16.0,5.0,2.0,first
+4.0,6.0,7.0,3.0,2.0,5.0,89.0,16.0,33.0,0.0,1.0,4.0,1.45175438596,89.0,-16.0,0.0,11.0,first
+5.0,6.0,2.0,3.0,7.0,4.0,94.0,17.0,18.0,0.0,1.0,3.0,1.43333333333,94.0,-17.0,5.0,6.0,first
+7.0,2.0,5.0,4.0,3.0,6.0,93.0,11.0,31.0,0.0,1.0,7.0,inf,93.0,-11.0,1.0,7.0,first
+7.0,3.0,4.0,6.0,5.0,2.0,86.0,13.0,36.0,0.0,1.0,4.0,inf,86.0,-13.0,0.0,14.0,first
+5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,1.0,2.0,inf,83.0,-19.0,0.0,17.0,first
+5.0,2.0,7.0,4.0,6.0,3.0,94.0,9.0,37.0,0.0,1.0,13.0,inf,111.0,-6.0,-1.0,6.0,first
+3.0,2.0,5.0,6.0,4.0,7.0,108.0,7.0,39.0,0.0,1.0,28.0,inf,119.0,2.0,-1.0,-8.0,first
+7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,0.0,1.0,1.0,inf,81.0,-17.0,0.0,19.0,first
+2.0,4.0,7.0,5.0,6.0,3.0,99.0,10.0,41.0,0.0,1.0,17.0,inf,112.0,-5.0,-2.0,1.0,first
+4.0,6.0,7.0,2.0,5.0,3.0,88.0,16.0,29.0,0.0,1.0,3.0,inf,88.0,-16.0,1.0,12.0,first
+5.0,6.0,3.0,2.0,4.0,7.0,96.0,17.0,17.0,0.0,1.0,3.0,inf,96.0,-17.0,5.0,4.0,first
+5.0,7.0,3.0,6.0,4.0,2.0,84.0,19.0,33.0,0.0,1.0,1.0,2.5,84.0,-19.0,1.0,16.0,first
+4.0,3.0,7.0,6.0,2.0,5.0,95.0,10.0,45.0,0.0,1.0,20.0,inf,113.0,-4.0,-3.0,5.0,first
+2.0,4.0,3.0,5.0,7.0,6.0,110.0,10.0,29.0,0.0,1.0,30.0,inf,120.0,3.0,2.0,-10.0,first
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,2.0,1.0,inf,85.0,-19.0,5.0,15.0,accepted
+7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,0.0,2.0,1.0,inf,81.0,-17.0,0.0,19.0,accepted
+5.0,7.0,3.0,6.0,4.0,2.0,84.0,19.0,33.0,0.0,2.0,1.0,1.5,84.0,-19.0,1.0,16.0,accepted
+5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,2.0,1.0,inf,83.0,-19.0,0.0,17.0,accepted
+5.0,6.0,3.0,2.0,7.0,4.0,93.0,17.0,17.0,0.0,2.0,2.0,inf,93.0,-17.0,5.0,7.0,accepted
+7.0,6.0,2.0,4.0,3.0,5.0,86.0,19.0,22.0,0.0,2.0,2.0,inf,86.0,-19.0,4.0,14.0,accepted
+5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,2.0,2.0,inf,83.0,-19.0,0.0,17.0,accepted
+7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,29.0,0.0,2.0,3.0,0.638111888112,86.0,-15.0,2.0,14.0,accepted
+4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,26.0,0.0,2.0,3.0,inf,95.0,-18.0,3.0,5.0,accepted
+4.0,6.0,7.0,2.0,5.0,3.0,88.0,16.0,29.0,0.0,2.0,3.0,1.31118881119,88.0,-16.0,1.0,12.0,accepted
+5.0,6.0,3.0,2.0,4.0,7.0,96.0,17.0,17.0,0.0,2.0,3.0,inf,96.0,-17.0,5.0,4.0,accepted
+7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,23.0,0.0,2.0,3.0,1.10314685315,89.0,-15.0,3.0,11.0,accepted
+5.0,6.0,2.0,3.0,7.0,4.0,94.0,17.0,18.0,0.0,2.0,3.0,1.02622377622,94.0,-17.0,5.0,6.0,accepted
+4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,29.0,0.0,2.0,4.0,inf,90.0,-16.0,1.0,10.0,accepted
+4.0,6.0,3.0,2.0,7.0,5.0,98.0,16.0,17.0,0.0,2.0,4.0,inf,98.0,-16.0,5.0,2.0,accepted
+7.0,3.0,4.0,6.0,5.0,2.0,86.0,13.0,36.0,0.0,2.0,4.0,inf,86.0,-13.0,0.0,14.0,accepted
+7.0,3.0,5.0,2.0,6.0,4.0,90.0,13.0,23.0,0.0,2.0,4.0,inf,90.0,-13.0,3.0,10.0,accepted
+4.0,6.0,7.0,3.0,2.0,5.0,89.0,16.0,33.0,0.0,2.0,4.0,0.460526315789,89.0,-16.0,0.0,11.0,accepted
+6.0,4.0,7.0,3.0,2.0,5.0,87.0,14.0,33.0,0.0,2.0,4.0,1.07456140351,87.0,-14.0,0.0,13.0,accepted
+6.0,4.0,5.0,2.0,3.0,7.0,94.0,14.0,23.0,0.0,2.0,4.0,0.907894736842,94.0,-14.0,3.0,6.0,accepted
+4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,26.0,0.0,2.0,4.0,1.31578947368,95.0,-16.0,3.0,5.0,accepted
+7.0,3.0,5.0,4.0,2.0,6.0,90.0,13.0,31.0,0.0,2.0,5.0,2.0,90.0,-13.0,1.0,10.0,accepted
+4.0,6.0,2.0,7.0,5.0,3.0,93.0,16.0,34.0,0.0,2.0,5.0,inf,93.0,-16.0,1.0,7.0,accepted
+6.0,4.0,5.0,2.0,3.0,7.0,94.0,14.0,23.0,0.0,2.0,5.0,inf,94.0,-14.0,3.0,6.0,accepted
+4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,33.0,0.0,2.0,6.0,1.6,94.0,-14.0,1.0,6.0,accepted
+7.0,2.0,5.0,4.0,6.0,3.0,90.0,11.0,31.0,0.0,2.0,6.0,inf,90.0,-11.0,1.0,10.0,accepted
+6.0,3.0,5.0,2.0,7.0,4.0,94.0,12.0,23.0,0.0,2.0,6.0,inf,94.0,-12.0,3.0,6.0,accepted
+3.0,5.0,6.0,4.0,7.0,2.0,94.0,13.0,34.0,0.0,2.0,7.0,inf,94.0,-13.0,0.0,6.0,accepted
+7.0,2.0,5.0,4.0,3.0,6.0,93.0,11.0,31.0,0.0,2.0,7.0,inf,93.0,-11.0,1.0,7.0,accepted
+6.0,3.0,5.0,2.0,7.0,4.0,94.0,12.0,23.0,0.0,2.0,7.0,inf,94.0,-12.0,3.0,6.0,accepted
+6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,23.0,0.0,2.0,9.0,inf,97.0,-12.0,3.0,3.0,accepted
+5.0,3.0,7.0,2.0,4.0,6.0,96.0,11.0,29.0,0.0,2.0,8.0,2.075,96.0,-11.0,1.0,4.0,accepted
+6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,23.0,0.0,2.0,8.0,inf,97.0,-12.0,3.0,3.0,accepted
+6.0,2.0,5.0,3.0,4.0,7.0,99.0,10.0,27.0,0.0,2.0,10.0,inf,99.0,-10.0,2.0,1.0,accepted
+5.0,3.0,2.0,7.0,6.0,4.0,99.0,11.0,34.0,0.0,2.0,10.0,1.72916666667,99.0,-11.0,1.0,1.0,accepted
+3.0,6.0,4.0,7.0,5.0,2.0,92.0,15.0,40.0,0.0,2.0,6.0,inf,111.0,-6.0,-1.0,8.0,accepted
+6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,6.0,inf,101.0,-8.0,-2.0,11.0,accepted
+6.0,3.0,4.0,7.0,2.0,5.0,92.0,12.0,40.0,0.0,2.0,7.0,1.25,111.0,-6.0,-1.0,8.0,accepted
+4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,2.0,13.0,inf,101.0,-8.0,-2.0,1.0,accepted
+2.0,5.0,4.0,6.0,7.0,3.0,101.0,12.0,36.0,0.0,2.0,10.0,inf,111.0,-6.0,0.0,-1.0,accepted
+6.0,4.0,5.0,7.0,2.0,3.0,85.0,14.0,43.0,0.0,2.0,3.0,inf,101.0,-8.0,-2.0,15.0,accepted
+3.0,6.0,7.0,4.0,5.0,2.0,89.0,15.0,37.0,0.0,2.0,5.0,1.0,111.0,-6.0,-1.0,11.0,accepted
+6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,7.0,inf,101.0,-8.0,-2.0,11.0,accepted
+7.0,2.0,6.0,5.0,3.0,4.0,88.0,11.0,38.0,0.0,2.0,5.0,inf,111.0,-6.0,-1.0,12.0,accepted
+4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,2.0,12.0,inf,101.0,-8.0,-2.0,1.0,accepted
+5.0,2.0,7.0,4.0,6.0,3.0,94.0,9.0,37.0,0.0,2.0,8.0,inf,111.0,-6.0,-1.0,6.0,accepted
+6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,8.0,inf,101.0,-8.0,-2.0,11.0,accepted
+6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,9.0,inf,101.0,-8.0,-2.0,11.0,accepted
+4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,43.0,0.0,2.0,11.0,inf,101.0,-8.0,-2.0,1.0,accepted
+6.0,3.0,5.0,7.0,2.0,4.0,89.0,12.0,43.0,0.0,2.0,10.0,inf,101.0,-8.0,-2.0,11.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,3.0,1.0,inf,85.0,-19.0,5.0,15.0,accepted
+7.0,6.0,3.0,5.0,4.0,2.0,80.0,19.0,29.0,1.0,3.0,1.0,inf,80.0,-19.0,2.0,20.0,accepted
+6.0,7.0,4.0,2.0,5.0,3.0,83.0,20.0,20.0,1.0,3.0,1.0,inf,83.0,-20.0,4.0,17.0,accepted
+7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,1.0,3.0,2.0,inf,81.0,-17.0,0.0,19.0,accepted
+6.0,7.0,4.0,3.0,2.0,5.0,84.0,20.0,24.0,1.0,3.0,2.0,inf,84.0,-20.0,3.0,16.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,3.0,2.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,5.0,2.0,4.0,83.0,20.0,29.0,1.0,3.0,2.0,1.7149122807,83.0,-20.0,2.0,17.0,accepted
+7.0,6.0,4.0,2.0,3.0,5.0,84.0,19.0,20.0,1.0,3.0,2.0,1.2850877193,84.0,-19.0,4.0,16.0,accepted
+5.0,7.0,3.0,6.0,4.0,2.0,84.0,19.0,33.0,1.0,3.0,3.0,inf,84.0,-19.0,1.0,16.0,accepted
+7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,1.0,3.0,3.0,inf,81.0,-17.0,0.0,19.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,3.0,3.0,inf,85.0,-19.0,5.0,15.0,accepted
+5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,1.0,3.0,3.0,inf,83.0,-19.0,0.0,17.0,accepted
+5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,1.0,3.0,4.0,inf,83.0,-19.0,0.0,17.0,accepted
+7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,1.0,3.0,4.0,inf,81.0,-17.0,0.0,19.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,3.0,4.0,inf,85.0,-19.0,5.0,15.0,accepted
+7.0,6.0,3.0,2.0,4.0,5.0,86.0,19.0,17.0,1.0,3.0,5.0,inf,86.0,-19.0,5.0,14.0,accepted
+5.0,6.0,3.0,2.0,7.0,4.0,93.0,17.0,17.0,1.0,3.0,6.0,inf,93.0,-17.0,5.0,7.0,accepted
+7.0,6.0,2.0,4.0,3.0,5.0,86.0,19.0,22.0,1.0,3.0,6.0,inf,86.0,-19.0,4.0,14.0,accepted
+7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,29.0,1.0,3.0,7.0,inf,86.0,-15.0,2.0,14.0,accepted
+4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,26.0,1.0,3.0,7.0,inf,95.0,-18.0,3.0,5.0,accepted
+5.0,6.0,3.0,2.0,4.0,7.0,96.0,17.0,17.0,1.0,3.0,7.0,inf,96.0,-17.0,5.0,4.0,accepted
+7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,23.0,1.0,3.0,7.0,inf,89.0,-15.0,3.0,11.0,accepted
+6.0,5.0,3.0,7.0,2.0,4.0,87.0,16.0,37.0,1.0,3.0,7.0,inf,87.0,-16.0,0.0,13.0,accepted
+5.0,6.0,2.0,3.0,7.0,4.0,94.0,17.0,18.0,1.0,3.0,7.0,1.23333333333,94.0,-17.0,5.0,6.0,accepted
+4.0,6.0,7.0,2.0,5.0,3.0,88.0,16.0,29.0,1.0,3.0,7.0,0.933333333333,88.0,-16.0,1.0,12.0,accepted
+4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,29.0,1.0,3.0,8.0,inf,90.0,-16.0,1.0,10.0,accepted
+4.0,6.0,3.0,2.0,7.0,5.0,98.0,16.0,17.0,1.0,3.0,8.0,inf,98.0,-16.0,5.0,2.0,accepted
+7.0,3.0,4.0,6.0,5.0,2.0,86.0,13.0,36.0,1.0,3.0,8.0,inf,86.0,-13.0,0.0,14.0,accepted
+7.0,3.0,5.0,2.0,6.0,4.0,90.0,13.0,23.0,1.0,3.0,8.0,inf,90.0,-13.0,3.0,10.0,accepted
+4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,26.0,1.0,3.0,8.0,0.649122807018,95.0,-16.0,3.0,5.0,accepted
+6.0,4.0,7.0,3.0,2.0,5.0,87.0,14.0,33.0,1.0,3.0,8.0,1.12719298246,87.0,-14.0,0.0,13.0,accepted
+6.0,4.0,5.0,2.0,3.0,7.0,94.0,14.0,23.0,1.0,3.0,8.0,0.907894736842,94.0,-14.0,3.0,6.0,accepted
+4.0,6.0,7.0,3.0,2.0,5.0,89.0,16.0,33.0,1.0,3.0,8.0,1.07456140351,89.0,-16.0,0.0,11.0,accepted
+7.0,3.0,5.0,4.0,2.0,6.0,90.0,13.0,31.0,1.0,3.0,9.0,inf,90.0,-13.0,1.0,10.0,accepted
+4.0,6.0,2.0,7.0,5.0,3.0,93.0,16.0,34.0,1.0,3.0,9.0,inf,93.0,-16.0,1.0,7.0,accepted
+6.0,4.0,5.0,2.0,3.0,7.0,94.0,14.0,23.0,1.0,3.0,9.0,inf,94.0,-14.0,3.0,6.0,accepted
+4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,33.0,1.0,3.0,10.0,inf,94.0,-14.0,1.0,6.0,accepted
+7.0,2.0,5.0,4.0,6.0,3.0,90.0,11.0,31.0,1.0,3.0,10.0,inf,90.0,-11.0,1.0,10.0,accepted
+6.0,3.0,5.0,2.0,7.0,4.0,94.0,12.0,23.0,1.0,3.0,10.0,inf,94.0,-12.0,3.0,6.0,accepted
+7.0,2.0,5.0,4.0,3.0,6.0,93.0,11.0,31.0,1.0,3.0,11.0,inf,93.0,-11.0,1.0,7.0,accepted
+6.0,3.0,5.0,2.0,7.0,4.0,94.0,12.0,23.0,1.0,3.0,11.0,inf,94.0,-12.0,3.0,6.0,accepted
+2.0,6.0,3.0,7.0,5.0,4.0,100.0,14.0,37.0,1.0,3.0,11.0,inf,100.0,-14.0,0.0,0.0,accepted
+3.0,5.0,6.0,4.0,7.0,2.0,94.0,13.0,34.0,1.0,3.0,11.0,1.95238095238,94.0,-13.0,0.0,6.0,accepted
+6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,23.0,1.0,3.0,12.0,inf,97.0,-12.0,3.0,3.0,accepted
+5.0,3.0,7.0,2.0,4.0,6.0,96.0,11.0,29.0,1.0,3.0,12.0,inf,96.0,-11.0,1.0,4.0,accepted
+2.0,6.0,3.0,7.0,5.0,4.0,100.0,14.0,37.0,1.0,3.0,12.0,inf,100.0,-14.0,0.0,0.0,accepted
+6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,23.0,1.0,3.0,13.0,inf,97.0,-12.0,3.0,3.0,accepted
+2.0,6.0,3.0,7.0,5.0,4.0,100.0,14.0,37.0,1.0,3.0,13.0,inf,100.0,-14.0,0.0,0.0,accepted
+6.0,2.0,5.0,3.0,4.0,7.0,99.0,10.0,27.0,1.0,3.0,14.0,inf,99.0,-10.0,2.0,1.0,accepted
+5.0,3.0,2.0,7.0,6.0,4.0,99.0,11.0,34.0,1.0,3.0,14.0,inf,99.0,-11.0,1.0,1.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,1.0,inf,85.0,-19.0,5.0,15.0,accepted
+7.0,6.0,3.0,5.0,4.0,2.0,80.0,19.0,29.0,1.0,4.0,1.0,inf,80.0,-19.0,2.0,20.0,accepted
+6.0,7.0,4.0,2.0,5.0,3.0,83.0,20.0,20.0,1.0,4.0,1.0,inf,83.0,-20.0,4.0,17.0,accepted
+7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,1.0,4.0,2.0,inf,81.0,-17.0,0.0,19.0,accepted
+6.0,7.0,4.0,3.0,2.0,5.0,84.0,20.0,24.0,1.0,4.0,2.0,inf,84.0,-20.0,3.0,16.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,2.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,5.0,2.0,4.0,83.0,20.0,29.0,1.0,4.0,2.0,1.7149122807,83.0,-20.0,2.0,17.0,accepted
+7.0,6.0,4.0,2.0,3.0,5.0,84.0,19.0,20.0,1.0,4.0,2.0,1.2850877193,84.0,-19.0,4.0,16.0,accepted
+5.0,7.0,3.0,6.0,4.0,2.0,84.0,19.0,33.0,1.0,4.0,3.0,inf,84.0,-19.0,1.0,16.0,accepted
+7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,1.0,4.0,3.0,inf,81.0,-17.0,0.0,19.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,3.0,inf,85.0,-19.0,5.0,15.0,accepted
+5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,1.0,4.0,3.0,inf,83.0,-19.0,0.0,17.0,accepted
+5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,1.0,4.0,4.0,inf,83.0,-19.0,0.0,17.0,accepted
+7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,1.0,4.0,4.0,inf,81.0,-17.0,0.0,19.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,4.0,inf,85.0,-19.0,5.0,15.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,5.0,inf,85.0,-19.0,5.0,15.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,6.0,inf,85.0,-19.0,5.0,15.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,7.0,inf,85.0,-19.0,5.0,15.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,8.0,inf,85.0,-19.0,5.0,15.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,9.0,inf,85.0,-19.0,5.0,15.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,10.0,inf,85.0,-19.0,5.0,15.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,1.0,4.0,11.0,inf,85.0,-19.0,5.0,15.0,accepted
+7.0,6.0,3.0,2.0,4.0,5.0,86.0,19.0,17.0,1.0,4.0,12.0,inf,86.0,-19.0,5.0,14.0,accepted
+5.0,6.0,3.0,2.0,7.0,4.0,93.0,17.0,17.0,1.0,4.0,13.0,inf,93.0,-17.0,5.0,7.0,accepted
+7.0,6.0,2.0,4.0,3.0,5.0,86.0,19.0,22.0,1.0,4.0,13.0,inf,86.0,-19.0,4.0,14.0,accepted
+7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,29.0,1.0,4.0,14.0,inf,86.0,-15.0,2.0,14.0,accepted
+4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,26.0,1.0,4.0,14.0,inf,95.0,-18.0,3.0,5.0,accepted
+5.0,6.0,3.0,2.0,4.0,7.0,96.0,17.0,17.0,1.0,4.0,14.0,inf,96.0,-17.0,5.0,4.0,accepted
+6.0,5.0,3.0,7.0,2.0,4.0,87.0,16.0,37.0,1.0,4.0,14.0,inf,87.0,-16.0,0.0,13.0,accepted
+7.0,3.0,6.0,2.0,5.0,4.0,88.0,13.0,26.0,1.0,4.0,14.0,inf,88.0,-13.0,2.0,12.0,accepted
+7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,23.0,1.0,4.0,14.0,1.4,89.0,-15.0,3.0,11.0,accepted
+5.0,6.0,2.0,3.0,7.0,4.0,94.0,17.0,18.0,1.0,4.0,14.0,1.1,94.0,-17.0,5.0,6.0,accepted
+4.0,6.0,7.0,2.0,5.0,3.0,88.0,16.0,29.0,1.0,4.0,14.0,0.7,88.0,-16.0,1.0,12.0,accepted
+4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,29.0,1.0,4.0,15.0,inf,90.0,-16.0,1.0,10.0,accepted
+4.0,6.0,3.0,2.0,7.0,5.0,98.0,16.0,17.0,1.0,4.0,15.0,inf,98.0,-16.0,5.0,2.0,accepted
+7.0,3.0,4.0,6.0,5.0,2.0,86.0,13.0,36.0,1.0,4.0,15.0,inf,86.0,-13.0,0.0,14.0,accepted
+7.0,3.0,6.0,2.0,5.0,4.0,88.0,13.0,26.0,1.0,4.0,15.0,0.324561403509,88.0,-13.0,2.0,12.0,accepted
+6.0,4.0,7.0,3.0,2.0,5.0,87.0,14.0,33.0,1.0,4.0,15.0,1.04385964912,87.0,-14.0,0.0,13.0,accepted
+4.0,6.0,7.0,3.0,2.0,5.0,89.0,16.0,33.0,1.0,4.0,15.0,0.324561403509,89.0,-16.0,0.0,11.0,accepted
+6.0,4.0,5.0,2.0,3.0,7.0,94.0,14.0,23.0,1.0,4.0,15.0,1.06578947368,94.0,-14.0,3.0,6.0,accepted
+7.0,3.0,5.0,2.0,6.0,4.0,90.0,13.0,23.0,1.0,4.0,15.0,inf,90.0,-13.0,3.0,10.0,accepted
+4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,26.0,1.0,4.0,15.0,1.15789473684,95.0,-16.0,3.0,5.0,accepted
+4.0,6.0,2.0,7.0,5.0,3.0,93.0,16.0,34.0,1.0,4.0,16.0,inf,93.0,-16.0,1.0,7.0,accepted
+6.0,4.0,5.0,2.0,3.0,7.0,94.0,14.0,23.0,1.0,4.0,16.0,inf,94.0,-14.0,3.0,6.0,accepted
+7.0,3.0,6.0,2.0,5.0,4.0,88.0,13.0,26.0,1.0,4.0,16.0,inf,88.0,-13.0,2.0,12.0,accepted
+4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,33.0,1.0,4.0,17.0,inf,94.0,-14.0,1.0,6.0,accepted
+6.0,3.0,5.0,2.0,7.0,4.0,94.0,12.0,23.0,1.0,4.0,17.0,inf,94.0,-12.0,3.0,6.0,accepted
+7.0,3.0,6.0,2.0,5.0,4.0,88.0,13.0,26.0,1.0,4.0,17.0,inf,88.0,-13.0,2.0,12.0,accepted
+6.0,3.0,5.0,2.0,7.0,4.0,94.0,12.0,23.0,1.0,4.0,18.0,inf,94.0,-12.0,3.0,6.0,accepted
+2.0,6.0,3.0,7.0,5.0,4.0,100.0,14.0,37.0,1.0,4.0,18.0,inf,100.0,-14.0,0.0,0.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,1.0,inf,85.0,-19.0,5.0,15.0,accepted
+7.0,6.0,3.0,5.0,4.0,2.0,80.0,19.0,29.0,0.0,5.0,1.0,inf,80.0,-19.0,2.0,20.0,accepted
+6.0,7.0,4.0,2.0,5.0,3.0,83.0,20.0,20.0,0.0,5.0,1.0,inf,83.0,-20.0,4.0,17.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,1.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,0.0,5.0,2.0,inf,81.0,-17.0,0.0,19.0,accepted
+6.0,7.0,4.0,3.0,2.0,5.0,84.0,20.0,24.0,0.0,5.0,2.0,inf,84.0,-20.0,3.0,16.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,2.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,2.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,4.0,2.0,3.0,5.0,84.0,19.0,20.0,0.0,5.0,2.0,1.2350877193,84.0,-19.0,4.0,16.0,accepted
+6.0,7.0,3.0,5.0,2.0,4.0,83.0,20.0,29.0,0.0,5.0,2.0,1.5649122807,83.0,-20.0,2.0,17.0,accepted
+7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,0.0,5.0,3.0,inf,81.0,-17.0,0.0,19.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,3.0,inf,85.0,-19.0,5.0,15.0,accepted
+5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,5.0,3.0,inf,83.0,-19.0,0.0,17.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,3.0,inf,86.0,-20.0,5.0,14.0,accepted
+5.0,7.0,3.0,6.0,4.0,2.0,84.0,19.0,33.0,0.0,5.0,3.0,2.06666666667,84.0,-19.0,1.0,16.0,accepted
+7.0,5.0,4.0,6.0,2.0,3.0,81.0,17.0,36.0,0.0,5.0,4.0,inf,81.0,-17.0,0.0,19.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,4.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,4.0,inf,86.0,-20.0,5.0,14.0,accepted
+5.0,7.0,4.0,6.0,2.0,3.0,83.0,19.0,36.0,0.0,5.0,4.0,inf,83.0,-19.0,0.0,17.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,5.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,5.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,6.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,6.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,7.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,7.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,8.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,8.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,9.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,9.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,10.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,10.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,11.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,11.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,12.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,12.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,13.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,13.0,inf,86.0,-20.0,5.0,14.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,14.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,14.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,15.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,15.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,16.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,16.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,17.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,17.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,18.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,18.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,20.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,19.0,inf,85.0,-19.0,5.0,15.0,accepted
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,19.0,inf,86.0,-20.0,5.0,14.0,accepted
+7.0,6.0,3.0,2.0,5.0,4.0,85.0,19.0,17.0,0.0,5.0,1.0,inf,85.0,-19.0,5.0,15.0,final
+7.0,6.0,3.0,5.0,4.0,2.0,80.0,19.0,29.0,0.0,5.0,1.0,inf,80.0,-19.0,2.0,20.0,final
+6.0,7.0,4.0,2.0,5.0,3.0,83.0,20.0,20.0,0.0,5.0,1.0,inf,83.0,-20.0,4.0,17.0,final
+6.0,7.0,3.0,2.0,5.0,4.0,86.0,20.0,17.0,0.0,5.0,1.0,inf,86.0,-20.0,5.0,14.0,final
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/unconstrained/simpleKnapsackTournament/PrintOptOut_1.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/unconstrained/simpleKnapsackTournament/PrintOptOut_1.csv
index 86fac51f8a..d37b1dec07 100644
--- a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/unconstrained/simpleKnapsackTournament/PrintOptOut_1.csv
+++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/unconstrained/simpleKnapsackTournament/PrintOptOut_1.csv
@@ -1,71 +1,71 @@
-proj1,proj2,proj3,proj4,proj5,proj6,proj7,proj8,proj9,proj10,planValue,validPlan,PointProbability,ProbabilityWeight-proj5,ProbabilityWeight-proj4,prefix,ProbabilityWeight-proj7,ProbabilityWeight-proj9,ProbabilityWeight-proj6,ProbabilityWeight-proj8,ProbabilityWeight,ProbabilityWeight-proj2,ProbabilityWeight-proj10,ProbabilityWeight-proj3,ProbabilityWeight-proj1,batchId
-0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,3,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,3,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,1,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,1
-0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,0.0,5,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,8,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,8,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,8,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,2,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,2
-0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,1.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,3,1.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,-1,1.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,-1,1.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,8,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,7,0.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.0009765625,0.5,0.5,3,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,3
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,7,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,1.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,1.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,7,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,8,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,8,0.0,0.0009765625,0.5,0.5,4,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,4
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
-0.0,1.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,3,1.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
-0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.0009765625,0.5,0.5,5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,0.5,5
+proj1,proj2,proj3,proj4,proj5,proj6,proj7,proj8,proj9,proj10,planValue,validPlan,ProbabilityWeight-proj7,ProbabilityWeight-proj4,PointProbability,ProbabilityWeight,ProbabilityWeight-proj6,ProbabilityWeight-proj9,ProbabilityWeight-proj2,ProbabilityWeight-proj1,ProbabilityWeight-proj8,batchId,ProbabilityWeight-proj5,ProbabilityWeight-proj10,ProbabilityWeight-proj3,prefix
+0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,3,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,5,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,3,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,4,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,1,0.5,0.5,0.5,1
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,1.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,3,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,3,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,-2,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,2,0.5,0.5,0.5,2
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,1.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,3,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,8,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,6,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,3,0.5,0.5,0.5,3
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,7,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,4,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,1.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,2,1.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,8,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,4,0.5,0.5,0.5,4
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
+0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,11,0.0,0.5,0.5,0.0009765625,1,0.5,0.5,0.5,0.5,0.5,5,0.5,0.5,0.5,5
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/tests b/tests/framework/Optimizers/GeneticAlgorithms/tests
index 8701e76540..5135a04977 100644
--- a/tests/framework/Optimizers/GeneticAlgorithms/tests
+++ b/tests/framework/Optimizers/GeneticAlgorithms/tests
@@ -374,15 +374,34 @@
rel_err = 0.001
[../]
[../]
+ [./NSGA-II_MinwoRepMultiObjective]
+ type = 'RavenFramework'
+ input = 'discrete/constrained/MinwoRepMultiObjective.xml'
+ # [./csv]
+ # type = UnorderedCSV
+ UnorderedCsv = 'discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv'
+ rel_err = 0.001
+ # [../]
+ [../]
- [./GAwithEnsembleModelHDSMconvergence]
+ [./NSGA-II_ZDT1]
type = 'RavenFramework'
- input = 'continuous/unconstrained/test_ensemble_withGA_HDSM.xml'
- [./csv]
- type = OrderedCSV
- output = 'continuous/unconstrained/ensemble_withGA_HDSM/opt_export.csv'
- rel_err = 0.001
- [../]
+ input = 'continuous/unconstrained/ZDT1.xml'
+ # [./csv]
+ # type = OrderedCSV
+ UnorderedCsv = 'continuous/unconstrained/ZDT1/opt_export_0.csv'
+ rel_err = 0.001
+ # [../]
+ [../]
+
+ [./NSGA-II_Beale]
+ type = 'RavenFramework'
+ input = 'continuous/unconstrained/MultiObjectiveBeale-Bealeflipped.xml'
+ # [./csv]
+ # type = OrderedCSV
+ UnorderedCsv = 'continuous/unconstrained/Multi_beale_bealeFlipped/opt_export_0.csv'
+ rel_err = 0.001
+ # [../]
[../]
[./GAwithEnsembleModelIncludingCode]
diff --git a/tests/framework/unit_tests/Optimizers/testFitnessBased.py b/tests/framework/unit_tests/Optimizers/testFitnessBased.py
index 0084aa028b..5fa79c77d0 100644
--- a/tests/framework/unit_tests/Optimizers/testFitnessBased.py
+++ b/tests/framework/unit_tests/Optimizers/testFitnessBased.py
@@ -92,9 +92,14 @@ def formatSample(vars):
popFitness = xr.DataArray(popFitness,
dims=['chromosome'],
coords={'chromosome': np.arange(np.shape(popFitness)[0])})
+popFitnessSet = popFitness.to_dataset(name = "test_popFitness")
popAge = [3,1,7,1]
offSprings = [[2,3,4,5,6,1],[1,3,5,2,4,6],[1,2,4,3,6,5]]
offSpringsFitness = [1.1,2.0,3.2]
+offSpringsFitness = xr.DataArray(offSpringsFitness,
+ dims=['chromosome'],
+ coords={'chromosome': np.arange(np.shape(offSpringsFitness)[0])})
+offSpringsFitnessSet = offSpringsFitness.to_dataset(name = "test_offFitness")
rlz =[]
for i in range(np.shape(offSprings)[0]):
d = {}
@@ -104,10 +109,11 @@ def formatSample(vars):
d[var] = {'dims':() ,'data': val}
rlz.append(xr.Dataset.from_dict(d))
rlz = xr.concat(rlz,dim='data')
-newPop2,newFit2,newAge2,popFitness2 = fitnessBased(rlz, age=popAge, variables=optVars, population=population, fitness=popFitness, offSpringsFitness=offSpringsFitness, popObjectiveVal=popFitness)
+newPop2,newFit2,newAge2,popFitness2 = fitnessBased(rlz, age=popAge, variables=optVars, population=population, fitness=popFitnessSet, offSpringsFitness=offSpringsFitnessSet, popObjectiveVal=popFitness)
+print('*'*39)
print('Fitness Based Selection')
-print('*'*19)
-print('new population: {}, \n new Fitness {}, \n new age'.format(newPop2,newFit2,newAge2))
+print('*'*39)
+print('1. New population:\n {}, \n2. New Fitness:\n {}, \n3. New age:\n'.format(newPop2.data,newFit2.to_dataarray(dim = 'variable', name = None)[0],newAge2))
print('Note that the second and forth chromosome had the same age, but for the age based mechanism it omitted the one with the lowest fitness')
expectedPop = xr.DataArray([[6,5,4,3,2,1],
[1,2,3,4,5,6],
@@ -119,13 +125,16 @@ def formatSample(vars):
expectedFit = xr.DataArray([9.5,7.2,3.2,2.0],
dims=['chromosome'],
coords={'chromosome':np.arange(np.shape(population)[0])})
+
+expectedFit = expectedFit.to_dataset(name = 'x1')
+
expectedAge = [8,4,0,0]
## TESTING
# Test survivor population
checkSameDataArrays('Check survived population data array',newPop2,expectedPop)
# Test survivor fitnesses
-checkSameDataArrays('Check fitness for survived population data array',newFit2,expectedFit)
+checkSameDataArrays('Check fitness for survived population data array',newFit2, expectedFit)
# Test survivor Ages
checkSameListOfInt('Check fitness for survived individuals',np.array(newAge2),np.array(expectedAge))
#
diff --git a/tests/framework/unit_tests/Optimizers/testRankSelection.py b/tests/framework/unit_tests/Optimizers/testRankSelection.py
index d3ecaf807e..c2ea374381 100644
--- a/tests/framework/unit_tests/Optimizers/testRankSelection.py
+++ b/tests/framework/unit_tests/Optimizers/testRankSelection.py
@@ -99,9 +99,12 @@ def formatSample(vars):
dims=['chromosome'],
coords={'chromosome': np.arange(np.shape(popFitness)[0])})
nParents = 2
+popFitness = popFitness.to_dataset(name = 'test_RankSelection')
+
parents = rankSelection(population, variables=optVars, fitness=popFitness, nParents=nParents)
+print('*'*39)
print('Rank based Parent Selection')
-print('*'*19)
+print('*'*39)
print('selected parents are: {}'.format(parents))
expectedParents = xr.DataArray([[3,5,6,2,1,4],
[1,2,3,4,5,6]],
diff --git a/tests/framework/unit_tests/Optimizers/testRouletteWheel.py b/tests/framework/unit_tests/Optimizers/testRouletteWheel.py
index eb5e659800..6292af6996 100644
--- a/tests/framework/unit_tests/Optimizers/testRouletteWheel.py
+++ b/tests/framework/unit_tests/Optimizers/testRouletteWheel.py
@@ -67,6 +67,8 @@ def checkSameDataArrays(comment, resultedDA, expectedDA, update=True):
popFitness = xr.DataArray(popFitness,
dims=['chromosome'],
coords={'chromosome': np.arange(np.shape(popFitness)[0])})
+popFitness = popFitness.to_dataset(name = 'test_RouletteWheel')
+
nParents = 2
parents = rouletteWheel(population, variables=optVars, fitness=popFitness, nParents=nParents)
print('Roulette Wheel Parent Selection')
diff --git a/tests/framework/unit_tests/Optimizers/testTournamentSelection.py b/tests/framework/unit_tests/Optimizers/testTournamentSelection.py
index edddaa8db4..b0131f60e2 100644
--- a/tests/framework/unit_tests/Optimizers/testTournamentSelection.py
+++ b/tests/framework/unit_tests/Optimizers/testTournamentSelection.py
@@ -94,9 +94,12 @@ def formatSample(vars):
popFitness = xr.DataArray(popFitness,
dims=['chromosome'],
coords={'chromosome': np.arange(np.shape(popFitness)[0])})
+popFitness = popFitness.to_dataset(name = 'test_TournamentSelection')
nParents = 2
-parents = tournamentSelection(population, variables=optVars, fitness=popFitness, nParents=nParents)
-print('Roulette Wheel Parent Selection')
+objVal = [10]
+kSelection = 2
+parents = tournamentSelection(population, variables=optVars, fitness=popFitness, nParents=nParents, objVal=objVal, kSelection=kSelection)
+print('Parent Selection with TournamentSelection algorithm')
print('*'*19)
print('selected parents are: {}'.format(parents))
expectedParents = xr.DataArray([[1,2,3,4,5,6],