DAFoam: Discrete Adjoint with OpenFOAM

The Python interface to DAFoam. It controls the adjoint solvers and external modules for design optimization.
import os
import sys
import copy
import shutil
import subprocess
import numpy as np
from mpi4py import MPI
from collections import OrderedDict
import petsc4py
from petsc4py import PETSc

petsc4py.init(sys.argv)

import tensorflow as tf
Define a set of options to use in PYDAFOAM and set their initial values.
This class will be used by PYDAFOAM._getDefOptions()

NOTE: Give an initial value for a new option; this helps PYDAFOAM determine the type of the
option. If it is a list, give at least one default value. If it is a dict, you can leave it
blank, e.g., {}. Also, use ## to add comments before the new option so that these comments
will be picked up by Doxygen. If possible, give examples.

NOTE: We group these options into three categories.
- The basic options are those that will be used for EVERY solver and EVERY case.
- The intermediate options are those that will be used in some solvers in special
  situations (e.g., primalVarBounds to prevent the solution from diverging).
- The advanced options will be used in special situations to improve performance, e.g.,
  maxResConLv4JacPCMat to reduce the memory of the preconditioner. Their usage is highly
  case dependent. They may also include options that have a default value that is rarely
  changed, except in very special situations.
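# A minimal sketch of how a new option could be declared in the option class's __init__,
# following the conventions above: the initial value fixes the option type, and the "##"
# comment is picked up by Doxygen. The option name "myNewOption" is hypothetical and only
# for illustration.

## An example integer option (basic category). Example: "myNewOption": 1
# self.myNewOption = 1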
# Excerpts of the default option values:

"center": [0.25, 0.0, 0.0],

"p_rghMax": 500000.0,
"ReThetatMin": 1e-16,
"gammaIntMin": 1e-16,

"PCMatPrecomputeInterval": 100,
"PCMatUpdateInterval": 1,
"additionalOutput": ["None"],
"readZeroFields": True,

self.useAD = {"mode": "reverse", "dvName": "None", "seedIndex": -9999}

"jacMatReOrdering": "rcm",
"gmresMaxIters": 1000,
"gmresRestart": 1000,
"gmresRelTol": 1.0e-6,
"gmresAbsTol": 1.0e-14,
"gmresTolDiff": 1.0e2,
"useNonZeroInitGuess": False,
"fpMinResTolDiff": 1.0e2,
"dynAdjustTol": False,

"simpleCoeffs": {"n": [2, 2, 1], "delta": 0.001},
"preservePatches": ["None"],
"singleProcessorFaceSets": ["None"],

"maxAspectRatio": 1000.0,
"maxIncorrectlyOrientedFaces": 0,
Main class for pyDAFoam

Parameters
----------
comm : mpi4py communicator
    An optional argument to pass in an external communicator.

options : dictionary
    The list of options to use with pyDAFoam.
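# A typical construction sketch (assumed usage; the option values are illustrative, not
# defaults). PYDAFOAM is driven by an options dictionary plus an optional MPI communicator.
from mpi4py import MPI
from dafoam import PYDAFOAM

daOptions = {
    "solverName": "DASimpleFoam",
    "designSurfaces": ["wing"],
}
DASolver = PYDAFOAM(options=daOptions, comm=MPI.COMM_WORLD)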
Initialize class members

assert not os.getenv("WM_PROJECT") is None, "$WM_PROJECT not found. Please source OpenFOAM-v1812/etc/bashrc"

Info("-------------------------------------------------------------------------------")
Info("-------------------------------------------------------------------------------")

if "ALL_OPENFOAM_WALL_PATCHES" in self.getOption("designSurfaces"):
    ...

# initialize the TensorFlow helper and pass its callback functions to both solvers
if self.getOption("tensorflow")["active"]:
    TensorFlowHelper.options = self.getOption("tensorflow")
    TensorFlowHelper.initialize()

    self.solver.initTensorFlowFuncs(
        TensorFlowHelper.predict, TensorFlowHelper.calcJacVecProd, TensorFlowHelper.setModelName
    )
    self.solverAD.initTensorFlowFuncs(
        TensorFlowHelper.predict, TensorFlowHelper.calcJacVecProd, TensorFlowHelper.setModelName
    )

self.solver.printAllOptions()

Info("pyDAFoam initialization done!")
def _solverRegistry(self):
    """
    Register solver names and set their types. For a new solver, first identify its
    type and add its name to the following dict
    """

    self.solverRegistry = {
        "Incompressible": ["DASimpleFoam", "DAPimpleFoam", "DAPimpleDyMFoam"],
        "Compressible": ["DARhoSimpleFoam", "DARhoSimpleCFoam", "DATurboFoam", "DARhoPimpleFoam"],
        "Solid": ["DASolidDisplacementFoam", "DAHeatTransferFoam"],
    }

if self.getOption("useAD")["mode"] == "forward":
    ...

if self.getOption("writeMinorIterations"):
    ...
def _getDefOptions(self):
    """
    Setup default options

    Returns
    -------
    defOpts : dict
        All the DAFoam options.
    """

    # initialize a DAOPTION object and harvest its attributes into the defOpts dict
    daOption = DAOPTION()

    defOpts = {}

    for key in vars(daOption):
        value = getattr(daOption, key)
        defOpts[key] = [type(value), value]

    return defOpts
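# A self-contained sketch (not DAFoam code) of the attribute-harvesting pattern used by
# _getDefOptions above: instance attributes become a {name: [type, default]} dict so the
# option type can be checked later. The DummyOptions class and its attributes are made up.
class DummyOptions:
    def __init__(self):
        self.solverName = "DASimpleFoam"
        self.printInterval = 100

defOpts = {}
dummy = DummyOptions()
for key in vars(dummy):
    value = getattr(dummy, key)
    defOpts[key] = [type(value), value]
# defOpts -> {"solverName": [str, "DASimpleFoam"], "printInterval": [int, 100]}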
def _checkOptions(self):
    """
    Check if the combination of options is valid.
    NOTE: we should add all possible checks here!
    """

    if not self.getOption("useAD")["mode"] in ["reverse", "forward"]:
        raise Error("useAD->mode only supports reverse, or forward!")

    if "NONE" not in self.getOption("writeSensMap"):
        if not self.getOption("useAD")["mode"] in ["reverse"]:
            raise Error("writeSensMap is only compatible with useAD->mode=reverse")

    if self.getOption("adjEqnSolMethod") == "fixedPoint":
        if self.comm.rank == 0:
            print("Fixed-point adjoint mode detected. Unset normalizeStates and normalizeResiduals...")

        if len(self.getOption("normalizeStates")) > 0:
            raise Error("Please do not set any normalizeStates for the fixed-point adjoint!")

        self.setOption("normalizeResiduals", ["None"])

    if self.getOption("discipline") not in ["aero", "thermal"]:
        raise Error("discipline: %s not supported. Options are: aero or thermal" % self.getOption("discipline"))

    # check the patch names set in primalBC
    primalBCDict = self.getOption("primalBC")
    for bcKey in primalBCDict:
        try:
            patches = primalBCDict[bcKey]["patches"]
        except Exception:
            continue
        for patchName in patches:
            if patchName not in self.boundaries.keys():
                raise Error(
                    "primalBC-%s-patches-%s is not valid. Please use a patchName from the boundaries list: %s"
                    % (bcKey, patchName, self.boundaries.keys())
                )

    # check the patch names set in function
    functionDict = self.getOption("function")
    for objKey in functionDict:
        try:
            patches = functionDict[objKey]["patches"]
        except Exception:
            continue
        for patchName in patches:
            if patchName not in self.boundaries.keys():
                raise Error(
                    "function-%s-patches-%s is not valid. Please use a patchName from the boundaries list: %s"
                    % (objKey, patchName, self.boundaries.keys())
                )

    if self.getOption("useAD")["mode"] in ["forward", "reverse"]:
        ...
Write the adjoint variables in OpenFOAM field format for post-processing

if len(self.getOption("function").keys()) > 1:
    raise Error("writeAdjointFields supports only one function, while multiple are defined!")
Evaluate the desired functions given in an iterable object and store the results in funcs

Examples
--------
>>> funcs = {}
>>> CFDsolver.evalFunctions(funcs)
>>> # Result will look like:
>>> # {'CL':0.501, 'CD':0.02750}

for funcName in list(self.getOption("function").keys()):
    # get the function value from the AD solver in forward mode, otherwise from the primal solver
    if self.getOption("useAD")["mode"] == "forward":
        functionValue = self.solverAD.getTimeOpFuncVal(funcName)
    else:
        functionValue = self.solver.getTimeOpFuncVal(funcName)
    funcs[funcName] = functionValue
Add a custom grouping of families called groupName. The groupName
must be distinct from the existing families. All families in
the 'families' list must be present in the mesh file.

Parameters
----------
groupName : str
    User-supplied custom name for the family groupings
families : list
    List of strings. Family names to combine into the family group

if groupName in self.families:
    raise Error(
        "The specified groupName '%s' already exists in the mesh file or has already been added." % groupName
    )

for fam in families:
    if fam not in self.families:
        raise Error(
            "The specified family '%s' for group '%s', does "
            "not exist in the mesh file or has "
            "not been added. The current list of "
            "families (original and grouped) is: %s" % (fam, groupName, repr(self.families.keys()))
        )
    ...

self.families[groupName] = sorted(np.unique(indices))
Set the mesh object to the aero_solver to do geometric deformations

Parameters
----------
mesh : MBMesh or USMesh object
    The mesh object for doing the warping

self.mesh.setExternalMeshIndices(meshInd)

self.mesh.setSurfaceDefinition(pts, conn, faceSizes)
Return the connectivity of the coordinates at which the forces (or tractions) are
defined. This is the complement of getForces() which returns
the forces at the locations returned in this routine.

Parameters
----------
groupName : str
    Group identifier to get only forces corresponding to the
    desired group. The group must be a family or a user-supplied
    group of families. The default is None which corresponds to
    all wall-type surfaces.

if groupName is None:
    ...

# for each boundary patch in the group, collect the reduced face connectivity
# (shifted by the running point offset) and the face sizes
bc = self.boundaries[name]
nPts = len(bc["indicesRed"])
nFace = len(bc["facesRed"])

for iFace in range(nFace):
    face = copy.copy(bc["facesRed"][iFace])
    for i in range(len(face)):
        face[i] += pointOffset
    conn.extend(face)
    faceSizes.append(len(face))

return conn, faceSizes
This function returns a triangulated version of the surface
mesh on all processors. The intent is to use this for doing
constraints in DVConstraints.

Returns
-------
    List of points and vectors describing the surface. This may
    be passed directly to DVConstraint setSurface() function.

if groupName is None:
    ...

# gather the local connectivity and face sizes from all processors
conn = np.array(conn).flatten()
conn = self.comm.allgather(conn)
faceSizes = self.comm.allgather(faceSizes)

for iProc in range(len(faceSizes)):
    for iFace in range(len(faceSizes[iProc])):
        # get the number of nodes on this face and its node indices
        faceSize = faceSizes[iProc][iFace]
        faceNodes = conn[iProc][connCounter : connCounter + faceSize]

        # compute the face center as the average of its nodes
        for i in range(faceSize):
            idx = faceNodes[i]
            ptSum += pts[iProc][idx]

        avgPt = ptSum / faceSize

        # build the triangle fan: edge vectors from the face center to consecutive nodes
        for i in range(faceSize):
            idx = faceNodes[i]
            v1.append(pts[iProc][idx] - avgPt)
            if i < (faceSize - 1):
                idxp1 = faceNodes[i + 1]
                v2.append(pts[iProc][idxp1] - avgPt)
            else:
                v2.append(pts[iProc][idx0] - avgPt)

        connCounter += faceSize
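# A minimal, self-contained sketch of the center-fan triangulation used above: each
# polygonal face is split into triangles that share the face center. The quad below is
# made-up example data.
import numpy as np

quad = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]])
avgPt = quad.mean(axis=0)  # face center

p0, v1, v2 = [], [], []
nNodes = len(quad)
for i in range(nNodes):
    p0.append(avgPt)
    v1.append(quad[i] - avgPt)                 # vector from the center to the current node
    v2.append(quad[(i + 1) % nNodes] - avgPt)  # vector from the center to the next node (wraps around)
# (p0[i], p0[i] + v1[i], p0[i] + v2[i]) are the nNodes triangles fanned from the center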
Print a nicely formatted dictionary of the family names
def _initializeOptions(self, options):
    """
    Initialize the options passed into pyDAFoam

    Parameters
    ----------
    options : dictionary
        The list of options to use with pyDAFoam.
    """

    # if 'options' is None raise an error
    if options is None:
        raise Error("The 'options' keyword argument must be passed to pyDAFoam.")

    # set the solver type based on the registered solver names
    if options["solverName"] in self.solverRegistry["Incompressible"]:
        ...

    # each default option entry must have the format {'name': [type, value]}
    for key in self.defaultOptions:
        if len(self.defaultOptions[key]) != 2:
            raise Error(
                "key %s has wrong format! \
                Example: {'iters' : [int, 1]}"
                % key
            )
def _initializeComm(self, comm):
    """
    Initialize MPI COMM and setup parallel flags
    """

    # set the MPI communicator; default to COMM_WORLD if none is given
    if comm is None:
        comm = MPI.COMM_WORLD

    ...

    nProc = self.comm.size
Deform the dynamic mesh and save it to the disk

if not self.getOption("dynamicMesh")["active"]:
    return

Info("Deforming dynamic mesh")

# if the volume coordinates are not a solver input, we can directly deform the points
if self.solver.hasVolCoordInput() == 0:
    ...

mode = self.getOption("dynamicMesh")["mode"]
deltaT = self.solver.getDeltaT()
endTime = self.solver.getEndTime()
endTimeIndex = round(endTime / deltaT)

if mode == "rotation":
    center = self.getOption("dynamicMesh")["center"]
    axis = self.getOption("dynamicMesh")["axis"]
    omega = self.getOption("dynamicMesh")["omega"]

    points0 = np.zeros(nLocalPoints * 3)
    self.solver.getOFMeshPoints(points0)
    # write the initial (undeformed) points to the 0 folder
    self.solver.writeMeshPoints(points0, 0.0)

    # deform the points for all time instances and write them to disk
    points = np.reshape(points0, (-1, 3))
    for i in range(1, endTimeIndex + 1):
        t = i * deltaT

        dTheta = omega * deltaT
        dCosTheta = np.cos(dTheta)
        dSinTheta = np.sin(dTheta)

        if axis == "z":
            for pointI in range(nLocalPoints):
                # rotate the point about the center by dTheta in the x-y plane
                xTemp = points[pointI][0] - center[0]
                yTemp = points[pointI][1] - center[1]
                points[pointI][0] = dCosTheta * xTemp - dSinTheta * yTemp + center[0]
                points[pointI][1] = dSinTheta * xTemp + dCosTheta * yTemp + center[1]
        else:
            raise Error("axis not valid! Options are: z")

        pointsWrite = points.flatten()
        self.solver.writeMeshPoints(pointsWrite, t)
else:
    raise Error("mode not valid! Options are: rotation")
Read the dynamic mesh points saved in the folders 0.001, 0.002, etc.
NOTE: if the backward scheme is used we need to read the mesh
for 3 time levels to get the correct V0, V00, etc.
NOTE: setting the proper time index is critical because the fvMesh
will use timeIndex to calculate meshPhi, V0, etc.

if timeVal < deltaT:
    raise Error("timeVal not valid")

if ddtSchemeOrder == 1:
    # a first-order scheme needs only the previous time level, which is read below
    pass
elif ddtSchemeOrder == 2:
    # read the mesh points for t - 2 * deltaT
    time_2 = max(timeVal - 2 * deltaT, 0.0)
    index_2 = timeIndex - 2
    self.solver.readMeshPoints(time_2)
    self.solverAD.readMeshPoints(time_2)
else:
    raise Error("ddtSchemeOrder not supported")

# read the mesh points for t - deltaT
time_1 = max(timeVal - deltaT, 0.0)
index_1 = timeIndex - 1
self.solver.readMeshPoints(time_1)
self.solverAD.readMeshPoints(time_1)

# read the mesh points for the current time t
self.solver.readMeshPoints(timeVal)
self.solverAD.readMeshPoints(timeVal)
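# A self-contained sketch of the incremental z-axis rotation used in deformDynamicMesh
# above: at each time step the points are rotated by omega*deltaT about the rotation
# center in the x-y plane. The center, omega, deltaT, and point values are made up.
import numpy as np

center = [0.25, 0.0, 0.0]
omega = 10.0      # rad/s
deltaT = 0.001    # s
points = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.5]])

dTheta = omega * deltaT
dCos, dSin = np.cos(dTheta), np.sin(dTheta)
xTemp = points[:, 0] - center[0]
yTemp = points[:, 1] - center[1]
points[:, 0] = dCos * xTemp - dSin * yTemp + center[0]
points[:, 1] = dSin * xTemp + dCos * yTemp + center[1]
# repeating this update endTimeIndex times and writing points.flatten() after each step
# produces the sequence of mesh-point folders (0.001, 0.002, ...) read back above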
Read the state variables into OpenFOAM's state fields

t0 = timeVal - deltaT

t00 = timeVal - 2 * deltaT
Set the solver input. If it is forward mode, we also set the seeds

for inputName in list(inputDict.keys()):
    # only set the inputs whose components include the solver
    if "solver" in inputDict[inputName]["components"]:
        inputType = inputDict[inputName]["type"]
        input = inputs[inputName]
        inputSize = len(input)
        seeds = np.zeros(inputSize)

        # in forward mode, seed the active design variable
        if self.getOption("useAD")["mode"] == "forward":
            if inputType == "volCoord":
                if self.getOption("useAD")["dvName"] not in list(inputDict.keys()):
                    seeds = self.calcFFD2XvSeeds()
            else:
                if inputName == self.getOption("useAD")["dvName"]:
                    seedIndex = self.getOption("useAD")["seedIndex"]
                    seeds[seedIndex] = 1.0

        # NOTE: we need to set the inputs for both the primal and the AD solvers
        self.solver.setSolverInput(inputName, inputType, inputSize, input, seeds)
        self.solverAD.setSolverInput(inputName, inputType, inputSize, input, seeds)
discipline = self.getOption("discipline")

if DVGeo is None:
    raise Error("calcFFD2XvSeeds is called but no DVGeo object found! Call add_dvgeo in the run script!")

if self.mesh is None:
    raise Error("calcFFD2XvSeeds is called but no mesh object found!")

dvName = self.getOption("useAD")["dvName"]
seedIndex = self.getOption("useAD")["seedIndex"]

# create the design-variable seeds: zero everywhere except a unit seed at seedIndex of dvName
xDV = DVGeo.getValues()
xDvDot = {}
for key in list(xDV.keys()):
    xDvDot[key] = np.zeros_like(xDV[key])
xDvDot[dvName][seedIndex] = 1.0

# allocate the surface seed array with the same shape as the initial surface coordinates
xSDot0 = np.zeros_like(xs0)

# propagate the design-variable seeds to the surface coordinates
xSDot = DVGeo.totalSensitivityProd(xDvDot, ptSetName="x_%s0" % discipline).reshape(xSDot0.shape)

# propagate the surface coordinate seeds to the volume coordinates
xVDot = self.mesh.warpDerivFwd(xSDot)

return xVDot
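# A schematic sketch (not the DAFoam API) of the forward-seed propagation in calcFFD2XvSeeds:
# a unit seed on one FFD design variable is pushed through the geometry and mesh-warping
# linearizations. J_geo and J_warp are hypothetical dense Jacobians standing in for
# DVGeo.totalSensitivityProd and mesh.warpDerivFwd.
import numpy as np

nDV, nSurf, nVol = 4, 6, 9
J_geo = np.random.rand(nSurf, nDV)    # d(surface coords)/d(design variables)
J_warp = np.random.rand(nVol, nSurf)  # d(volume coords)/d(surface coords)

xDvDot = np.zeros(nDV)
xDvDot[1] = 1.0                       # seed one design variable (seedIndex = 1)

xSDot = J_geo @ xDvDot                # surface coordinate seeds
xVDot = J_warp @ xSDot                # volume coordinate seeds passed to the forward-AD solver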
def _initSolver(self):
    """
    Initialize the solvers. This needs to be called before calling any runs
    """

    if self.solverInitialized == 1:
        raise Error("pyDAFoam: self._initSolver has been called! One shouldn't initialize solvers twice!")

    solverName = self.getOption("solverName")
    solverArg = solverName + " -python " + self.parallelFlag

    from .libs.pyDASolvers import pyDASolvers

    ...

    if self.getOption("useAD")["mode"] == "forward":
        from .libs.ADF.pyDASolvers import pyDASolvers as pyDASolversAD

        ...
    elif self.getOption("useAD")["mode"] == "reverse":
        from .libs.ADR.pyDASolvers import pyDASolvers as pyDASolversAD

        ...

    Info("Init solver done! ElapsedClockTime %f s" % self.solver.getElapsedClockTime())
    Info("Init solver done! ElapsedCpuTime %f s" % self.solver.getElapsedCpuTime())
Run decomposePar for parallel runs

# skip decomposePar for serial cases
if self.comm.size == 1:
    return

command = ["decomposePar"]
args = self.getOption("decomposeParDict")["args"]

# run decomposePar on the master proc only
if self.comm.rank == 0:
    status = subprocess.call(command, stdout=sys.stdout, stderr=subprocess.STDOUT, shell=False)
    if status != 0:
        print("\nUnable to run decomposePar, the domain has been already decomposed?\n", flush=True)
Delete the previous primal solution time folder

solTime = self.solver.getPrevPrimalSolTime()

rootDir = os.getcwd()
if self.parallel:
    checkPath = os.path.join(rootDir, "processor%d/%g" % (self.comm.rank, solTime))
else:
    checkPath = os.path.join(rootDir, "%g" % solTime)

if os.path.isdir(checkPath):
    try:
        shutil.rmtree(checkPath)
    except Exception:
        raise Error("Can not delete %s" % checkPath)

    Info("Previous solution time %g found and deleted." % solTime)
else:
    Info("Previous solution time %g not found and nothing deleted." % solTime)
Rename the primal solution folder to a specific format for post-processing. The renamed time has
a format like 0.0001, 0.0002, etc. One can load these intermediate shapes and fields and
plot them in paraview.
The way it is implemented is that we sort the solution folders, consider the largest time folder
as the solution folder, and rename it.

Parameters
----------
solIndex: int
    The major iteration index

renamed = False
rootDir = os.getcwd()
if self.parallel:
    checkPath = os.path.join(rootDir, "processor%d" % self.comm.rank)
else:
    checkPath = rootDir

latestTime = self.solver.getLatestTime()

if latestTime < 1.0:
    Info("Latest solution time %g less than 1, not renamed." % latestTime)
    return latestTime, renamed

distTime = "%g" % (solIndex / 1e4)
targetTime = "%g" % latestTime

src = os.path.join(checkPath, targetTime)
dst = os.path.join(checkPath, distTime)

Info("Moving time %s to %s" % (targetTime, distTime))

if os.path.isdir(dst):
    raise Error("%s already exists, moving failed!" % dst)
else:
    try:
        shutil.move(src, dst)
    except Exception:
        raise Error("Can not move %s to %s" % (src, dst))

renamed = True
return distTime, renamed
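# A quick illustration of the renaming convention above: major iteration index 12 maps to
# the destination time folder name "0.0012", and "%g" trims trailing zeros consistently.
solIndex = 12
distTime = "%g" % (solIndex / 1e4)  # -> "0.0012"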
def _readMeshInfo(self):
    """
    Initialize mesh information and read mesh information
    """

    dirName = os.getcwd()

    self.xv = copy.copy(self.xv0)
Set the updated surface coordinates for a particular group.

Parameters
----------
coordinates : numpy array
    Numpy array of size Nx3, where N is the number of coordinates on this processor.
    This array must have the same shape as the array obtained with getSurfaceCoordinates()
groupName : str
    Name of the family or group of families for which to set coordinates.

if self.mesh is None:
    ...

if groupName is None:
    ...

if self.mesh is None:
    raise Error("Cannot set new surface coordinate locations without a mesh warping object present.")
Return the coordinates for the surfaces defined by groupName.

Parameters
----------
groupName : str
    Group identifier to get only coordinates corresponding to
    the desired group. The group must be a family or a
    user-supplied group of families. The default is None which
    corresponds to all wall-type surfaces.

Returns
-------
xs : numpy array of size nPoints * 3 for surface points

if groupName is None:
    ...

xs = np.zeros((npts, 3), self.dtype)

# loop over the families in this group and copy the points of each boundary patch
bc = self.boundaries[name]
for ptInd in bc["indicesRed"]:
    xs[counter, :] = self.xv[ptInd]
    counter += 1

return xs
def _getSurfaceSize(self, groupName):
    """
    Internal routine to return the size of a particular surface. This
    does *NOT* set the actual family group
    """

    if groupName is None:
        ...

    if groupName not in self.families:
        raise Error(
            "'%s' is not a family in the OpenFoam Case or has not been added"
            " as a combination of families" % groupName
        )

    # loop over the basic surfaces in the family group and sum up the number of
    # faces and points
    nCells = 0
    nPts = 0
    ...
    bc = self.boundaries[name]
    nCells += len(bc["facesRed"])
    nPts += len(bc["indicesRed"])

    return nPts, nCells
Assign the boundary condition defined in primalBC to the OF fields
def _computeBasicFamilyInfo(self):
    """
    Loop over the boundary data and compute necessary family
    information for the basic patches
    """

    ...
    bc = self.boundaries[name]

    # get the number of faces associated with this boundary
    nFace = len(bc["faces"])

    # collect the node indices used by this boundary
    indices = []
    for iFace in bc["faces"]:
        # get the node information for the current face
        face = self.faces[iFace]
        indices.extend(face)

    # get the unique point entries for this boundary
    indices = np.unique(indices)

    # create the reverse dictionary map (original index -> reduced index)
    inverseInd = {}
    for i in range(len(indices)):
        inverseInd[indices[i]] = i

    # loop over the faces and store the connectivity in terms of the reduced index set
    facesRed = []
    for iFace in bc["faces"]:
        # get the node information for the current face
        face = self.faces[iFace]
        nNodes = len(face)

        # generate the reduced connectivity for this face
        faceReduced = []
        for j in range(nNodes):
            indOrig = face[j]
            indRed = inverseInd[indOrig]
            faceReduced.append(indRed)
        facesRed.append(faceReduced)

    # check that the lengths of faces and facesRed are equal
    if not (len(bc["faces"]) == len(facesRed)):
        raise Error("Connectivity for faces on reduced index set is not the same length as original.")

    # store the reduced faces and the index list in the boundary dict
    bc["facesRed"] = facesRed
    bc["indicesRed"] = list(indices)

    # check for wall-type boundaries and add them to the wall group
    if bc["type"] == "wall" or bc["type"] == "slip" or bc["type"] == "cyclic":
        ...
Get the list of indices to pass to the mesh object for the volume mesh mapping

nCoords = len(self.xv0.flatten())

nCoords = self.comm.allgather(nCoords)
offset = 0
for i in range(self.comm.rank):
    offset += nCoords[i]

meshInd = np.arange(nCoords[self.comm.rank]) + offset

return meshInd
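# A self-contained sketch of the owned-index-range computation above: each rank gathers all
# local sizes, sums the sizes of the lower ranks to get its offset, and builds its global
# index range. The per-rank size below is made up; run with e.g. "mpirun -np 4 python ...".
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
nCoordsLocal = 3 * (comm.rank + 1)       # hypothetical number of local coordinates
nCoords = comm.allgather(nCoordsLocal)   # list of local sizes from all ranks

offset = 0
for i in range(comm.rank):
    offset += nCoords[i]

meshInd = np.arange(nCoords[comm.rank]) + offset  # global indices owned by this rank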
def mapVector(self, vec1, groupName1, groupName2, vec2=None):
    """This is the main workhorse routine of everything that deals with
    families in pyDAFoam. The purpose of this routine is to convert a
    vector 'vec1' (of size Nx3) that was evaluated with
    'groupName1' and expand or contract it (and adjust the
    ordering) to produce 'vec2' evaluated on groupName2.

    A little ascii art might help. Consider the following "mesh".
    Family 'fam1' has 9 points, 'fam2' has 10 pts and 'fam3' has
    5 points. Consider that we have also added two
    additional groups: 'f12' containing 'fam1' and 'fam2' and a
    group 'f23' that contains families 'fam2' and 'fam3'. The vector
    we want to map is 'vec1'. It is length 9+10. All the 'x's are
    significant values.

    The call: mapVector(vec1, 'f12', 'f23')

    will produce the "returned vec" array, containing the
    significant values from 'fam2', where the two groups overlap,
    and the new values from 'fam3' set to zero. The values from
    fam1 are lost. The returned vec has size 15.

        fam1      fam2       fam3
    |---------+----------+------|

    |xxxxxxxxx xxxxxxxxxx|         <- vec1
              |xxxxxxxxxx 000000|  <- returned vec (vec2)

    Parameters
    ----------
    vec1 : Numpy array
        Array of size Nx3 that will be mapped to a different family set.

    groupName1 : str
        The family group where the vector vec1 is currently defined

    groupName2 : str
        The family group that we want the vector to be mapped into

    vec2 : Numpy array or None
        Array containing existing values in the output vector we want to keep.
        If this vector is not given, the values will be filled with zeros.

    Returns
    -------
    vec2 : Numpy array
        The input vector mapped to the families defined in groupName2.
    """

    if groupName1 not in self.families or groupName2 not in self.families:
        raise Error(
            "'%s' or '%s' is not a family in the mesh file or has not been added"
            " as a combination of families" % (groupName1, groupName2)
        )

    # shortcut: nothing to map if the two groups are identical
    if groupName1 == groupName2:
        return vec1

    if vec2 is None:
        npts, ncell = self._getSurfaceSize(groupName2)
        vec2 = np.zeros((npts, 3), self.dtype)

    famList1 = self.families[groupName1]
    famList2 = self.families[groupName2]

    # This functionality is predicated on the surfaces being traversed in the
    # same order every time. Loop over the allFamilies list, keeping track of sizes
    # as we go, and if the family is in both famLists, copy the values from vec1 to vec2.

    ...
    if ind in famList1 and ind in famList2:
        vec2[vec2counter : npts + vec2counter] = vec1[vec1counter : npts + vec1counter]
    ...

    return vec2
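# A stand-alone sketch of the mapVector bookkeeping: walk the basic families in a fixed
# order, keep running offsets into vec1 and vec2, and copy only the families that belong
# to both groups. The family names and sizes match the ascii-art example above.
import numpy as np

famSizes = {"fam1": 9, "fam2": 10, "fam3": 5}
allFamilies = ["fam1", "fam2", "fam3"]
famList1 = ["fam1", "fam2"]   # group 'f12'
famList2 = ["fam2", "fam3"]   # group 'f23'

vec1 = np.ones((9 + 10, 3))   # defined on f12
vec2 = np.zeros((10 + 5, 3))  # to be filled on f23

vec1counter = 0
vec2counter = 0
for fam in allFamilies:
    npts = famSizes[fam]
    if fam in famList1 and fam in famList2:
        vec2[vec2counter : npts + vec2counter] = vec1[vec1counter : npts + vec1counter]
    if fam in famList1:
        vec1counter += npts
    if fam in famList2:
        vec2counter += npts
# vec2 now holds fam2's values followed by zeros for fam3, matching the docstring above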
def _readOFGrid(self, caseDir):
    """
    Read in the mesh information we need to run the case using pyofm

    Parameters
    ----------
    caseDir : str
        The directory containing the OpenFOAM mesh files
    """

    Info("Reading OpenFOAM mesh information...")

    from pyofm import PYOFM

    # initialize pyOFM
    ofm = PYOFM(comm=self.comm)

    # generate the file names
    fileNames = ofm.getFileNames(caseDir, comm=self.comm)

    # read the volume mesh point coordinates
    x0 = ofm.readVolumeMeshPoints()

    # read the face info for the mesh
    faces = ofm.readFaceInfo()

    # read the boundary info
    boundaries = ofm.readBoundaryInfo(faces)

    # read the cell owners and neighbours
    owners, neighbours = ofm.readCellInfo()

    return fileNames, x0, faces, boundaries, owners, neighbours
Set a value to options.
NOTE: calling this function will only change the values in self.options.
It will NOT change values for allOptions_ in DAOption. To propagate the option changes
from pyDAFoam to DASolvers, call self.updateDAOption()

Parameters
----------
name : str
    Name of option to set. Not case sensitive
value : varies
    Value to set. Type is checked for consistency.

Examples
--------
If self.options reads:
self.options =
{
    'solverName': [str, 'DASimpleFoam'],
    'flowEndTime': [float, 1.0]
}
Then, calling self.setOption('solverName', 'DARhoSimpleFoam') will give:
self.options =
{
    'solverName': [str, 'DARhoSimpleFoam'],
    'flowEndTime': [float, 1.0]
}

NOTE: if 'value' is of dict type, we will set all the subKey values in the
'value' dict to self.options, instead of overriding it. This works for
only THREE levels of subDicts.

For example, if self.options reads:
self.options =
{
    'function': [dict, {
        'name': ...,
        'direction': [1.0, 0.0, 0.0],
        ...}]
}
Then, calling self.setOption('function', {'name': 'CL'}) will update only the 'name' subKey:
self.options =
{
    'function': [dict, {
        'name': 'CL',
        'direction': [1.0, 0.0, 0.0],
        ...}]
}
INSTEAD OF overriding the whole sub-dictionary:
self.options =
{
    'function': [dict, {'name': 'CL'}]
}

try:
    self.defaultOptions[name]
except KeyError:
    Error("Option '%-30s' is not a valid %s option." % (name, self.name))

# make sure we are not trying to change an immutable option after the solver is created
if self.solverInitialized and name in self.imOptions:
    raise Error("Option '%-35s' cannot be modified after the solver is created." % name)

# set the value on the options dict; for dict-type values, merge up to three levels of subKeys
if isinstance(value, self.defaultOptions[name][0]):
    if isinstance(value, dict):
        for subKey1 in value:
            # check if this subKey is also a dict
            if isinstance(value[subKey1], dict):
                for subKey2 in value[subKey1]:
                    # check if this subKey is also a dict
                    if isinstance(value[subKey1][subKey2], dict):
                        for subKey3 in value[subKey1][subKey2]:
                            self.options[name][1][subKey1][subKey2][subKey3] = value[subKey1][subKey2][subKey3]
                    else:
                        self.options[name][1][subKey1][subKey2] = value[subKey1][subKey2]
            else:
                self.options[name][1][subKey1] = value[subKey1]
    else:
        self.options[name][1] = value
else:
    raise Error(
        "Datatype for Option %-35s was not valid \n "
        "Expected data type is %-47s \n "
        "Received data type is %-47s" % (name, self.defaultOptions[name][0], type(value))
    )
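# A self-contained sketch of the subKey-merge behavior described above, using plain dicts
# in the same [type, value] layout as self.options. It shows one level of the merge; the
# real setOption goes up to three levels deep. The option content is illustrative.
options = {"function": [dict, {"name": "CD", "direction": [1.0, 0.0, 0.0], "scale": 1.0}]}

def merge_option(options, name, value):
    # merge only the provided subKeys instead of overriding the whole dict
    if isinstance(value, dict):
        for subKey in value:
            options[name][1][subKey] = value[subKey]
    else:
        options[name][1] = value

merge_option(options, "function", {"name": "CL"})
# options["function"][1] -> {"name": "CL", "direction": [1.0, 0.0, 0.0], "scale": 1.0}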
def _initOption(self, name, value):
    """
    Set a value to options. This function will be used only for initializing the options internally.
    Do NOT call this function from the run script!

    Parameters
    ----------
    name : str
        Name of option to set. Not case sensitive
    value : varies
        Value to set. Type is checked for consistency.
    """

    try:
        self.defaultOptions[name]
    except KeyError:
        Error("Option '%-30s' is not a valid %s option." % (name, self.name))

    # make sure we are not trying to change an immutable option after the solver is created
    if self.solverInitialized and name in self.imOptions:
        raise Error("Option '%-35s' cannot be modified after the solver is created." % name)

    # set the value on the options dict; for dict-type values, set the subKey values one level deep
    if isinstance(value, self.defaultOptions[name][0]):
        if isinstance(value, dict):
            for subKey in value:
                self.options[name][1][subKey] = value[subKey]
        else:
            self.options[name][1] = value
    else:
        raise Error(
            "Datatype for Option %-35s was not valid \n "
            "Expected data type is %-47s \n "
            "Received data type is %-47s" % (name, self.defaultOptions[name][0], type(value))
        )
Get the number of regression model parameters
Get a value from options

Parameters
----------
name : str
    Name of option to get. Not case sensitive

Returns
-------
value : varies
    Return the value of the option.

if name in self.defaultOptions:
    return self.options[name][1]
else:
    raise Error("%s is not a valid option name." % name)
Update the allOptions_ in DAOption based on the latest self.options in
pyDAFoam. This will pass the changes of self.options from pyDAFoam
to DASolvers. NOTE: need to call this function after calling
self.setOption

if self.solverInitialized == 0:
    raise Error("self._initSolver not called!")

if self.getOption("useAD")["mode"] in ["forward", "reverse"]:
    ...
Get number of local adjoint states

Get number of local points

Return the adjoint state array owned by this processor

states = np.zeros(nLocalStateSize, self.dtype)
self.solver.getOFFields(states)
return states

Set the state to the OpenFOAM field

self.solver.updateOFFields(states)
self.solverAD.updateOFFields(states)

Set the vol_coords to the OpenFOAM field

self.solver.updateOFMesh(vol_coords)
self.solverAD.updateOFMesh(vol_coords)
Assign the values from array1 to vec

Istart, Iend = vec.getOwnershipRange()

if (Iend - Istart) != size:
    raise Error("array and vec's sizes are not consistent")

for i in range(Istart, Iend):
    iRel = i - Istart
    vec[i] = array1[iRel]

Assign the values from vec to array1

Istart, Iend = vec.getOwnershipRange()

if (Iend - Istart) != size:
    raise Error("array and vec's sizes are not consistent")

for i in range(Istart, Iend):
    iRel = i - Istart
    array1[iRel] = vec[i]

Convert a Petsc vector to a numpy array

Istart, Iend = vec.getOwnershipRange()
size = Iend - Istart
array1 = np.zeros(size, self.dtype)
for i in range(Istart, Iend):
    iRel = i - Istart
    array1[iRel] = vec[i]
return array1

Convert a numpy array to a Petsc vector

size = len(array1)
vec = PETSc.Vec().create(PETSc.COMM_WORLD)
vec.setSizes((size, PETSc.DECIDE), bsize=1)
vec.setFromOptions()

Istart, Iend = vec.getOwnershipRange()
for i in range(Istart, Iend):
    iRel = i - Istart
    vec[i] = array1[iRel]

vec.assemblyBegin()
vec.assemblyEnd()

return vec
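# A minimal petsc4py round-trip sketch matching the helpers above: build a distributed
# PETSc Vec from a local numpy array and copy it back, using this process's ownership range.
import sys
import numpy as np
import petsc4py
petsc4py.init(sys.argv)
from petsc4py import PETSc

array1 = np.array([1.0, 2.0, 3.0])
size = len(array1)

vec = PETSc.Vec().create(PETSc.COMM_WORLD)
vec.setSizes((size, PETSc.DECIDE), bsize=1)
vec.setFromOptions()

Istart, Iend = vec.getOwnershipRange()
for i in range(Istart, Iend):
    vec[i] = array1[i - Istart]
vec.assemblyBegin()
vec.assemblyEnd()

array2 = np.zeros(Iend - Istart)
for i in range(Istart, Iend):
    array2[i - Istart] = vec[i]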
def _getImmutableOptions(self):
    """
    We define the list of options that *cannot* be changed after the
    object is created. pyDAFoam will raise an error if a user tries to
    change these. The strings for these options are placed in a set
    """
def _writeDecomposeParDict(self):
    """
    Write system/decomposeParDict
    """

    if self.comm.rank == 0:
        # open the decomposeParDict file for writing
        workingDirectory = os.getcwd()
        sysDir = "system"
        varDir = os.path.join(workingDirectory, sysDir)
        fileName = "decomposeParDict"
        fileLoc = os.path.join(varDir, fileName)
        f = open(fileLoc, "w")

        # write the file header
        ...

        decomDict = self.getOption("decomposeParDict")
        n = decomDict["simpleCoeffs"]["n"]
        f.write("numberOfSubdomains %d;\n" % self.nProcs)
        f.write("method %s;\n" % decomDict["method"])
        f.write("simpleCoeffs \n")
        f.write("{ \n")
        f.write("    n (%d %d %d);\n" % (n[0], n[1], n[2]))
        f.write("    delta %g;\n" % decomDict["simpleCoeffs"]["delta"])
        f.write("} \n")
        f.write("distributed false;\n")
        f.write("roots();\n")

        if len(decomDict["preservePatches"]) == 1 and decomDict["preservePatches"][0] == "None":
            pass
        else:
            f.write("preservePatches (")
            for pPatch in decomDict["preservePatches"]:
                f.write("%s " % pPatch)
            f.write(");\n")

        if decomDict["singleProcessorFaceSets"][0] != "None":
            f.write("singleProcessorFaceSets (")
            for pPatch in decomDict["singleProcessorFaceSets"]:
                f.write(" (%s -1) " % pPatch)
            f.write(");\n")

        f.write("// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n")

        f.close()
def _writeOpenFoamHeader(self, f, className, location, objectName):
    """
    Write the OpenFOAM file header
    """

    f.write("/*--------------------------------*- C++ -*---------------------------------*\ \n")
    f.write("| ========                 |                                                 | \n")
    f.write("| \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox           | \n")
    f.write("|  \\    /   O peration     | Version:  v1812                                 | \n")
    f.write("|   \\  /    A nd           | Web:      www.OpenFOAM.com                      | \n")
    f.write("|    \\/     M anipulation  |                                                 | \n")
    f.write("\*--------------------------------------------------------------------------*/ \n")
    f.write("FoamFile\n")
    f.write("{\n")
    f.write("    version     2.0;\n")
    f.write("    format      ascii;\n")
    f.write("    class       %s;\n" % className)
    f.write('    location    "%s";\n' % location)
    f.write("    object      %s;\n" % objectName)
    f.write("}\n")
    f.write("// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n")
class Error(Exception):
    """
    Format the error message in a box to make it clear this
    was an explicitly raised exception.
    """

    def __init__(self, message):
        msg = "\n+" + "-" * 78 + "+" + "\n" + "| pyDAFoam Error: "
        i = 19
        for word in message.split():
            if len(word) + i + 1 > 78:  # finish this line and start a new one
                msg += " " * (78 - i) + "|\n| " + word + " "
                i = 1 + len(word) + 1
            else:
                msg += word + " "
                i += len(word) + 1
        msg += " " * (78 - i) + "|\n" + "+" + "-" * 78 + "+" + "\n"
        print(msg, flush=True)
        Exception.__init__(self)
class Info(object):
    """
    Print information and flush to screen for parallel cases
    """

    def __init__(self, message):
        if MPI.COMM_WORLD.rank == 0:
            print(message, flush=True)
        MPI.COMM_WORLD.Barrier()
TensorFlow helper class.
NOTE: this is a static class because the callback function
does not accept non-static class members (seg fault)

predictBatchSize = {}

Initialize parameters and load models

Info("Initializing the TensorFlowHelper")
for key in list(TensorFlowHelper.options.keys()):
    # model entries are keyed by the model name; skip the non-model keys (e.g., "active")
    if key != "active":
        modelName = key
        TensorFlowHelper.predictBatchSize[modelName] = TensorFlowHelper.options[modelName]["predictBatchSize"]
        TensorFlowHelper.nInputs[modelName] = TensorFlowHelper.options[modelName]["nInputs"]
        TensorFlowHelper.model[modelName] = tf.keras.models.load_model(modelName)

Set the model name from the C++ to Python layer

TensorFlowHelper.modelName = modelName.decode()

Calculate the outputs based on the inputs using the saved model

modelName = TensorFlowHelper.modelName
nInputs = TensorFlowHelper.nInputs[modelName]

inputs_tf = np.reshape(inputs, (-1, nInputs))
batchSize = TensorFlowHelper.predictBatchSize[modelName]
outputs_tf = TensorFlowHelper.model[modelName].predict(inputs_tf, verbose=False, batch_size=batchSize)

for i in range(outputs_tf.shape[0]):
    outputs[i] = outputs_tf[i, 0]

Calculate the gradients of the outputs wrt the inputs

modelName = TensorFlowHelper.modelName
nInputs = TensorFlowHelper.nInputs[modelName]

inputs_tf = np.reshape(inputs, (-1, nInputs))
inputs_tf_var = tf.Variable(inputs_tf, dtype=tf.float32)

with tf.GradientTape() as tape:
    outputs_tf = TensorFlowHelper.model[modelName](inputs_tf_var)

gradients_tf = tape.gradient(outputs_tf, inputs_tf_var)

for i in range(gradients_tf.shape[0]):
    for j in range(gradients_tf.shape[1]):
        idx = i * gradients_tf.shape[1] + j
        inputs_b[idx] = gradients_tf.numpy()[i, j] * outputs_b[i]
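# A small self-contained sketch of the GradientTape pattern used above: compute
# d(outputs)/d(inputs) for a Keras model and scale by the reverse-mode output seeds.
# The one-layer model and the data are made up; DAFoam loads its real models with
# tf.keras.models.load_model().
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.Input(shape=(2,)), tf.keras.layers.Dense(1)])

inputs_tf_var = tf.Variable(np.random.rand(3, 2), dtype=tf.float32)
outputs_b = np.ones(3, dtype=np.float32)  # reverse-mode seeds coming from the solver

with tf.GradientTape() as tape:
    outputs_tf = model(inputs_tf_var)

# rows are independent, so this gradient equals d(output_i)/d(input_i) for each row i
gradients_tf = tape.gradient(outputs_tf, inputs_tf_var)           # shape (3, 2)
inputs_b = (gradients_tf.numpy() * outputs_b[:, None]).flatten()  # flattened like inputs_b above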