DARegression.C
1 /*---------------------------------------------------------------------------*\
2 
3  DAFoam : Discrete Adjoint with OpenFOAM
4  Version : v4
5 
6 \*---------------------------------------------------------------------------*/
7 
8 #include "DARegression.H"
9 
10 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
11 
12 namespace Foam
13 {
14 
15 // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
16 
17 DARegression::DARegression(
18  const fvMesh& mesh,
19  const DAOption& daOption,
20  const DAModel& daModel)
21  : regIOobject(
22  IOobject(
23  "DARegression", // the db name
24  mesh.time().timeName(),
25  mesh, // register to mesh
26  IOobject::NO_READ,
27  IOobject::NO_WRITE,
28  true // always register object
29  )),
30  mesh_(mesh),
31  daOption_(daOption),
32  daModel_(daModel)
33 {
34  dictionary regSubDict = daOption.getAllOptions().subDict("regressionModel");
35  active_ = regSubDict.getLabel("active");
36 
37  // initialize parameters
38  if (active_)
39  {
40  forAll(regSubDict.toc(), idxI)
41  {
42  word key = regSubDict.toc()[idxI];
43  if (key != "active")
44  {
45  modelNames_.append(key);
46  }
47  }
48 
49  forAll(modelNames_, idxI)
50  {
51  word modelName = modelNames_[idxI];
52  dictionary modelSubDict = daOption.getAllOptions().subDict("regressionModel").subDict(modelName);
53 
54  modelType_.set(modelName, modelSubDict.getWord("modelType"));
55 
56  wordList tempWordList;
57  scalarList tempScalarList;
58  labelList tempLabelList;
59 
60  modelSubDict.readEntry<wordList>("inputNames", tempWordList);
61  inputNames_.set(modelName, tempWordList);
62 
63  outputName_.set(modelName, modelSubDict.getWord("outputName"));
64 
65  modelSubDict.readEntry<scalarList>("inputShift", tempScalarList);
66  inputShift_.set(modelName, tempScalarList);
67 
68  modelSubDict.readEntry<scalarList>("inputScale", tempScalarList);
69  inputScale_.set(modelName, tempScalarList);
70 
71  outputShift_.set(modelName, modelSubDict.getScalar("outputShift"));
72 
73  outputScale_.set(modelName, modelSubDict.getScalar("outputScale"));
74 
75  outputUpperBound_.set(modelName, modelSubDict.getScalar("outputUpperBound"));
76 
77  outputLowerBound_.set(modelName, modelSubDict.getScalar("outputLowerBound"));
78 
79  defaultOutputValue_.set(modelName, modelSubDict.getScalar("defaultOutputValue"));
80 
81  printInputInfo_.set(modelName, modelSubDict.getLabel("printInputInfo"));
82 
83  writeFeatures_.set(modelName, modelSubDict.lookupOrDefault<label>("writeFeatures", 0));
84 
85  if (modelType_[modelName] == "neuralNetwork")
86  {
87  modelSubDict.readEntry<labelList>("hiddenLayerNeurons", tempLabelList);
88  hiddenLayerNeurons_.set(modelName, tempLabelList);
89  activationFunction_.set(modelName, modelSubDict.getWord("activationFunction"));
90  if (activationFunction_[modelName] == "relu")
91  {
92  leakyCoeff_.set(modelName, modelSubDict.lookupOrDefault<scalar>("leakyCoeff", 0.0));
93  }
94  }
95  else if (modelType_[modelName] == "radialBasisFunction")
96  {
97  nRBFs_.set(modelName, modelSubDict.getLabel("nRBFs"));
98  }
99  else if (modelType_[modelName] == "externalTensorFlow")
100  {
101  useExternalModel_ = 1;
102  // the array size is chosen based on the regModel that has the largest number of inputs;
103  // this matters because we support multiple regModels but want to allocate only one featuresFlattenArray_
104  if (inputNames_[modelName].size() * mesh_.nCells() > featuresFlattenArraySize_)
105  {
106  featuresFlattenArraySize_ = inputNames_[modelName].size() * mesh_.nCells();
107  }
108  }
109  else
110  {
111  FatalErrorIn("DARegression") << "modelType_: " << modelType_[modelName] << " not supported. Options are: neuralNetwork, radialBasisFunction, and externalTensorFlow" << abort(FatalError);
112  }
113 
114  // check the sizes
115  if (inputNames_[modelName].size() != inputShift_[modelName].size()
116  || inputNames_[modelName].size() != inputScale_[modelName].size())
117  {
118  FatalErrorIn("DARegression") << "inputNames has different sizes than inputShift or inputScale" << abort(FatalError);
119  }
120 
121  label nParameters = this->nParameters(modelName);
122  scalarList parameters(nParameters, 0.0);
123  parameters_.set(modelName, parameters);
124 
125  // initialize the ptr scalarFields
126  label nInputs = inputNames_[modelName].size();
127  PtrList<volScalarField> features(nInputs);
128  forAll(inputNames_[modelName], idxI)
129  {
130  word inputName = inputNames_[modelName][idxI];
131  features.set(
132  idxI,
133  new volScalarField(
134  IOobject(
135  inputName,
136  mesh_.time().timeName(),
137  mesh_,
138  IOobject::NO_READ,
139  IOobject::NO_WRITE),
140  mesh_,
141  dimensionedScalar(inputName, dimensionSet(0, 0, 0, 0, 0, 0, 0), 0.0)));
142  }
143  features_.set(modelName, features);
144  }
145 
146  // if external model is used, initialize space for the features and output arrays
147  if (useExternalModel_)
148  {
149 
150 #if defined(CODI_ADF)
151  featuresFlattenArrayDouble_ = new double[featuresFlattenArraySize_];
152  outputFieldArrayDouble_ = new double[mesh_.nCells()];
153 #else
154  featuresFlattenArray_ = new scalar[featuresFlattenArraySize_];
155  outputFieldArray_ = new scalar[mesh_.nCells()];
156 #endif
157  }
158  }
159 }
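// For reference, a minimal sketch of the "regressionModel" sub-dictionary that this
// constructor parses. The model name "model1" and the output name "betaFieldInversion"
// are illustrative, not taken from this file:
//
//     regressionModel
//     {
//         active 1;
//         model1
//         {
//             modelType          "neuralNetwork";
//             inputNames         ("VoS" "PoD" "chiSA");
//             outputName         "betaFieldInversion";
//             inputShift         (0.0 0.0 0.0);
//             inputScale         (1.0 1.0 1.0);
//             outputShift        1.0;
//             outputScale        1.0;
//             outputUpperBound   1e1;
//             outputLowerBound   -1e1;
//             defaultOutputValue 1.0;
//             printInputInfo     1;
//             writeFeatures      0; // optional, defaults to 0
//             hiddenLayerNeurons (5 3 4);
//             activationFunction "tanh";
//         }
//     }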
160 
161 void DARegression::calcInputFeatures(word modelName)
162 {
163  /*
164  Description:
165  Calculate the input features. Here features is a unique list of all the regModel's inputNames.
166  This is because two regModels' inputNames can overlap, so we don't want to compute
167  duplicated features.
168 
169  NOTE: if a feature is a ratio between variable A and variable B, we will normalize
170  it such that the range of this feature is from -1 to 1 by using:
171  feature = A / (A + B + 1e-16)
172  */
173 
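 // For reference, the raw (pre-shift/scale) definitions computed below; each one is a
 // bounded ratio built with the A / (A + B + 1e-16) pattern noted above:
 //   VoS         : |Omega| / (|S| + |Omega|)
 //   PoD         : turbulence production / destruction
 //   chiSA       : nuTilda / (nu + nuTilda)
 //   pGradStream : (U & grad(p)) / (|U| |grad(p)| + |U & grad(p)|)
 //   PSoSS       : |grad(p)| / (|grad(p)| + |3 cmptAv(U & diag(grad(U)))|)
 //   SCurv       : |U & grad(U)| / (|U & U| + |U & grad(U)|)
 //   UOrth       : |U & grad(U) & U| / (|U| |grad(U) & U| + |U & grad(U) & U|)
 //   KoU2        : k / (0.5 (U & U) + k)
 //   ReWall      : sqrt(k) y / (50 nu + sqrt(k) y)
 //   CoP         : turbulence convection / production
 //   TauoK       : |tau| / (k + |tau|)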
174  forAll(features_[modelName], idxI)
175  {
176  word inputName = inputNames_[modelName][idxI];
177  if (inputName == "VoS")
178  {
179  // vorticity / strain
180  const volVectorField& U = mesh_.thisDb().lookupObject<volVectorField>("U");
181  const tmp<volTensorField> tgradU(fvc::grad(U));
182  const volTensorField& gradU = tgradU();
183  volScalarField magOmega = mag(skew(gradU));
184  volScalarField magS = mag(symm(gradU));
185  forAll(features_[modelName][idxI], cellI)
186  {
187  features_[modelName][idxI][cellI] = (magOmega[cellI] / (magS[cellI] + magOmega[cellI] + 1e-16) + inputShift_[modelName][idxI]) * inputScale_[modelName][idxI];
188  }
189  features_[modelName][idxI].correctBoundaryConditions();
190  }
191  else if (inputName == "PoD")
192  {
193  // production / destruction
194  daModel_.getTurbProdOverDestruct(features_[modelName][idxI]);
195  forAll(features_[modelName][idxI], cellI)
196  {
197  features_[modelName][idxI][cellI] = (features_[modelName][idxI][cellI] + inputShift_[modelName][idxI]) * inputScale_[modelName][idxI];
198  }
199  features_[modelName][idxI].correctBoundaryConditions();
200  }
201  else if (inputName == "chiSA")
202  {
203  // the chi() function from SA
204  const volScalarField& nuTilda = mesh_.thisDb().lookupObject<volScalarField>("nuTilda");
205  volScalarField nu = daModel_.getDATurbulenceModel().nu();
206  forAll(features_[modelName][idxI], cellI)
207  {
208  features_[modelName][idxI][cellI] = (nuTilda[cellI] / (nu[cellI] + nuTilda[cellI] + 1e-16) + inputShift_[modelName][idxI]) * inputScale_[modelName][idxI];
209  }
210  features_[modelName][idxI].correctBoundaryConditions();
211  }
212  else if (inputName == "pGradStream")
213  {
214  // pressure gradient along stream
215  const volScalarField& p = mesh_.thisDb().lookupObject<volScalarField>("p");
216  const volVectorField& U = mesh_.thisDb().lookupObject<volVectorField>("U");
217  volVectorField pGrad("gradP", fvc::grad(p));
218  volScalarField pG_denominator(mag(U) * mag(pGrad) + mag(U & pGrad));
219  forAll(pG_denominator, cellI)
220  {
221  pG_denominator[cellI] += 1e-16;
222  }
223  volScalarField::Internal pGradAlongStream = (U() & pGrad()) / pG_denominator();
224 
225  forAll(features_[modelName][idxI], cellI)
226  {
227  features_[modelName][idxI][cellI] = (pGradAlongStream[cellI] + inputShift_[modelName][idxI]) * inputScale_[modelName][idxI];
228  }
229  features_[modelName][idxI].correctBoundaryConditions();
230  }
231  else if (inputName == "PSoSS")
232  {
233  // pressure normal stress over shear stress
234  const volScalarField& p = mesh_.thisDb().lookupObject<volScalarField>("p");
235  const volVectorField& U = mesh_.thisDb().lookupObject<volVectorField>("U");
236  const tmp<volTensorField> tgradU(fvc::grad(U));
237  const volTensorField& gradU = tgradU();
238  volVectorField pGrad("gradP", fvc::grad(p));
239  vector diagUGrad = vector::zero;
240  scalar val = 0;
241  forAll(features_[modelName][idxI], cellI)
242  {
243  diagUGrad[0] = gradU[cellI].xx();
244  diagUGrad[1] = gradU[cellI].yy();
245  diagUGrad[2] = gradU[cellI].zz();
246  val = mag(pGrad[cellI]) / (mag(pGrad[cellI]) + mag(3.0 * cmptAv(U[cellI] & diagUGrad)) + 1e-16);
247  features_[modelName][idxI][cellI] = (val + inputShift_[modelName][idxI]) * inputScale_[modelName][idxI];
248  }
249  features_[modelName][idxI].correctBoundaryConditions();
250  }
251  else if (inputName == "SCurv")
252  {
253  // streamline curvature
254  const volVectorField& U = mesh_.thisDb().lookupObject<volVectorField>("U");
255  const tmp<volTensorField> tgradU(fvc::grad(U));
256  const volTensorField& gradU = tgradU();
257 
258  scalar val = 0;
259  forAll(features_[modelName][idxI], cellI)
260  {
261  val = mag(U[cellI] & gradU[cellI]) / (mag(U[cellI] & U[cellI]) + mag(U[cellI] & gradU[cellI]) + 1e-16);
262  features_[modelName][idxI][cellI] = (val + inputShift_[modelName][idxI]) * inputScale_[modelName][idxI];
263  }
264  features_[modelName][idxI].correctBoundaryConditions();
265  }
266  else if (inputName == "UOrth")
267  {
268  // Non-orthogonality between velocity and its gradient
269  const volVectorField& U = mesh_.thisDb().lookupObject<volVectorField>("U");
270  const tmp<volTensorField> tgradU(fvc::grad(U));
271  const volTensorField& gradU = tgradU();
272 
273  scalar val = 0;
274  forAll(features_[modelName][idxI], cellI)
275  {
276  val = mag(U[cellI] & gradU[cellI] & U[cellI]) / (mag(U[cellI]) * mag(gradU[cellI] & U[cellI]) + mag(U[cellI] & gradU[cellI] & U[cellI]) + 1e-16);
277  features_[modelName][idxI][cellI] = (val + inputShift_[modelName][idxI]) * inputScale_[modelName][idxI];
278  }
279  features_[modelName][idxI].correctBoundaryConditions();
280  }
281  else if (inputName == "KoU2")
282  {
283  // turbulence intensity (k over velocity squared)
284  const volScalarField& k = mesh_.thisDb().lookupObject<volScalarField>("k");
285  const volVectorField& U = mesh_.thisDb().lookupObject<volVectorField>("U");
286  scalar val = 0;
287  forAll(features_[modelName][idxI], cellI)
288  {
289  val = k[cellI] / (0.5 * (U[cellI] & U[cellI]) + k[cellI] + 1e-16);
290  features_[modelName][idxI][cellI] = (val + inputShift_[modelName][idxI]) * inputScale_[modelName][idxI];
291  }
292  features_[modelName][idxI].correctBoundaryConditions();
293  }
294  else if (inputName == "ReWall")
295  {
296  // wall distance based Reynolds number
297  const volScalarField& y = mesh_.thisDb().lookupObject<volScalarField>("yWall");
298  const volScalarField& k = mesh_.thisDb().lookupObject<volScalarField>("k");
299  volScalarField nu = daModel_.getDATurbulenceModel().nu();
300  scalar val = 0;
301  forAll(features_[modelName][idxI], cellI)
302  {
303  val = sqrt(k[cellI]) * y[cellI] / (50.0 * nu[cellI] + sqrt(k[cellI]) * y[cellI] + 1e-16);
304  features_[modelName][idxI][cellI] = (val + inputShift_[modelName][idxI]) * inputScale_[modelName][idxI];
305  }
306  features_[modelName][idxI].correctBoundaryConditions();
307  }
308  else if (inputName == "CoP")
309  {
310  // convective / production
311  daModel_.getTurbConvOverProd(features_[modelName][idxI]);
312  forAll(features_[modelName][idxI], cellI)
313  {
314  features_[modelName][idxI][cellI] = (features_[modelName][idxI][cellI] + inputShift_[modelName][idxI]) * inputScale_[modelName][idxI];
315  }
316  features_[modelName][idxI].correctBoundaryConditions();
317  }
318  else if (inputName == "TauoK")
319  {
320  // ratio of total to normal Reynolds stress
321  const volScalarField& k = mesh_.thisDb().lookupObject<volScalarField>("k");
322  const volScalarField& nut = mesh_.thisDb().lookupObject<volScalarField>("nut");
323  const volVectorField& U = mesh_.thisDb().lookupObject<volVectorField>("U");
324  volSymmTensorField tau(2.0 / 3.0 * I * k - nut * twoSymm(fvc::grad(U)));
325  scalar val = 0;
326  forAll(features_[modelName][idxI], cellI)
327  {
328  val = mag(tau[cellI]) / (k[cellI] + mag(tau[cellI]) + 1e-16);
329  features_[modelName][idxI][cellI] = (val + inputShift_[modelName][idxI]) * inputScale_[modelName][idxI];
330  }
331  features_[modelName][idxI].correctBoundaryConditions();
332  }
333  else
334  {
335  FatalErrorIn("") << "inputName: " << inputName << " not supported. Options are: VoS, PoD, chiSA, pGradStream, PSoSS, SCurv, UOrth, KoU2, ReWall, CoP, TauoK" << abort(FatalError);
336  }
337  }
338 }
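Every branch above applies the same shift-then-scale convention read from inputShift and inputScale in the constructor. A minimal standalone sketch (plain C++, not part of DAFoam; the helper name normalizeFeature is illustrative):

    #include <cstddef>
    #include <vector>

    // scaled = (raw + shift) * scale: the shift is applied first, so with
    // shift = -mean and scale = 1/std this standardizes a raw feature
    std::vector<double> normalizeFeature(
        const std::vector<double>& raw, double shift, double scale)
    {
        std::vector<double> out(raw.size());
        for (std::size_t i = 0; i < raw.size(); ++i)
        {
            out[i] = (raw[i] + shift) * scale;
        }
        return out;
    }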
339 
340 label DARegression::compute()
341 {
342  /*
343  Description:
344  Calculate the prescribed output field based on the prescribed input fields using a regression model.
345  We support neural network, radial basis function, and external TensorFlow models.
346 
347  Input:
348  parameters_:
349  the parameters for the regression. For the neural network model, these parameters
350  are the weights and biases. We order them in a 1D array, starting from the first input's weights and biases.
351 
352  daOption Dict: regressionModel
353  inputNames: a list of volScalarFields saved in the features_ pointerList
354  hiddenLayerNeurons: number of neurons for each hidden layer.
355  example: {5, 3, 4} means three hidden layers with 5, 3, and 4 neurons.
356 
357  Output:
358  a volScalarField prescribed by outputName.
359 
360  Return 1 if there is an invalid value in the output; return 0 if successful.
361 
362 
363  */
364 
365  if (!active_)
366  {
367  return 0;
368  }
369 
370  label fail = 0;
371 
372  forAll(modelNames_, idxI)
373  {
374  word modelName = modelNames_[idxI];
375 
376  // compute the inputFeature for all inputs
377  this->calcInputFeatures(modelName);
378 
379  // if the output variable is not found in the Db, just return and do nothing
380  if (!mesh_.thisDb().foundObject<volScalarField>(outputName_[modelName]))
381  {
382  return 0;
383  }
384 
385  volScalarField& outputField = const_cast<volScalarField&>(mesh_.thisDb().lookupObject<volScalarField>(outputName_[modelName]));
386 
387  if (modelType_[modelName] == "neuralNetwork")
388  {
389  label nHiddenLayers = hiddenLayerNeurons_[modelName].size();
390  List<List<scalar>> layerVals;
391  layerVals.setSize(nHiddenLayers);
392  for (label layerI = 0; layerI < nHiddenLayers; layerI++)
393  {
394  label nNeurons = hiddenLayerNeurons_[modelName][layerI];
395  layerVals[layerI].setSize(nNeurons);
396  }
397 
398  forAll(mesh_.cells(), cellI)
399  {
400  label counterI = 0;
401 
402  for (label layerI = 0; layerI < nHiddenLayers; layerI++)
403  {
404  label nNeurons = hiddenLayerNeurons_[modelName][layerI];
405  forAll(layerVals[layerI], neuronI)
406  {
407  layerVals[layerI][neuronI] = 0.0;
408  }
409  for (label neuronI = 0; neuronI < nNeurons; neuronI++)
410  {
411  if (layerI == 0)
412  {
413  // for the 1st hidden layer, we use the input layer as the input
414  forAll(inputNames_[modelName], neuronJ)
415  {
416  // weighted sum
417  layerVals[layerI][neuronI] += features_[modelName][neuronJ][cellI] * parameters_[modelName][counterI];
418  counterI++;
419  }
420  }
421  else
422  {
423  // for the remaining hidden layers, we use the previous hidden layer as the input
424  forAll(layerVals[layerI - 1], neuronJ)
425  {
426  // weighted sum
427  layerVals[layerI][neuronI] += layerVals[layerI - 1][neuronJ] * parameters_[modelName][counterI];
428  counterI++;
429  }
430  }
431  // bias
432  layerVals[layerI][neuronI] += parameters_[modelName][counterI];
433  counterI++;
434  // activation function
435  if (activationFunction_[modelName] == "sigmoid")
436  {
437  layerVals[layerI][neuronI] = 1 / (1 + exp(-layerVals[layerI][neuronI]));
438  }
439  else if (activationFunction_[modelName] == "tanh")
440  {
441  layerVals[layerI][neuronI] = (1 - exp(-2 * layerVals[layerI][neuronI])) / (1 + exp(-2 * layerVals[layerI][neuronI]));
442  }
443  else if (activationFunction_[modelName] == "relu")
444  {
445  if (layerVals[layerI][neuronI] < 0)
446  {
447  layerVals[layerI][neuronI] = leakyCoeff_[modelName] * layerVals[layerI][neuronI];
448  }
449  }
450  else
451  {
452  FatalErrorIn("") << "activationFunction not valid. Options are: sigmoid, tanh, and relu" << abort(FatalError);
453  }
454  }
455  }
456  // final output layer, we have only one output
457  scalar outputVal = 0.0;
458  forAll(layerVals[nHiddenLayers - 1], neuronJ)
459  {
460  // weighted sum
461  outputVal += layerVals[nHiddenLayers - 1][neuronJ] * parameters_[modelName][counterI];
462  counterI++;
463  }
464  // bias
465  outputVal += parameters_[modelName][counterI];
466 
467  // no activation function for the output layer
468 
469  outputField[cellI] = outputScale_[modelName] * (outputVal + outputShift_[modelName]);
470  }
471 
472  // check if the output values are valid otherwise fix/bound them
473  fail += this->checkOutput(modelName, outputField);
474 
475  outputField.correctBoundaryConditions();
476  }
477  else if (modelType_[modelName] == "radialBasisFunction")
478  {
479  label nInputs = inputNames_[modelName].size();
480 
481  // parameter stride for each RBF basis: one mean and one standard deviation per input, plus one weight
482  label dP = 2 * nInputs + 1;
483 
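 // For reference, the loop below evaluates a weighted sum of Gaussian bases,
 //   output = sum_i w_i * exp(- sum_j (x_j - mu_ij)^2 / (2 sigma_ij^2)),
 // with parameters_ flattened per basis i as [mu_i1, sigma_i1, ..., mu_in, sigma_in, w_i],
 // i.e. the weight w_i occupies the last of the dP slots for basis i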
484  forAll(mesh_.cells(), cellI)
485  {
486  scalar outputVal = 0.0;
487  for (label i = 0; i < nRBFs_[modelName]; i++)
488  {
489  scalar expCoeff = 0.0;
490  for (label j = 0; j < nInputs; j++)
491  {
492  scalar A = (features_[modelName][j][cellI] - parameters_[modelName][dP * i + 2 * j]) * (features_[modelName][j][cellI] - parameters_[modelName][dP * i + 2 * j]);
493  scalar B = 2 * parameters_[modelName][dP * i + 2 * j + 1] * parameters_[modelName][dP * i + 2 * j + 1];
494  expCoeff += A / B;
495  }
496  outputVal += parameters_[modelName][dP * i + dP - 1] * exp(-expCoeff);
497  }
498 
499  outputField[cellI] = outputScale_[modelName] * (outputVal + outputShift_[modelName]);
500  }
501 
502  // check if the output values are valid otherwise fix/bound them
503  fail += this->checkOutput(modelName, outputField);
504 
505  outputField.correctBoundaryConditions();
506  }
507  else if (modelType_[modelName] == "externalTensorFlow")
508  {
509  label nInputs = inputNames_[modelName].size();
510  label nCells = mesh_.nCells();
511 
512  DAUtility::pySetModelNameInterface(modelName.c_str(), DAUtility::pySetModelName);
513 
514  // NOTE: forward-mode AD is not supported.
515 #if defined(CODI_ADR)
516  Info << "WARNINGN...... Regression model for ADR is not implemented..." << endl;
517  /*
518  // assign features_ to featuresFlattenArray_
519  // here featuresFlattenArray_ should be ordered like this to facilitate the Python-layer reshape:
520  // [(cell1, feature1), (cell1, feature2), ... (cell2, feature1), (cell2, feature2) ... ]
521  label counterI = 0;
522  // loop over all features
523  forAll(features_[modelName], idxI)
524  {
525  // loop over all cells
526  forAll(features_[modelName][idxI], cellI)
527  {
528  counterI = cellI * nInputs + idxI;
529  featuresFlattenArray_[counterI] = features_[modelName][idxI][cellI];
530  }
531  }
532  // assign outputField to outputFieldArray_
533  forAll(outputField, cellI)
534  {
535  outputFieldArray_[cellI] = outputField[cellI];
536  }
537 
538  // we need to use the external function helper from CoDiPack to propagate the AD
539  codi::ExternalFunctionHelper<codi::RealReverse> externalFunc;
540  for (label i = 0; i < mesh_.nCells() * nInputs; i++)
541  {
542  externalFunc.addInput(featuresFlattenArray_[i]);
543  }
544 
545  for (label i = 0; i < mesh_.nCells(); i++)
546  {
547  externalFunc.addOutput(outputFieldArray_[i]);
548  }
549 
550  externalFunc.callPrimalFunc(DARegression::betaCompute);
551 
552  codi::RealReverse::Tape& tape = codi::RealReverse::getTape();
553 
554  if (tape.isActive())
555  {
556  externalFunc.addToTape(DARegression::betaJacVecProd);
557  }
558 
559  forAll(outputField, cellI)
560  {
561  outputField[cellI] = outputFieldArray_[cellI];
562  }
563  */
564 
565 #elif defined(CODI_ADF)
566  Info << "WARNINGN...... Regression model for ADF is not implemented..." << endl;
567  /*
568  // assign features_ to featuresFlattenArray_
569  // here featuresFlattenArray_ should be ordered like this to facilitate the Python-layer reshape:
570  // [(cell1, feature1), (cell1, feature2), ... (cell2, feature1), (cell2, feature2) ... ]
571  label counterI = 0;
572  // loop over all features
573  forAll(features_[modelName], idxI)
574  {
575  // loop over all cells
576  forAll(features_[modelName][idxI], cellI)
577  {
578  counterI = cellI * nInputs + idxI;
579  featuresFlattenArrayDouble_[counterI] = features_[modelName][idxI][cellI].getValue();
580  }
581  }
582  // assign outputField to outputFieldArrayDouble_
583  forAll(outputField, cellI)
584  {
585  outputFieldArrayDouble_[cellI] = outputField[cellI].value();
586  }
587 
588  // python callback function
589  DAUtility::pyCalcBetaInterface(
590  featuresFlattenArrayDouble_, mesh_.nCells() * nInputs, outputFieldArrayDouble_, mesh_.nCells(), DAUtility::pyCalcBeta);
591 
592  forAll(outputField, cellI)
593  {
594  outputField[cellI] = outputFieldArrayDouble_[cellI];
595  }
596  */
597 
598 #else
599  // assign features_ to featuresFlattenArray_
600  // here featuresFlattenArray_ should be ordered like this to facilitate the Python-layer reshape:
601  // [(cell1, feature1), (cell1, feature2), ... (cell2, feature1), (cell2, feature2) ... ]
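 // e.g. with nInputs = 2 and nCells = 3 the flat layout is
 // [c0f0, c0f1, c1f0, c1f1, c2f0, c2f1], which the Python side can reshape
 // to an (nCells, nInputs) feature matrix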
602  label counterI = 0;
603  // loop over all features
604  forAll(features_[modelName], idxI)
605  {
606  // loop over all cells
607  forAll(features_[modelName][idxI], cellI)
608  {
609  counterI = cellI * nInputs + idxI;
610  featuresFlattenArray_[counterI] = features_[modelName][idxI][cellI];
611  }
612  }
613  // assign outputField to outputFieldArray_
614  forAll(outputField, cellI)
615  {
616  outputFieldArray_[cellI] = outputField[cellI];
617  }
618 
619  // python callback function
620  DAUtility::pyCalcBetaInterface(
621  featuresFlattenArray_, mesh_.nCells() * nInputs, outputFieldArray_, mesh_.nCells(), DAUtility::pyCalcBeta);
622 
623  forAll(outputField, cellI)
624  {
625  outputField[cellI] = outputFieldArray_[cellI];
626  }
627 #endif
628  }
629  else
630  {
631  FatalErrorIn("") << "modelType_: " << modelType_ << " not supported. Options are: neuralNetwork, radialBasisFunction, and externalTensorFlow" << abort(FatalError);
632  }
633  }
634 
635  return fail;
636 }
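For clarity, here is a minimal standalone sketch (plain C++, no OpenFOAM types; tanh is used for brevity and the function name forwardPass is illustrative) of the forward pass and the flat weights-then-bias parameter ordering that the neuralNetwork branch above walks through with counterI:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    double forwardPass(
        const std::vector<double>& inputs,          // scaled features for one cell
        const std::vector<int>& hiddenLayerNeurons, // e.g. {5, 3, 4}
        const std::vector<double>& parameters)      // flat weight/bias array
    {
        std::vector<double> prev = inputs;
        std::size_t c = 0; // running index into parameters, like counterI above
        for (int n : hiddenLayerNeurons)
        {
            std::vector<double> next(n, 0.0);
            for (int i = 0; i < n; ++i)
            {
                for (double x : prev)
                {
                    next[i] += x * parameters[c++]; // incoming weights of neuron i
                }
                next[i] += parameters[c++];   // bias of neuron i
                next[i] = std::tanh(next[i]); // activation
            }
            prev = next;
        }
        double out = 0.0;
        for (double x : prev)
        {
            out += x * parameters[c++]; // output-layer weights
        }
        return out + parameters[c]; // output-layer bias; no output activation
    }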
637 
638 label DARegression::nParameters(word modelName)
639 {
640  /*
641  Description:
642  get the number of parameters
643  */
644 
645  if (!active_)
646  {
647  FatalErrorIn("") << "nParameters() is called but the regression model is not active!" << abort(FatalError);
648  }
649 
650  if (modelType_[modelName] == "neuralNetwork")
651  {
652  label nHiddenLayers = hiddenLayerNeurons_[modelName].size();
653  label nInputs = inputNames_[modelName].size();
654 
655  // add weights
656  // input
657  label nParameters = nInputs * hiddenLayerNeurons_[modelName][0];
658  // hidden layers
659  for (label layerI = 1; layerI < hiddenLayerNeurons_[modelName].size(); layerI++)
660  {
661  nParameters += hiddenLayerNeurons_[modelName][layerI] * hiddenLayerNeurons_[modelName][layerI - 1];
662  }
663  // output
664  nParameters += hiddenLayerNeurons_[modelName][nHiddenLayers - 1] * 1;
665 
666  // add biases
667  // add hidden layers
668  for (label layerI = 0; layerI < hiddenLayerNeurons_[modelName].size(); layerI++)
669  {
670  nParameters += hiddenLayerNeurons_[modelName][layerI];
671  }
672  // add output layer
673  nParameters += 1;
674 
675  return nParameters;
676  }
677  else if (modelType_[modelName] == "radialBasisFunction")
678  {
679  label nInputs = inputNames_[modelName].size();
680 
681  // each RBF has a weight, nInputs mean, and nInputs std
682  label nParameters = nRBFs_[modelName] * (2 * nInputs + 1);
683 
684  return nParameters;
685  }
686  else if (modelType_[modelName] == "externalTensorFlow")
687  {
688  // external model: the parameters live on the Python side, so there is nothing to count here
689  return 0;
690  }
691  else
692  {
693  FatalErrorIn("") << "modelType_: " << modelType_[modelName] << " not supported. Options are: neuralNetwork, radialBasisFunction, and externalTensorFlow" << abort(FatalError);
694  return 0;
695  }
696 }
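// Worked example: with three inputs and hiddenLayerNeurons (5 3 4), the neural
// network has 3*5 + 5*3 + 3*4 + 4*1 = 46 weights plus 5 + 3 + 4 + 1 = 13 biases,
// i.e. nParameters = 59; a radialBasisFunction model with nRBFs = 10 and the same
// three inputs has nParameters = 10 * (2*3 + 1) = 70.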
697 
698 label DARegression::checkOutput(word modelName, volScalarField& outputField)
699 {
700  /*
701  Description:
702  check whether the output values are valid; if not, bound or fix them
703 
704  Output:
705 
706  Return 1 if there is an invalid value in the output; return 0 if successful.
707  */
708 
709  label fail = 0;
710 
711  // check if the output value is valid.
712  label isNaN = 0;
713  label isInf = 0;
714  label isBounded = 0;
715  forAll(mesh_.cells(), cellI)
716  {
717  if (std::isnan(outputField[cellI]))
718  {
719  outputField[cellI] = defaultOutputValue_[modelName];
720  isNaN = 1;
721  }
722  if (std::isinf(outputField[cellI]))
723  {
724  outputField[cellI] = defaultOutputValue_[modelName];
725  isInf = 1;
726  }
727  if (outputField[cellI] > outputUpperBound_[modelName])
728  {
729  outputField[cellI] = outputUpperBound_[modelName];
730  isBounded = 1;
731  }
732  if (outputField[cellI] < outputLowerBound_[modelName])
733  {
734  outputField[cellI] = outputLowerBound_[modelName];
735  isBounded = 1;
736  }
737  }
738  if (isBounded == 1)
739  {
740  Pout << "************* Warning! output values are bounded between " << outputLowerBound_[modelName] << " and " << outputUpperBound_[modelName] << endl;
741  fail = 1;
742  }
743 
744  if (isNaN == 1)
745  {
746  Pout << "************* Warning! output values have nan and are set to " << defaultOutputValue_[modelName] << endl;
747  fail = 1;
748  }
749 
750  if (isInf == 1)
751  {
752  Pout << "************* Warning! output values have inf and are set to " << defaultOutputValue_[modelName] << endl;
753  fail = 1;
754  }
755 
756  reduce(fail, sumOp<label>());
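 // the sum across all MPI ranks makes every rank return a nonzero value
 // if any rank had to bound or fix its output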
757 
758  return fail;
759 }
760 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
761 
762 } // End namespace Foam
763 
764 // ************************************************************************* //