PlasCom2  1.0
XPACC Multi-physics simulation application
NavierStokesRHS.H
1 #ifndef __NAVIERSTOKES_RHS_H__
2 #define __NAVIERSTOKES_RHS_H__
3 
4 #include <limits>
5 #ifdef USE_OMP
6 #include <omp.h>
7 #endif
8 
9 #include "Simulation.H"
10 #include "EulerUtil.H"
11 #include "EulerKernels.H"
12 #include "ViscidUtil.H"
13 #include "ViscidKernels.H"
14 #include "OperatorKernels.H"
15 #include "MetricKernels.H"
16 #include "GridKernels.H"
17 #include "Stencil.H"
18 #include "SATKernels.H"
19 #include "WENO.H"
20 #include "EOS.H"
21 
22 static const int ALTERNATERHS = 1;
23 static const int EXCHANGEFLUX = 2;
24 static const int EXCHANGEDV = 4;
25 static const int ARTDISS = 8;
26 static const int USEWENO = 16;
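// These option constants are powers of two, so they can presumably be OR'd
// together into a single options bitmask, e.g. (EXCHANGEFLUX | ARTDISS); each
// flag corresponds to one of the boolean switches (exchangeFlux,
// artificialDissipation, useWENO, ...) held by the rhs class below.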
27 
28 static const int holeMask = 1 << mask::HOLE;
29 
30 using namespace simulation::domain::boundary::bc;
31 #define CONSTANT_DT 0
32 #define CONSTANT_CFL 1
33 namespace navierstokes {
34 
37 
39 
40 
41  template<typename GridT, typename StateT, typename OperatorT>
42  class rhs : public simulation::rhs::domain_base<GridT,StateT,OperatorT> {
43 
44  public:
45 
46  typedef GridT GridType;
47  typedef StateT StateType;
48  typedef OperatorT OperatorType;
52  typedef typename rhs_base_t::BCType BCType;
53 
54  using rhs_base_t::globalPtr;
55 
56  public:
57 
58  rhs() : rhs_base_t(), useAlternateRHS(false), exchangeFlux(false),
59  exchangeDV(false), artificialDissipation(false),useWENO(false)
60  {
61  globalPtr = NULL;
62  };
63 
64 
 65  // ===== Main interface functions =====
66 
67  // - First thing to call from outside, sets up grid, halo, and state
68  // Only called once ever
69  int Initialize(GridType &inGrid, StateType &inState, StateType &inParam,
70  OperatorType &inOperator)
71  {
72 
73  tauCounter = 0;
74 
75  if(Init()){
76  std::cout << "NavierStokesRHS:Fatal error: Init failed." << std::endl;
77  return(1);
78  }
79 
80  if(SetGrid(inGrid)){
81  std::cout << "NavierStokesRHS:Fatal error: Set Grid failed." << std::endl;
82  return(2);
83  }
84 
85  if(SetParamState(inParam)){
86  std::cout << "NavierStokesRHS:Fatal error: Set params failed." << std::endl;
87  return(6);
88  }
89 
90  if(globalPtr){
91 #pragma omp master
92  {
93  globalPtr->FunctionEntry("InitializeState");
94  }
95  }
96  if(InitializeState(inState)){
97  std::cout << "NavierStokesRHS:Fatal error: Initialize state failed." << std::endl;
98  return(3);
99  }
100  if(globalPtr){
101 #pragma omp master
102  {
103  globalPtr->FunctionExit("InitializeState");
104  globalPtr->FunctionEntry("InitMinSpacing");
105  }
106  }
107 
108  InitMinSpacing();
109 
110  if(globalPtr){
111 #pragma omp master
112  {
113  globalPtr->FunctionExit("InitMinSpacing");
114  globalPtr->FunctionEntry("HaloSetup");
115  }
116  }
117 
118  if(HaloSetup(inGrid.Halo())){
119  std::cout << "NavierStokesRHS:Fatal error: Halo setup failed." << std::endl;
120  return(4);
121  }
122 
123  if(globalPtr){
124 #pragma omp master
125  {
126  globalPtr->FunctionExit("HaloSetup");
127  globalPtr->FunctionEntry("SetupOperators");
128  }
129  }
130 
131  if(SetupOperators(inOperator)){
132  std::cout << "NavierStokesRHS:Fatal error: Operator setup failed." << std::endl;
133  return(5);
134  }
135  if(globalPtr){
136 #pragma omp master
137  {
138  globalPtr->FunctionExit("SetupOperators");
139  }
140  }
141 
142  // std::cout << "Configured with grid type = " << gridType << std::endl;
143  return(0);
144 
145  };
146 
 147  // - Called any time the state/rhs buffers differ; for RK, that's
 148  // every substep, since each stage has separate storage
149  int SetupRHS(GridType &inGrid, StateType &inState,
150  StateType &rhsState,int myThreadId = 0)
151  {
152 
153 
154  errorCode = 0;
155 
156 #pragma omp master
157  {
158  errorCode = SetHalo(inGrid.Halo());
159  }
160 
161 #pragma omp barrier
162 
163  if(errorCode > 0)
164  return(errorCode);
165 
166 #pragma omp master
167  {
168  errorCode = SetState(inState);
169  }
170 #pragma omp barrier
171 
172  if(errorCode > 0)
173  return(errorCode);
174 
175 #pragma omp master
176  {
177  errorCode = SetRHSState(rhsState);
178  }
179 
180 #pragma omp barrier
181 
182  if(errorCode > 0)
183  return(errorCode);
184 
185  return(0);
186 
187  };
188 
 189  // Main RHS evaluation entry points (SetupRHS must have been called first)
190  int PrepareBuffers(int threadId)
191  {
192 
193  {
194  std::vector<size_t>
195  &fortranBufferInterval(fortranBufferIntervals[threadId]);
196 
197  // Zero the RHS buffers
198  if(globalPtr){
199 #pragma omp barrier
200 #pragma omp master
201  {
202  globalPtr->FunctionEntry("ZeroRHS");
203  }
204  }
205 
206  double zero = 0.0;
207  for(int iEquation = 0;iEquation < numEquations;iEquation++){
209  (&numDim,&numPointsBuffer,&bufferSizes[0],
210  &fortranBufferInterval[0],&zero,myRHSBuffers[iEquation]);
211  }
212 
213  if(globalPtr){
214 #pragma omp barrier
215 #pragma omp master
216  {
217  globalPtr->FunctionExit("ZeroRHS");
218  }
219  }
220  }
221  return(0);
222  };
223 
224  int RHS(int threadId)
225  {
226 
227  PrepareBuffers(threadId);
228 
229  // Calculate DV locally (exchange if !exchangeFlux)
230  // (p,T), 1/rho, v, e
231  if(ComputeDV(threadId))
232  return(1);
233 
234  // Calculate TV locally (exchange if !exchangeFlux)
235  // mu, lambda, k ...
236  if(refRe > 0){
237 
238  if(ComputeTV(threadId))
239  return(1);
240  }
241  if(refRe > 0 || artificialDissipation){
242 
243  if(ComputeVelGrad(threadId))
244  return(1);
245 
246  if(artificialDissipation){
247  if(ComputeVelDiv(threadId))
248  return(1);
249  }
250 
251  if(ComputeStressTensor(threadId))
252  return(1);
253 
254  if(ComputeTemperatureGrad(threadId))
255  return(1);
256 
257  // Calculates heat flux (-q_i)
258  if(ComputeHeatFlux(threadId))
259  return(1);
260 
261  // Computes Q_i = -q_i + (v_j * tau_i_j)
262  // stored in heatFluxBuffer
263  if(ComputeViscidEnergyFlux(threadId))
264  return(1);
265  }
266 
267 
268  double m1 = -1.0;
 269  if(gridType <= simulation::grid::UNIRECT){ // uniform-grid guard (condition inferred)
 270  // Jacobian can be applied in one fell swoop for uniform grids
271  std::vector<double> &gridJacobian(gridPtr->Jacobian());
272  m1 *= gridJacobian[0];
273  }
274 
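 // Rough sketch of the semi-discrete form assembled below:
 //   dQ/dt = -J * sum_i d(Fhat_inviscid,i - Fhat_viscous,i)/dxi_i + boundary and source terms,
 // which is why the inviscid flux derivatives are accumulated with weight m1 = -J
 // and the viscous ones with m2 = +J in the AccumulateToRHS calls that follow.
 // (For uniform grids m1 already carries the Jacobian; otherwise GridScaleRHS
 // applies it after the dimension loop.)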
275  // For each spatial dimension (iDim):
276  for(int iDim = 0;iDim < numDim;iDim++){
277 
278  // Populates inviscidFluxBuffer
279  if(InviscidFlux(iDim,threadId))
280  return(iDim+2);
281 
282 
283  // Populates viscidFluxBuffer
284  if(refRe > 0){
285  ViscidFlux(iDim,threadId);
286  }
287 
288  if(useWENO){
289  // Exchange states
290  if(ExchangeState(threadId))
291  return(iDim+53);
292 
293  // Exchange fluxes
294  if(ExchangeBuffer(fluxMessageId,numEquations,inviscidFluxBuffer,threadId))
295  return(iDim+13);
296 
297  int wenoStatus = ApplyWENO(iDim,threadId);
298 
299  if(wenoStatus > 0){
300  // process some WENO error
301 #pragma omp master
302  {
303  if(globalPtr){
304  std::ostringstream statusMessage;
305  statusMessage << "ApplyWENO returned error code ("
306  << wenoStatus << "), exiting."
307  << std::endl;
308  globalPtr->StdOut(statusMessage.str());
309  pcpp::io::RenewStream(statusMessage);
310  }
311  }
312  // returns error status to caller
313  return(wenoStatus);
314  }
315 
316  // Sum WENO version of dFluxBuffer to RHS [note use of (m1) uniform gridJac]
317  AccumulateToRHS(m1,dFluxBuffer,threadId);
318 
319  if(refRe > 0){ // update RHS with viscous part
320  if(exchangeFlux){ // exchange flux if required
321  if(ExchangeBuffer(fluxMessageId,numEquations,viscidFluxBuffer,threadId))
322  return(iDim+13);
323  }
324  if(ApplyOperator(iDim,numEquations,viscidFluxBuffer,dFluxBuffer,threadId))
325  return(1);
326  // Sum viscous version of dFluxBuffer to RHS [" note above]
327  double m2 = -1.0*m1;
328  AccumulateToRHS(m2,dFluxBuffer,threadId);
329  }
330  } else { // no WENO
331  if (refRe > 0) { // add viscous flux to inviscid flux
332  AccumulateViscousFluxes(-1.0,viscidFluxBuffers,inviscidFluxBuffers,threadId,!exchangeFlux);
333  }
334  if(exchangeFlux){ // exchange flux if required
335  if(ExchangeBuffer(fluxMessageId,numEquations,inviscidFluxBuffer,threadId))
336  return(iDim+13);
337  }
338 
339  // std::ofstream Ouf;
340  // std::ostringstream oufName;
341  // oufName << "aflux_" << iDim << "_" << (tauCounter-1);
342  // Ouf.open(oufName.str().c_str());
343  // Ouf << std::setprecision(12);
344  // Ouf << numEquations << " " << numPointsBuffer << std::endl;
345  // double *tPtr2 = inviscidFluxBuffer;
346  // for(int iComp = 0;iComp < numEquations;iComp++){
347  // for(size_t iPoint = 0;iPoint < numPointsBuffer;iPoint++)
348  // Ouf << tPtr2[iComp*numPointsBuffer+iPoint] << " ";
349  // Ouf << std::endl;
350  // }
351  // Ouf << std::endl;
352  // Ouf.close();
353 
354  if(ApplyOperator(iDim,numEquations,inviscidFluxBuffer,dFluxBuffer,threadId))
355  return(1);
356 
357  // std::ofstream Ouf2;
358  // std::ostringstream oufName2;
359  // oufName2 << "dflux_" << iDim << "_" << (tauCounter-1);
360  // Ouf2.open(oufName2.str().c_str());
361  // Ouf2 << std::setprecision(12);
362  // Ouf2 << numEquations << " " << numPointsBuffer << std::endl;
363  // double *tPtr = dFluxBuffer;
364  // for(int iComp = 0;iComp < numEquations;iComp++){
365  // for(size_t iPoint = 0;iPoint < numPointsBuffer;iPoint++)
366  // Ouf2 << tPtr[iComp*numPointsBuffer+iPoint] << " ";
367  // Ouf2 << std::endl;
368  // }
369  // Ouf2 << std::endl;
370  // Ouf2.close();
371 
372  // std::cout << "M1 = " << m1 << std::endl;
373 
374  // Sum inviscid+viscous version of dFluxBuffer to RHS [" note above]
375  AccumulateToRHS(m1,dFluxBuffer,threadId);
376 
377  // std::ofstream Ouf3;
378  // std::ostringstream oufName3;
379  // oufName3 << "rhs_" << iDim << "_" << (tauCounter-1);
380  // Ouf3.open(oufName3.str().c_str());
381  // Ouf3 << std::setprecision(12);
382  // Ouf3 << numEquations << " " << numPointsBuffer << std::endl;
383  // for(int iComp = 0;iComp < numEquations;iComp++){
384  // double *tPtr = myRHSBuffers[iComp];
385  // for(size_t iPoint = 0;iPoint < numPointsBuffer;iPoint++)
386  // Ouf3 << tPtr[iPoint] << " ";
387  // Ouf3 << std::endl;
388  // }
389  // Ouf3 << std::endl;
390  // Ouf3.close();
391  } // no weno
392 
393 
394  } // Loop over spatial dimensions
395 
396  if(gridType > simulation::grid::UNIRECT){ // apply Jacobian if necessary
397  GridScaleRHS(threadId);
398  }
399 
400 
401 
402 
403  // std::ofstream Ouf4;
404  // std::ostringstream oufName4;
405  // oufName4 << "psrhs_" << (tauCounter-1);
406  // Ouf4.open(oufName4.str().c_str());
407  // Ouf4 << std::setprecision(12);
408  // Ouf4 << numEquations << " " << numPointsBuffer << std::endl;
409  // for(int iComp = 0;iComp < numEquations;iComp++){
410  // double *tPtr = myRHSBuffers[iComp];
411  // for(size_t iPoint = 0;iPoint < numPointsBuffer;iPoint++)
412  // Ouf4 << tPtr[iPoint] << " ";
413  // Ouf4 << std::endl;
414  // }
415  // Ouf4 << std::endl;
416  // Ouf4.close();
417 
418 
419  // ------- Process other sources ---------
420  // 1. boundary sources
421  if(globalPtr){
422 #pragma omp barrier
423 #pragma omp master
424  {
425  globalPtr->FunctionEntry("HandleBCs");
426  }
427  }
428  int returnCode = HandleBoundaryConditions(threadId);
429  if(globalPtr){
430 #pragma omp barrier
431 #pragma omp master
432  {
433  globalPtr->FunctionExit("HandleBCs");
434  }
435  }
436  if(returnCode > 0)
437  return(returnCode+100);
438 
439  // std::ofstream Ouf5;
440  // std::ostringstream oufName5;
441  // oufName5 << "finalrhs_" << (tauCounter-1);
442  // Ouf5.open(oufName5.str().c_str());
443  // Ouf5 << std::setprecision(12);
444  // Ouf5 << numEquations << " " << numPointsBuffer << std::endl;
445  // for(int iComp = 0;iComp < numEquations;iComp++){
446  // double *tPtr = myRHSBuffers[iComp];
447  // for(size_t iPoint = 0;iPoint < numPointsBuffer;iPoint++)
448  // Ouf5 << tPtr[iPoint] << " ";
449  // Ouf5 << std::endl;
450  // }
451  // Ouf5 << std::endl;
452  // Ouf5.close();
453 
454 
455  // 2. other sources
456  returnCode = HandleSources(threadId);
457 
458 
459  if(returnCode > 0)
460  return(returnCode+200);
461 
462 #pragma omp barrier
463  return(0);
464  };
465 
466  // === Secondary interface/wrapper functions =====
467 
468  // Computes dFluxBuffer using WENO reconstruction on fluxBuffer
469  //
470  // Inputs/Outputs
471  // iDim -- Dimension along which to operate
472  // threadId -- OpenMP thread number
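 // Rough outline of the characteristic-wise WENO flux differencing performed below:
 //  1. slide a window of pointwise fluxes along the pencil in direction iDim,
 //  2. project the window onto characteristic variables with the Roe eigenvector
 //     matrices (tinv/tmat) of each interface,
 //  3. reconstruct an upwind-biased face flux per characteristic field, subtracting
 //     an entropy-fix term 0.5*(eta - |lambda|)*dW when |lambda| < eta,
 //  4. project back to conserved variables and store the difference of adjacent
 //     face fluxes in dFluxBuffer.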
473  int ApplyWENO(int iDim, int threadId) {
474 #pragma omp barrier
475  if(globalPtr){
476 #pragma omp master
477  {
478  globalPtr->FunctionEntry("ApplyWENO");
479  }
480  }
481 
482  int winWidth = coeffsWENO.hiAll - coeffsWENO.loAll + 1;
483 
484  double fluxWindow[winWidth*numEquations];
485  double projFluxWindow[winWidth*numEquations];
486  double projFluxReconst[numEquations];
487 
488  double stateL[numEquations];
489  double stateR[numEquations];
490  double deltaState[numEquations];
491  double projDeltaState[numEquations];
492  double fluxL[numEquations];
493  double fluxR[numEquations];
494 
495  double tmat[numEquations*numEquations];
496  double tinv[numEquations*numEquations];
497  double lambda[numEquations*numEquations];
498  double lambdaL[numEquations*numEquations];
499  double lambdaR[numEquations*numEquations];
500  double normVec[numDim];
501  for (int i=0; i<numDim; i++) {
502  if (i == iDim) normVec[i] = 1;
503  else normVec[i] = 0;
504  }
505 
506  const std::vector<pcpp::IndexIntervalType>
507  &threadIntervals(gridPtr->ThreadPartitionBufferIntervals());
508 
509  const pcpp::IndexIntervalType &regionInterval(threadIntervals[threadId]);
510 
512  bufferInterval.InitSimple(bufferSizes);
513 
514  int jDim = (iDim + 1)%numDim;
515  int kDim = (iDim + 2)%numDim; // not needed if numDim == 2
516 
517  size_t js = regionInterval[jDim].first;
518  size_t je = regionInterval[jDim].second;
519 
520  // if numDim == 2 we only want the "k" loop to iterate one time
521  size_t ks = 0;
522  size_t ke = 0;
523  if (numDim == 3) {
524  ks = regionInterval[kDim].first;
525  ke = regionInterval[kDim].second;
526  }
527 
528  for (size_t k=ks; k<=ke; k++) {
529  pcpp::IndexIntervalType opInterval(regionInterval);
530 
531  // In the case numDim == 2, kDim == iDim, so we don't want to break
532  // anything by setting opInterval[kDim]
533  if (numDim == 3) {
534  opInterval[kDim].first = k;
535  opInterval[kDim].second = k;
536  }
537 
538  for (size_t j=js; j<=je; j++) {
539  opInterval[jDim].first = j;
540  opInterval[jDim].second = j;
541 
542  size_t is = regionInterval[iDim].first;
543  size_t ie = regionInterval[iDim].second;
544  // Note: This routine assumes coeffsWENO.loAll <= 0, coeffsWENO.hiAll >= 0
545  opInterval[iDim].first = is + coeffsWENO.loAll - 1; // -1 because we need to compute WENO flux for left face of leftmost point
546  opInterval[iDim].second = ie + coeffsWENO.hiAll;
547 
548  std::vector<size_t> indices;
549  bufferInterval.GetFlatIndices(opInterval, indices);
550 
551  // setup window with fluxes for i == -1
552  for (int loc=0; loc<winWidth; loc++) {
553  for (int eqn=0; eqn<numEquations; eqn++) {
554  // fluxWindow[eqn*winWidth + loc] = fluxBuffer[eqn*numPointsBuffer + indices[loc]];
555  fluxWindow[eqn*winWidth + loc] = inviscidFluxBuffer[eqn*numPointsBuffer + indices[loc]];
556  }
557  }
558 
559  // ii = index into indices, not necessarily falling between is and ie
560  for (size_t ii=-coeffsWENO.loAll; ii<indices.size()-coeffsWENO.hiAll; ii++) {
561  // get left and right state vectors
562  for (int eqn=0; eqn<numEquations; eqn++) {
563  stateL[eqn] = myStateBuffers[eqn][indices[ii]];
564  stateR[eqn] = myStateBuffers[eqn][indices[ii+1]];
565  deltaState[eqn] = stateR[eqn] - stateL[eqn];
566  }
567 
568  // First use tmat, tinv as temp buffers to get eigenvalues of
569  // Jacobians evaluated at left and right states (needed for
570  // entropy fix computation)
571  double gamma = eosPtr->GetGamma();
572  FC_MODULE(satutil,sat_form_roe_matrices,SATUTIL,SAT_FORM_ROE_MATRICES)
573  (&numDim, &stateL[0], &stateL[0], &gamma, &normVec[0], &tmat[0], &tinv[0], &lambdaL[0]);
574  FC_MODULE(satutil,sat_form_roe_matrices,SATUTIL,SAT_FORM_ROE_MATRICES)
575  (&numDim, &stateR[0], &stateR[0], &gamma, &normVec[0], &tmat[0], &tinv[0], &lambdaR[0]);
576 
577  // Now fill tmat, tinv, lambda with Roe eigensystem from
578  // intermediate state
579  FC_MODULE(satutil,sat_form_roe_matrices,SATUTIL,SAT_FORM_ROE_MATRICES)
580  (&numDim, &stateL[0], &stateR[0], &gamma, &normVec[0], &tmat[0], &tinv[0], &lambda[0]);
581 
582  WENO::Project(numEquations, winWidth, &tinv[0], &fluxWindow[0], &projFluxWindow[0]);
583 
584  // setup for entropy fix
585  WENO::Project(numEquations, 1, &tinv[0], &deltaState[0], &projDeltaState[0]);
586  double eta = WENO::EntropyFixEta(numEquations, &lambdaL[0], &lambdaR[0]);
587 
588  for (int eqn=0; eqn<numEquations; eqn++) {
589  int diag = eqn*(numEquations+1); // get diagonal element of lambda matrix
590  int group = (1 + (int) copysign(1.0, lambda[diag]))/2;
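 // group is 1 when lambda[diag] >= 0 and 0 when it is negative; it selects the
 // upwind-biased stencil group passed to ReconstPointVal below.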
591 
592  int startLoc = coeffsWENO.LoGroup(group) - coeffsWENO.loAll;
593  WENO::ReconstPointVal(&projFluxWindow[eqn*winWidth + startLoc], coeffsWENO, group, projFluxReconst[eqn]);
594 
596  double lambdaAbs = std::abs(lambda[diag]);
597  double entropyFix = (eta > lambdaAbs ? 0.5*(eta - lambdaAbs) : 0);
598  entropyFix *= projDeltaState[eqn];
599 
600  //if (entropyFix > 0) {
601  // std::cout << "eta = " << eta << std::endl;
602  // std::cout << "lambdaAbs = " << lambdaAbs << std::endl;
603  // std::cout << "projDeltaState[" << eqn << "] = " << projDeltaState[eqn] << std::endl;
604  // std::cout << "entropyFix[" << eqn << "] = " << entropyFix << std::endl;
605  //}
606 
607  projFluxReconst[eqn] -= entropyFix;
608  }
609 
610  WENO::Project(numEquations, 1, &tmat[0], &projFluxReconst[0], &fluxR[0]);
611 
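 // Note: the dFluxBuffer write below is skipped on the first pass (ii == -loAll)
 // because fluxL has not been computed yet; fluxL is copied from fluxR at the
 // bottom of each iteration.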
612  for (int eqn=0; eqn<numEquations && ii > -coeffsWENO.loAll; eqn++) {
613  dFluxBuffer[eqn*numPointsBuffer + indices[ii]] = fluxR[eqn] - fluxL[eqn];
614  }
615 
616  // shift fluxWindow to right
617  for (int loc=1; loc<winWidth; loc++) {
618  for (int eqn=0; eqn<numEquations; eqn++) {
619  int currIndex = eqn*winWidth + loc;
620  int newIndex = eqn*winWidth + (loc-1);
621  fluxWindow[newIndex] = fluxWindow[currIndex];
622  }
623  }
624 
625  // copy new values into fluxWindow
626  for (int eqn=0; ((eqn<numEquations) && (ii < (indices.size()-coeffsWENO.hiAll-1))); eqn++) {
627  // fluxWindow[eqn*winWidth + (winWidth-1)] = fluxBuffer[eqn*numPointsBuffer + indices[ii+coeffsWENO.hiAll+1]];
628  fluxWindow[eqn*winWidth + (winWidth-1)] = inviscidFluxBuffer[eqn*numPointsBuffer + indices[ii+coeffsWENO.hiAll+1]];
629  }
630 
631  for (int eqn=0; eqn<numEquations; eqn++) {
632  fluxL[eqn] = fluxR[eqn];
633  }
634 
635  } // loop over points in iDim
636  } // loop over points in jDim
637  } // loop over points in kDim
638 
639 #pragma omp barrier
640  if(globalPtr){
641 #pragma omp master
642  {
643  globalPtr->FunctionExit("ApplyWENO");
644  }
645  }
646 
647  return 0;
648  };
649 
652  int ArtificialDissipation(int threadId)
653  {
654  size_t *dOpInterval = &fortranPartitionBufferIntervals[threadId][0];
655  std::vector<double> &gridMetric(gridPtr->Metric());
656  std::vector<double> &gridJacobian(gridPtr->Jacobian());
657 
658 #pragma omp barrier
659  if(globalPtr){
660 #pragma omp master
661  {
662  globalPtr->FunctionEntry("ArtificialDissipation");
663  }
664  }
665 
666  double dilRamp = 10.0;
667  double sigmaDil = inputSigmaDil;
668  double sigmaDiss = inputSigma;
669  double dilCutoff = -1.5;
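 // The dissipationweight kernel combines these inputs into the per-point weight
 // sigmaDissipation: sigmaDiss sets the base strength, while sigmaDil, dilRamp,
 // and dilCutoff appear to increase the weight where the velocity divergence
 // (velDiv) drops below dilCutoff, i.e. in strongly compressive regions. The
 // exact blending lives in the Fortran kernel, not here.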
670 
671  FC_MODULE(satutil,dissipationweight,SATUTIL,DISSIPATIONWEIGHT)
672  (&numDim,&bufferSizes[0],&numPointsBuffer,dOpInterval,&sigmaDiss,
673  &sigmaDil,&dilRamp,&dilCutoff,velDiv,sigmaDissipation);
674 
675  // Communicate RHS
676  for(int iDim = 0;iDim < numDim;iDim++){
677 
678  int dissDir = iDim + 1;
679 
680  // Get Ram's alpha weight
681  FC_MODULE(metricops,alphaweight,METRICOPS,ALPHAWEIGHT)
682  (&numDim,&numPointsBuffer,&bufferSizes[0],dOpInterval,
683  &gridType,&gridMetric[0],&dissDir,alphaDissipation);
684 
685  // Copy State into fluxBuffer
687  for(int iEquation= 0;iEquation < numEquations;iEquation++){
688 
689  double *iFlux = &inviscidFluxBuffer[iEquation*numPointsBuffer];
690  double *stateBuf = myStateBuffers[iEquation];
691 
692 
694  (&numDim,&numPointsBuffer,&bufferSizes[0],dOpInterval,
695  stateBuf,iFlux);
696  }
697 
698  ExchangeBuffer(fluxMessageId,numEquations,inviscidFluxBuffer,threadId);
699 
700 #pragma omp barrier
701 
702  // Apply dissipation operator on state component:
703  ApplyDissipationOperator(iDim,numEquations,inviscidFluxBuffer,dFluxBuffer,false,threadId);
704 
705  for(int iEquation= 0;iEquation < numEquations;iEquation++){
706 
707  double *dFlux = &dFluxBuffer[iEquation*numPointsBuffer];
708  double *iFlux = &inviscidFluxBuffer[iEquation*numPointsBuffer];
709  // Repack the inviscidFluxBuffer by
710  // performing R. Vishnampet's metric-based weighting:
711  // ... Malpha=sqrt(M_{alpha,i} * M_{alpha,i}) ; alpha=dir
712  // ... iFlux = alphaDissipation * dFlux
714  (&numDim,&numPointsBuffer,&bufferSizes[0],dOpInterval,alphaDissipation,
715  dFlux,iFlux);
716  }
717 
718  // Exchange inviscidFluxBuffer for stencil op
719  ExchangeBuffer(fluxMessageId,numEquations,inviscidFluxBuffer,threadId);
720 
721 #pragma omp barrier
722 
723  // Apply split operator on inviscidFluxBuffer:
724  ApplyDissipationOperator(iDim,numEquations,inviscidFluxBuffer,dFluxBuffer,true,threadId);
725 
726  // Update RHS
727  for(int iEquation= 0;iEquation < numEquations;iEquation++){
728 
729  double *dFlux = &dFluxBuffer[iEquation*numPointsBuffer];
730  double *rhsPtr = myRHSBuffers[iEquation];
731 
733  (&numDim,&numPointsBuffer,&bufferSizes[0],dOpInterval,
734  sigmaDissipation,dFlux,rhsPtr);
735 
736  }
737 
738  } // Loop over spatial dimensions
739 #pragma omp barrier
740  {
741  if(globalPtr){
742 #pragma omp master
743  globalPtr->FunctionExit("ArtificialDissipation");
744  }
745  }
746  return(0);
747  }; // Artificial dissipation
748 
749  // No sources (yet)
750  int HandleSources(const pcpp::IndexIntervalType &regionInterval)
751  {return(0);};
752 
 753  // Sources: currently just artificial dissipation (when enabled)
754  int HandleSources(int myThreadId){
755  int returnCode = 0;
756 
757  if(artificialDissipation){
758  returnCode = ArtificialDissipation(myThreadId);
759  }
760 
 761  return(returnCode);
762  };
763 
764 
765  int HandleBoundaryConditions(int threadId)
766  {
768 
769  // note: profile timers cannot be safely used here
770  // due to thread-specific processing performed in this
771  // routine. uncomment bc timers below only for problems
772  // designed to properly measure BC performance.
773 
774  if(!domainBCsPtr)
775  return(0);
776  else if (!subRegionsPtr || !domainBoundariesPtr)
777  return(1);
778 
779  std::vector<BoundaryType> &domainBoundaries(*domainBoundariesPtr);
780  std::vector<BCType> &domainBCs(*domainBCsPtr);
781  std::vector<GridRegionType> &gridRegions(*subRegionsPtr);
782  std::vector<double> &gridMetric(gridPtr->Metric());
783  std::vector<double> &gridJacobian(gridPtr->Jacobian());
784 
785  int numBCs = domainBCs.size();
786  int numGridRegions = gridRegions.size();
787  int numDomainBoundaries = domainBoundaries.size();
788 
789  if(numBCs == 0)
790  return(0);
791  if(numDomainBoundaries == 0)
792  return(0);
793 
794  typename std::vector<BoundaryType>::iterator dbIt = domainBoundaries.begin();
795 
796  std::vector<double> gasParams(6,0.0);
797 
798  double gamma = eosPtr->GetGamma();
799  gasParams[0] = refRe;
800  gasParams[1] = gamma;
801  gasParams[2] = specGasConst;
802  gasParams[3] = temperatureScale;
803  gasParams[4] = refSndSpd;
804  gasParams[5] = std::max(gamma/refPr,5.0/3.0);
805 
806  while(dbIt != domainBoundaries.end()){
807 
808  BoundaryType &domainBoundary(*dbIt++);
809  GridRegionType &gridRegion(gridRegions[domainBoundary.GridRegionID()]);
810  std::vector<size_t>
811  &threadPartBufferIndices(gridRegion.threadPartitionBufferIndices[threadId]);
812  size_t numBoundaryPoints = threadPartBufferIndices.size();
813 
814  if(numBoundaryPoints > 0){ // this rank/thread owns part of the boundary
815 
816  BCType &boundaryCondition(domainBoundary.BC());
817  // unique int id for bc
818  int bcType = boundaryCondition.BCType();
819 
820  if(bcType == navierstokes::HOLE)
821  continue;
822 
823  int normalDir = gridRegion.normalDirection;
824  pcpp::IndexIntervalType &regionInterval(gridRegion.regionInterval);
825  std::vector<size_t> regionSizes(regionInterval.Sizes());
826  size_t numPointsRegion = regionInterval.NNodes();
827  pcpp::IndexIntervalType &regionOpBufferInterval(gridRegion.threadPartitionBufferIntervals[threadId]);
828  pcpp::IndexIntervalType &regionOpInterval(gridRegion.threadPartitionIntervals[threadId]);
829 
830  // bc-associated parameters from input file
832  StateType &bcParamState(boundaryCondition.Params());
833  double *bcParams = bcParamState.template GetFieldBuffer<double>("Numbers");
834  int *bcFlags = bcParamState.template GetFieldBuffer<int>("Flags");
835 
836  const std::string &bcName(boundaryCondition.BCName());
837  const std::string &regionName(gridRegion.regionName);
838  const std::string &boundaryName(domainBoundary.Name());
839 
840 // std::cout << "bcName " << bcName << " regionName " << regionName << " boundaryName "
841 // << boundaryName << " bcType " << bcType << std::endl;
842 // int numParams = bcParamState.GetFieldMetaData("Numbers").ncomp;
843 // std::cout << "&bcParams = " << bcParams << std::endl
844 // << "Number of parameters: " << numParams << std::endl;
845 // for(int i=0; i<numParams; i++){
846 // std::cout << "i " << i << " bcParams[i] " << bcParams[i] << std::endl;
847 // }
848 
849  switch(bcType) {
850 
852  // ** see timer note above
853  // globalPtr->FunctionEntry("Sponge");
854  if(normalDir != 0){
855  // bcParams[] = {spongeAmplitude,spongePower}
856  navierstokes::LinearSponge(bufferSizes,normalDir,regionInterval,regionOpInterval,
857  regionOpBufferInterval,bcParams,bcFlags,
858  numEquations,myStateBuffers,targetStateBuffers,myRHSBuffers);
859  } else {
860  // Sponge blobs are Gaussian
861  // bcParams[] = {spongeAmplitude,spongeWidth}
862  navierstokes::SpongeZone(bufferSizes,normalDir,regionInterval,regionOpInterval,
863  regionOpBufferInterval,bcParams,bcFlags,iMask,holeMask,
864  numEquations,myStateBuffers,targetStateBuffers,myRHSBuffers);
865  }
866  // globalPtr->FunctionExit("Sponge");
867  break;
868 
870  // globalPtr->FunctionEntry("Farfield");
871  bcParams[2] = boundaryWeight;
872  // Call SAT Farfield kernel
873  FC_MODULE(satutil,farfield,SATUTIL,FARFIELD)
874  ( &numDim,&bufferSizes[0],&numPointsBuffer,&normalDir,&regionSizes[0],
875  &numPointsRegion,&numBoundaryPoints,&threadPartBufferIndices[0],
876  &gridType,&gridMetric[0],&gridJacobian[0],bcParams,&gasParams[0],
877  rhoPtr,rhoVPtr,rhoEPtr,viscidFluxBuffer,&numScalar,scalarPtr,
878  rhoRHSPtr,rhoVRHSPtr,rhoERHSPtr,scalarRHSPtr,
879  rhoTargetPtr,rhoVTargetPtr,rhoETargetPtr,scalarTargetPtr);
880 
881  // globalPtr->FunctionExit("Farfield");
882  break;
883 
885  if(refRe > 0){
886  if(numScalar > 0) {
887  std::ostringstream errMsg;
888  errMsg << "NavierStokesRHS::HandleBoundaryConditions: Error SAT_NOSLIP_ISOTHERMAL "
889  << "is not implemented for scalar transport. Aborting." << std::endl;
890  if(globalPtr){
891  globalPtr->StdOut(errMsg.str());
892  } else {
893  std::cout << errMsg.str();
894  }
895  return(1);
896  }
897  bcParams[3] = boundaryWeight;
898 
899  // Call SAT NOSLIP of PlasComCM
900  FC_MODULE(satutil,noslip_isothermal,SATUTIL,NOSLIP_ISOTHERMAL)
901  ( &numDim,&bufferSizes[0],&numPointsBuffer,&normalDir,&regionSizes[0],
902  &numPointsRegion,&numBoundaryPoints,&threadPartBufferIndices[0],&gridType,
903  &gridMetric[0],&gridJacobian[0],&bcParams[0],&gasParams[0],rhoPtr,rhoVPtr,
904  rhoEPtr,&numScalar,scalarPtr,rhoRHSPtr,rhoVRHSPtr,rhoERHSPtr,
905  scalarRHSPtr,rhoTargetPtr,rhoVTargetPtr,rhoETargetPtr,
906  scalarTargetPtr,muPtr,lambdaPtr);
907 
908  } else {
909 
910  std::ostringstream errMsg;
911  errMsg << "NavierStokesRHS::HandleBoundaryConditions:Error: SAT_NOSLIP_ISOTHERMAL "
912  << "not a valid BC for inviscid (Re = 0) flows. Aborting." << std::endl;
913  if(globalPtr){
914  globalPtr->StdOut(errMsg.str());
915  } else {
916  std::cout << errMsg.str();
917  }
918  return(1);
919  }
920 
921  break;
923  bcParams[1] = boundaryWeight;
924 
925  FC_MODULE(satutil,slip_adiabatic,SATUTIL,SLIP_ADIABATIC)
926  ( &numDim,&bufferSizes[0],&numPointsBuffer,&normalDir,&regionSizes[0],
927  &numPointsRegion,&numBoundaryPoints,&threadPartBufferIndices[0],&gridType,
928  &gridMetric[0],&gridJacobian[0],bcParams,&gasParams[0],rhoPtr,rhoVPtr,
929  rhoEPtr,&numScalar,scalarPtr,rhoRHSPtr,rhoVRHSPtr,rhoERHSPtr,
930  scalarRHSPtr,rhoTargetPtr,rhoVTargetPtr,rhoETargetPtr,scalarTargetPtr);
931 
932  break;
933  default:
934  std::ostringstream errMsg;
935  errMsg << "NavierStokesRHS::HandleBoundaryConditions:Error: Could not resolve BC("
936  << bcName << ") = " << bcType << ". Aborting." << std::endl;
937  if(globalPtr){
938  globalPtr->StdOut(errMsg.str());
939  } else {
940  std::cout << errMsg.str() << std::endl;
941  }
942  return(1);
943  break;
944 
945  }
946 
947  }
948  }
949 
950  return(0);
951 
952  };
953 
954  int ApplyOperator(int dDir,int numComponents,double *uBuffer,double *duBuffer,
955  int threadId)
956  {
957 
958  size_t dirOffset = dDir*numPointsBuffer;
959  size_t *dOpInterval = &fortranPartitionBufferIntervals[threadId][0];
960 
961  int one = 1;
962  dDir++; // forticate direction
963 
964 #pragma omp barrier
965  if(globalPtr){
966 #pragma omp master
967  {
968  globalPtr->FunctionEntry("ApplyOperator");
969  }
970  }
971 
972  // applyoperator does equation-at-a-time
973  for(int iComp = 0;iComp < numComponents;iComp++){
974  size_t compOffset = iComp*numPointsBuffer;
976  (&numDim,&bufferSizes[0],&one,&numPointsBuffer,
977  &dDir,dOpInterval,&numStencils,stencilSizes,
978  stencilStarts,&numStencilValues,stencilWeights,
979  stencilOffsets,&stencilConn[dirOffset],
980  &uBuffer[compOffset],&duBuffer[compOffset]);
981  }
982  // applyoperatorv does numComponents (i.e. number of equations)
983  // loop on the inside
984  // FC_MODULE(operators,applyoperatorv,OPERATORS,APPLYOPERATORV)
985  // (&numDim,&bufferSizes[0],&one,&numPointsBuffer,
986  // &dDir,opInterval,&numStencils,stencilSizes,
987  // stencilStarts,&numStencilValues,stencilWeights,
988  // stencilOffsets,&stencilConn[dirOffset],
989  // &uBuffer[compOffset],&duBuffer[compOffset]);
990 
991 #pragma omp barrier
992  if(globalPtr){
993 #pragma omp master
994  {
995  globalPtr->FunctionExit("ApplyOperator");
996  }
997  }
998 
999  return(0);
1000  };
1001 
1002  int ApplyDissipationOperator(int dDir,int numComponents,double *uBuffer,double *duBuffer,
1003  bool split = false,int threadId = 0)
1004  {
1005 
1006  size_t dirOffset = dDir*numPointsBuffer;
1007  size_t *dOpInterval = &fortranPartitionBufferIntervals[threadId][0];
1008 
1009  int one = 1;
1010  dDir++; // forticate direction
1011 
1012 #pragma omp barrier
1013  if(globalPtr){
1014 #pragma omp master
1015  {
1016  globalPtr->FunctionEntry("ApplyDissOperator");
1017  }
1018  }
1019 
1020  int numStencilsDiss = 0;
1021  int *stencilSizesDiss = NULL;
1022  int *stencilStartsDiss = NULL;
1023  int numStencilValuesDiss = 0;
1024  double *stencilWeightsDiss = NULL;
1025  int *stencilOffsetsDiss = NULL;
1026  int *stencilConnDiss = NULL;
1027 
1028  // Assign dissipation stencil data
1029  if(split){
1030  numStencilsDiss = artDissOperatorSplit.numStencils;
1031  stencilSizesDiss = artDissOperatorSplit.stencilSizes;
1032  stencilStartsDiss = artDissOperatorSplit.stencilStarts;
1033  stencilWeightsDiss = artDissOperatorSplit.stencilWeights;
1034  stencilOffsetsDiss = artDissOperatorSplit.stencilOffsets;
1035  stencilConnDiss = artDissConnSplit;
1036  } else {
1037  numStencilsDiss = artDissOperator.numStencils;
1038  stencilSizesDiss = artDissOperator.stencilSizes;
1039  stencilStartsDiss = artDissOperator.stencilStarts;
1040  stencilWeightsDiss = artDissOperator.stencilWeights;
1041  stencilOffsetsDiss = artDissOperator.stencilOffsets;
1042  stencilConnDiss = artDissConn;
1043  }
1044 
1045  // applyoperator does equation-at-a-time
1046  for(int iComp = 0;iComp < numComponents;iComp++){
1047 
1048  size_t compOffset = iComp*numPointsBuffer;
1049 
1051  (&numDim,&bufferSizes[0],&one,&numPointsBuffer,
1052  &dDir,dOpInterval,&numStencilsDiss,stencilSizesDiss,
1053  stencilStartsDiss,&numStencilValuesDiss,stencilWeightsDiss,
1054  stencilOffsetsDiss,&stencilConnDiss[dirOffset],
1055  &uBuffer[compOffset],&duBuffer[compOffset]);
1056  }
1057  // applyoperatorv does numComponents (i.e. number of equations)
1058  // loop on the inside
1059  // FC_MODULE(operators,applyoperatorv,OPERATORS,APPLYOPERATORV)
1060  // (&numDim,&bufferSizes[0],&one,&numPointsBuffer,
1061  // &dDir,opInterval,&numStencils,stencilSizes,
1062  // stencilStarts,&numStencilValues,stencilWeights,
1063  // stencilOffsets,&stencilConn[dirOffset],
1064  // &uBuffer[compOffset],&duBuffer[compOffset]);
1065 
1066 #pragma omp barrier
1067  if(globalPtr){
1068 #pragma omp master
1069  {
1070  globalPtr->FunctionExit("ApplyDissOperator");
1071  }
1072  }
1073 
1074  return(0);
1075  };
1076 
1077  void GridScaleRHS(int threadId){
1078 
1079  if(globalPtr){
1080 #pragma omp barrier
1081 #pragma omp master
1082  {
1083  globalPtr->FunctionEntry("GridScaleRHS");
1084  }
1085  }
1086 
1087  size_t *dOpInterval = &fortranPartitionBufferIntervals[threadId][0];
1088  std::vector<double> &gridJacobian(gridPtr->Jacobian());
1089 
1090  // if(gridType > simulation::grid::UNIRECT){
1091  for(int iEquation = 0;iEquation < numEquations;iEquation++){
1092  double *rhsPtr = myRHSBuffers[iEquation];
1094  (&numDim,&numPointsBuffer,&bufferSizes[0],dOpInterval,&gridJacobian[0],rhsPtr);
1095  }
1096  // } else {
1097  // for(int iEquation = 0;iEquation < numEquations;iEquation++){
1098  // double *rhsPtr = myRHSBuffers[iEquation];
1099  // FC_MODULE(operators,xax,OPERATORS,XAX)
1100  // (&numDim,&numPointsBuffer,&bufferSizes[0],dOpInterval,&gridJacobian[0],rhsPtr);
1101  // }
1102  // }
1103 
1104  if(globalPtr){
1105 #pragma omp barrier
1106 #pragma omp master
1107  {
1108  globalPtr->FunctionExit("GridScaleRHS");
1109  }
1110  }
1111  };
1112 
1113 
1114  void AccumulateToRHS(double a,double *dfBuffer,int threadId)
1115  {
1116  if(globalPtr){
1117 #pragma omp barrier
1118 #pragma omp master
1119  {
1120  globalPtr->FunctionEntry("AccumulateRHS");
1121  }
1122  }
1123 
1124  size_t *dOpInterval = &fortranPartitionBufferIntervals[threadId][0];
1125 
1126  // Y = a*X + Y
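 // The per-equation kernel call below performs this Y += a*X update over the
 // thread's dOpInterval, with X = component iComp of dfBuffer and Y = the
 // matching RHS buffer.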
1127 
1128  for(int iComp = 0;iComp < numEquations;iComp++){
1130  (&numDim,&numPointsBuffer,&bufferSizes[0],dOpInterval,&a,
1131  &dfBuffer[iComp*numPointsBuffer],myRHSBuffers[iComp]);
1132  }
1133 
1134 
1135  if(globalPtr){
1136 #pragma omp barrier
1137 #pragma omp master
1138  {
1139  globalPtr->FunctionExit("AccumulateRHS");
1140  }
1141  }
1142 
1143  };
1144 
1145 
1146  int ComputeDV(int threadId)
1147  {
1148 
1149  if(globalPtr){
1150 #pragma omp barrier
1151 #pragma omp master
1152  {
1153  globalPtr->FunctionEntry("ComputeDV");
1154  }
1155  }
1156 
1157  // Need non-forticated intervals here
1158  const std::vector<pcpp::IndexIntervalType>
1159  &threadIntervals(gridPtr->ThreadPartitionBufferIntervals());
1160 
1161  const pcpp::IndexIntervalType &regionInterval(threadIntervals[threadId]);
1162  std::vector<size_t> &fortranOpInterval(fortranPartitionBufferIntervals[threadId]);
1163 
1164  double zero = 0.0;
1165  int returnCode = 0;
1166  // Computed on local partition only
1167  // 1/rho, velocity, internal energy
1168  returnCode = euler::util::ComputeDVBuffer(regionInterval,bufferSizes,
1169  myStateBuffers,dvBuffers);
1170 
1171  // compute pressure and temperature from the eos
1172  returnCode = eosPtr->ComputePressureBuffer(regionInterval,bufferSizes);
1173  returnCode = eosPtr->ComputeTemperatureBuffer(regionInterval,bufferSizes);
1174 
1175  if(!returnCode && exchangeDV){
1176  returnCode = ExchangeDV(threadId);
1177  }
1178 
1179  if(globalPtr){
1180 #pragma omp barrier
1181 #pragma omp master
1182  {
1183  globalPtr->FunctionExit("ComputeDV");
1184  }
1185  }
1186 
1187  return(returnCode);
1188 
1189  };
1190 
1191  int ComputeTV(int threadId)
1192  {
1193  if(globalPtr){
1194 #pragma omp barrier
1195 #pragma omp master
1196  {
1197  globalPtr->FunctionEntry("ComputeTV");
1198  }
1199  }
1200 
1201  // Need non-forticated intervals here
1202  const std::vector<pcpp::IndexIntervalType>
1203  &threadIntervals(gridPtr->ThreadPartitionBufferIntervals());
1204 
1205  const pcpp::IndexIntervalType &regionInterval(threadIntervals[threadId]);
1206  std::vector<size_t> &fortranOpInterval(fortranPartitionBufferIntervals[threadId]);
1207 
1208  int returnCode = 0;
1209  // Computed on local partition only
1210  if(transportModel == POWER) {
1211  double heatCapacityCp = eosPtr->GetHeatCapacityCp();
1212  returnCode = viscid::util::ComputeTVBufferPower(regionInterval,bufferSizes,
1213  temperaturePtr,tvBuffers,
1214  beta,power,bulkViscFac,heatCapacityCp,refPr);
1215  } else {
1216  std::ostringstream errMsg;
1217  errMsg << "NavierStokesRHS::RHS:Error: Unknown transport model in NavierStokesRHS. Aborting."
1218  << std::endl;
1219  if(globalPtr){
1220  globalPtr->StdOut(errMsg.str());
1221  globalPtr->FunctionExit("ComputeTV");
1222  } else {
1223  std::cout << errMsg.str();
1224  }
1225 
1226  return(1);
1227  }
1228 
1229  if(globalPtr){
1230 #pragma omp barrier
1231 #pragma omp master
1232  {
1233  globalPtr->FunctionExit("ComputeTV");
1234  }
1235  }
1236 
1237  return(returnCode);
1238  };
1239 
1240  int InviscidFlux(int iDim,int threadId)
1241  {
 1242  // - Inviscid flux - (local if exchangeFlux, +halos otherwise)
1243 
1244  if(globalPtr){
1245 #pragma omp barrier
1246 #pragma omp master
1247  {
1248  globalPtr->FunctionEntry("InviscidFlux");
1249  }
1250  }
1251 
1252  size_t *fluxOpInterval = NULL;
1253  if(exchangeFlux){
1254  fluxOpInterval = &fortranPartitionBufferIntervals[threadId][0];
1255  } else {
1256  fluxOpInterval = &fortranBufferIntervals[threadId][0];
1257  }
1258 
1259  if(globalPtr){
1260 #pragma omp barrier
1261 #pragma omp master
1262  {
1263  globalPtr->FunctionEntry("ComputeVHat");
1264  }
1265  }
1266 
1267  size_t dirOffset = iDim*numPointsBuffer;
1268  std::vector<double> &gridMetric(gridPtr->Metric());
1269  double *velocity = &velocityPtr[0];
1270  int fluxDim = iDim + 1;
1271 
1272  // 0. Get vHat(iDim) = v * Xi_Hat(iDim)
1273  FC_MODULE(metricops,vhatcomponent,METRICOPS,VHATCOMPONENT)
1274  (&numDim,&numPointsBuffer,&bufferSizes[0],fluxOpInterval,
1275  &gridType,&gridMetric[0],&fluxDim,&velocity[0],velHat);
1276 
1277  if(globalPtr){
1278 #pragma omp barrier
1279 #pragma omp master
1280  {
1281  globalPtr->FunctionExit("ComputeVHat");
1282  globalPtr->FunctionEntry("Flux1D");
1283  }
1284  }
1285 
1286  // 1. get f(iDim,1) = rho*vHat(iDim)
1287  // 2. get f(iDim,n={2,3,4}) = rho*vHat(iDim)*v(n) + p*delta_ij
1288  // 3. get f(iDim,numDim+2) = (rhoE + p)*vHat(iDim)
1289  FC_MODULE(euler,flux1d,EULER,FLUX1D)
1290  (&numDim,&numPointsBuffer,&bufferSizes[0],fluxOpInterval,
1291  &fluxDim,&gridType,&gridMetric[0],rhoPtr,rhoVPtr,rhoEPtr,
1292  &velHat[0],pressurePtr,inviscidFluxBuffer);
1293 
1294 // std::ofstream Ouf;
1295 // std::ostringstream oufName;
1296 // oufName << "iflux_" << fluxDim << "_" << (tauCounter-1);
1297 // Ouf.open(oufName.str().c_str());
1298 // Ouf << std::setprecision(12);
1299 // Ouf << numEquations << " " << numPointsBuffer << std::endl;
1300 // double *tPtr = inviscidFluxBuffer;
1301 // for(int iComp = 0;iComp < numEquations;iComp++){
1302 // for(size_t iPoint = 0;iPoint < numPointsBuffer;iPoint++)
1303 // Ouf << tPtr[iComp*numPointsBuffer+iPoint] << " ";
1304 // Ouf << std::endl;
1305 // }
1306 // Ouf << std::endl;
1307 // Ouf.close();
1308 
1309 
1310  if(globalPtr){
1311 #pragma omp barrier
1312 #pragma omp master
1313  {
1314  globalPtr->FunctionExit("Flux1D");
1315  }
1316  }
1317 
1318  if(numScalar > 0) {
1319 
1320  if(globalPtr){
1321 #pragma omp barrier
1322 #pragma omp master
1323  {
1324  globalPtr->FunctionEntry("ScalarFlux1D");
1325  }
1326  }
1327 
1328  // 4. get f(iDim,numDim+3:+) = Y_n*vHat(iDim) [scalar transport]
1329  FC_MODULE(euler,scalarflux1d,EULER,SCALARFLUX1D)
1330  (&numDim,&numPointsBuffer,&bufferSizes[0],fluxOpInterval,
1331  &numScalar,scalarPtr,&velHat[0],inviscidFluxBuffers[numDim+2]);
1332 
1333  if(globalPtr){
1334 #pragma omp barrier
1335 #pragma omp master
1336  {
1337  globalPtr->FunctionExit("ScalarFlux1D");
1338  }
1339  }
1340 
1341  }
1342 
1343  if(globalPtr){
1344 #pragma omp barrier
1345 #pragma omp master
1346  {
1347  globalPtr->FunctionExit("InviscidFlux");
1348  }
1349  }
1350 
1351  return(0);
1352  };
1353 
1354  int ComputeVelDiv(int threadId) // assumes velgrad is already computed
1355  {
1356  if(globalPtr){
1357 #pragma omp barrier
1358 #pragma omp master
1359  {
1360  globalPtr->FunctionEntry("ComputeVelDiv");
1361  }
1362  }
1363 
1364  const std::vector<pcpp::IndexIntervalType>
1365  &threadIntervals(gridPtr->ThreadPartitionBufferIntervals());
1366  const pcpp::IndexIntervalType &regionInterval(threadIntervals[threadId]);
1367  std::vector<size_t> &fortranOpInterval(fortranPartitionBufferIntervals[threadId]);
1368  std::vector<double> &gridMetric(gridPtr->Metric());
1369  std::vector<double> &gridJacobian(gridPtr->Jacobian());
1370 
1371  // Zero the veldiv buffer [kernel used below is of (+=) type]
1372  double zero = 0.0;
1374  (&numDim,&numPointsBuffer,&bufferSizes[0],
1375  &fortranOpInterval[0],&zero,velDiv);
1376 
1377  FC_MODULE(metricops,ijkgradtoxyzdiv,METRICOPS,IJKGRADTOXYZDIV)
1378  (&numDim,&numPointsBuffer,&bufferSizes[0],&fortranOpInterval[0],
1379  &gridType,&gridJacobian[0],&gridMetric[0],velGradBufferPtr,
1380  velDiv);
1381 
1382  if(globalPtr){
1383 #pragma omp barrier
1384 #pragma omp master
1385  {
1386  globalPtr->FunctionExit("ComputeVelDiv");
1387  }
1388  }
1389 
1390  return(0);
1391 
1392  };
1393 
1394  int ComputeVelGrad(int threadId)
1395  {
1396 
1397  if(globalPtr){
1398 #pragma omp barrier
1399 #pragma omp master
1400  {
1401  globalPtr->FunctionEntry("ComputeVelGrad");
1402  }
1403  }
1404 
1405 
1406  // calculate dV_j/d(Xi)_i
1407  for(int i=0;i<numDim;i++){
1408  ApplyOperator(i,numDim,velocityPtr,velGradBuffers[i],threadId);
1409  }
1410 
1411  if(globalPtr){
1412 #pragma omp barrier
1413 #pragma omp master
1414  {
1415  globalPtr->FunctionExit("ComputeVelGrad");
1416  }
1417  }
1418 
1419  return(0);
1420  };
1421 
1422  int ComputeTemperatureGrad(int threadId)
1423  {
1424  if(globalPtr){
1425 #pragma omp barrier
1426 #pragma omp master
1427  {
1428  globalPtr->FunctionEntry("ComputeTempGrad");
1429  }
1430  }
1431 
1432  // Need non-forticated intervals here
1433  const std::vector<pcpp::IndexIntervalType>
1434  &threadIntervals(gridPtr->ThreadPartitionBufferIntervals());
1435  const pcpp::IndexIntervalType &regionInterval(threadIntervals[threadId]);
1436  std::vector<size_t> &fortranOpInterval(fortranPartitionBufferIntervals[threadId]);
1437 
1438  // calculate dT/dX_j
1439  for(int i=0;i<numDim;i++){
1440  ApplyOperator(i,1,temperaturePtr,temperatureGradBuffers[i],threadId);
1441  }
1442 
1443  if(globalPtr){
1444 #pragma omp barrier
1445 #pragma omp master
1446  {
1447  globalPtr->FunctionExit("ComputeTempGrad");
1448  }
1449  }
1450 
1451  return(0);
1452  };
1453 
1454  int ComputeStressTensor(int threadId)
1455  {
1456  if(globalPtr){
1457 #pragma omp barrier
1458 #pragma omp master
1459  {
1460  globalPtr->FunctionEntry("ComputeStressTensor");
1461  }
1462  }
1463 
1464  const std::vector<pcpp::IndexIntervalType>
1465  &threadIntervals(gridPtr->ThreadPartitionBufferIntervals());
1466  const pcpp::IndexIntervalType &regionInterval(threadIntervals[threadId]);
1467  std::vector<double> &gridMetric(gridPtr->Metric());
1468  std::vector<double> &gridJacobian(gridPtr->Jacobian());
1469 
1470  int returnCode = viscid::util::ComputeTauBuffer(regionInterval, bufferSizes,
1471  gridType,gridMetric, gridJacobian, refRe,
1472  tvBuffers, velGradBuffers, tauBuffers);
1473 
1474 // std::ofstream Ouf;
1475 // std::ostringstream oufName;
1476 // oufName << "tau_" << tauCounter++;
1477 // Ouf.open(oufName.str().c_str());
1478 // Ouf << std::setprecision(12);
1479 // int numComp = tauBuffers.size();
1480 // Ouf << numComp << " " << numPointsBuffer << std::endl;
1481 // std::vector<double *>::iterator tauBufferIt = tauBuffers.begin();
1482 // while(tauBufferIt != tauBuffers.end()){
1483 // double *tPtr = *tauBufferIt++;
1484 // for(size_t iPoint = 0;iPoint < numPointsBuffer;iPoint++)
1485 // Ouf << tPtr[iPoint] << " ";
1486 // Ouf << std::endl;
1487 // }
1488 // Ouf << std::endl;
1489 // Ouf.close();
1490 
1491  if(globalPtr){
1492 #pragma omp barrier
1493 #pragma omp master
1494  {
1495  globalPtr->FunctionExit("ComputeStressTensor");
1496  }
1497  }
1498 
1499  return(0);
1500  };
1501 
1502  int ComputeHeatFlux(int threadId)
1503  {
1504 
1505  if(globalPtr){
1506 #pragma omp barrier
1507 #pragma omp master
1508  {
1509  globalPtr->FunctionEntry("ComputeHeatFlux");
1510  }
1511  }
1512 
1513  const std::vector<pcpp::IndexIntervalType>
1514  &threadIntervals(gridPtr->ThreadPartitionBufferIntervals());
1515  const pcpp::IndexIntervalType &regionInterval(threadIntervals[threadId]);
1516  std::vector<double> &gridMetric(gridPtr->Metric());
1517  std::vector<double> &gridJacobian(gridPtr->Jacobian());
1518 
1519 
1520  int returnCode = viscid::util::ComputeHeatFluxBuffer(regionInterval, bufferSizes,
1522  tvBuffers, temperatureGradBuffers, heatFluxBuffers);
1523 
1524 // std::ofstream Ouf;
1525 // std::ostringstream oufName;
1526 // oufName << "q_" << (tauCounter-1);
1527 // Ouf.open(oufName.str().c_str());
1528 // Ouf << std::setprecision(12);
1529 // int numComp = heatFluxBuffers.size();
1530 // Ouf << numComp << " " << numPointsBuffer << std::endl;
1531 // std::vector<double *>::iterator tauBufferIt = heatFluxBuffers.begin();
1532 // while(tauBufferIt != heatFluxBuffers.end()){
1533 // double *tPtr = *tauBufferIt++;
1534 // for(size_t iPoint = 0;iPoint < numPointsBuffer;iPoint++)
1535 // Ouf << tPtr[iPoint] << " ";
1536 // Ouf << std::endl;
1537 // }
1538 // Ouf << std::endl;
1539 // Ouf.close();
1540 
1541 
1542  if(globalPtr){
1543 #pragma omp barrier
1544 #pragma omp master
1545  {
1546  globalPtr->FunctionExit("ComputeHeatFlux");
1547  }
1548  }
1549  return(0);
1550  };
1551 
1552  int ComputeViscidEnergyFlux(int threadId)
1553  {
1554 
1555  if(globalPtr){
1556 #pragma omp barrier
1557 #pragma omp master
1558  {
1559  globalPtr->FunctionEntry("ViscidEnergyFlux");
1560  }
1561  }
1562 
1563  const std::vector<pcpp::IndexIntervalType>
1564  &threadIntervals(gridPtr->ThreadPartitionBufferIntervals());
1565  const pcpp::IndexIntervalType &regionInterval(threadIntervals[threadId]);
1566  const size_t *opInterval = &fortranPartitionBufferIntervals[threadId][0];
1567 
1568  for(int iDim = 0;iDim < numDim;iDim++){
1569 
1570  // Compute Q_i = -q_i + (v_j * tau_i_j)
1571  for(int iVel = 0;iVel < numDim;iVel++){
1572  int tensorIndex = iDim*numDim + iVel;
1573  size_t dirOffset = iVel*numPointsBuffer;
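 // The kernel call below accumulates velocity[iVel]*tau[iDim*numDim+iVel] into
 // heatFluxBuffers[iDim] (apparently a pointwise z += x*y update), completing
 // Q_i = -q_i + v_j*tau_ij since heatFluxBuffers already holds -q_i.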
1574 
1576  (&numDim,&numPointsBuffer,&bufferSizes[0],opInterval,
1577  &velocityPtr[dirOffset],tauBuffers[tensorIndex],heatFluxBuffers[iDim]);
1578 
1579  }
1580  }
1581 
1582  if(globalPtr){
1583 #pragma omp barrier
1584 #pragma omp master
1585  {
1586  globalPtr->FunctionExit("ViscidEnergyFlux");
1587  }
1588  }
1589  return(0);
1590  };
1591 
1592  int ViscidFlux(int iDim,int threadId)
1593  {
1594  if(globalPtr){
1595 #pragma omp barrier
1596 #pragma omp master
1597  {
1598  globalPtr->FunctionEntry("ViscidFlux");
1599  }
1600  }
1601 
1602  size_t *fluxOpInterval = NULL;
1603  if(exchangeFlux){
1604  fluxOpInterval = &fortranPartitionBufferIntervals[threadId][0];
1605  } else {
1606  fluxOpInterval = &fortranBufferIntervals[threadId][0];
1607  }
1608  std::vector<double> &gridMetric(gridPtr->Metric());
1609 
1610  // stress tensor indexing
1611  // 2D, x: 0, 1
1612  // y: 1, 2
1613  // 3D, x: 0, 1, 2
1614  // y: 1, 3, 4
1615  // z: 2, 4, 5
1616  int fluxDim = iDim + 1;
1617  int tau1=iDim*numDim;
1618  int tau2=tau1+1;
1619  int tau3=tau2+1;
1620 
1621 
1622  // viscous fluxes
1623  FC_MODULE(viscid,strongflux1d,VISCID,STRONGFLUX1D)
1624  (&numDim,&fluxDim,&bufferSizes[0],&numPointsBuffer,fluxOpInterval,
1625  &gridType,&gridMetric[0],tauBufferPtr,heatFluxBufferPtr,
1626  viscidFluxBuffer);
1627 
1628 // std::ofstream Ouf;
1629 // std::ostringstream oufName;
1630 // oufName << "vflux_" << fluxDim << "_" << (tauCounter-1);
1631 // Ouf.open(oufName.str().c_str());
1632 // Ouf << std::setprecision(12);
1633 // Ouf << numEquations << " " << numPointsBuffer << std::endl;
1634 // double *tPtr = viscidFluxBuffer;
1635 // for(int iComp = 1;iComp < numEquations;iComp++){
1636 // for(size_t iPoint = 0;iPoint < numPointsBuffer;iPoint++)
1637 // Ouf << tPtr[iComp*numPointsBuffer+iPoint] << " ";
1638 // Ouf << std::endl;
1639 // }
1640 // Ouf << std::endl;
1641 // Ouf.close();
1642 
1643 
1644  if(globalPtr){
1645 #pragma omp barrier
1646 #pragma omp master
1647  {
1648  globalPtr->FunctionExit("ViscidFlux");
1649  }
1650  }
1651 
1652  return(0);
1653  };
1654 
1655 
1657  void AccumulateViscousFluxes(double a,std::vector<double *> &X,std::vector<double *> &Y,
1658  int threadId,bool fullInterval = false)
1659  {
1660 
1661  size_t *bufferOpInterval = NULL;
1662  if(fullInterval){
1663  bufferOpInterval = &fortranPartitionBufferIntervals[threadId][0];
1664  } else {
1665  bufferOpInterval = &fortranBufferIntervals[threadId][0];
1666  }
1667 
1668  for(int iComp = 1;iComp < numDim+2;iComp++){
1670  (&numDim,&numPointsBuffer,&bufferSizes[0],bufferOpInterval,&a,
1671  X[iComp],Y[iComp]);
1672  }
1673  }
1674 
1675  // ============= Messaging =================
1676  void PackDVMessage(int threadId)
1677  {
1678  if(threadId == 0){
1679  for(int iDim = 0;iDim < numDV;iDim++)
1680  haloPtr->PackMessageBuffers(dvMessageId,iDim,1,dvBuffers[iDim]);
1681  }
1682  return;
1683  };
1684 
1685  void PackBufferMessage(int messageId,int numEquations,double *dataBuffer,int threadId)
1686  {
1687  if(threadId == 0){
1688  haloPtr->PackMessageBuffers(messageId,0,numEquations,dataBuffer);
1689  }
1690  };
1691  void UnpackBufferMessage(int messageId,int numEquations,double *dataBuffer,int threadId)
1692  {
1693  if(threadId == 0){
1694  haloPtr->UnPackMessageBuffers(messageId,0,numEquations,dataBuffer);
1695  }
1696  }
1697  void ExchangeBufferMessage(int messageId) // call with threadid == 0 *only* (for now)
1698  {
1699  haloPtr->SendMessage(messageId,gridNeighbors,*gridCommPtr);
1700  haloPtr->ReceiveMessage(messageId,gridNeighbors,*gridCommPtr);
1701  };
1702 
1703  void UnpackDVMessage(int threadId)
1704  {
1705  if(threadId == 0){
1706  for(int iDim = 0;iDim < numDV;iDim++)
1707  haloPtr->UnPackMessageBuffers(dvMessageId,iDim,1,dvBuffers[iDim]);
1708  }
1709  return;
1710  };
1711 
1712  void ExchangeDVMessage() // call with threadid == 0 *only* (for now)
1713  {
1714  haloPtr->SendMessage(dvMessageId,gridNeighbors,*gridCommPtr);
1715  haloPtr->ReceiveMessage(dvMessageId,gridNeighbors,*gridCommPtr);
1716  };
1717 
1718  int ExchangeDV(int threadId)
1719  {
1720  if(globalPtr){
1721 #pragma omp barrier
1722 #pragma omp master
1723  {
1724  globalPtr->FunctionEntry("ExchangeDV");
1725  }
1726  }
1727 
1728  PackDVMessage(threadId);
1729 #pragma omp barrier
1730 #pragma omp master
1731  {
1732  ExchangeDVMessage();
1733  }
1734 #pragma omp barrier
1735  UnpackDVMessage(threadId);
1736  if(globalPtr){
1737 #pragma omp barrier
1738 #pragma omp master
1739  {
1740  globalPtr->FunctionExit("ExchangeDV");
1741  }
1742  }
1743  return(0);
1744 
1745  };
1746 
1747  int ExchangeBuffer(int messageId,int numEquations,double *dataBuffer,int threadId)
1748  {
1749  if(globalPtr){
1750 #pragma omp barrier
1751 #pragma omp master
1752  {
1753  globalPtr->FunctionEntry("ExchangeBuffer");
1754  }
1755  }
1756  PackBufferMessage(messageId,numEquations,dataBuffer,threadId);
1757 #pragma omp barrier
1758 
1759 #pragma omp master
1760  {
1761  ExchangeBufferMessage(messageId);
1762  }
1763 
1764 #pragma omp barrier
1765 
1766  UnpackBufferMessage(messageId,numEquations,dataBuffer,threadId);
1767 
1768  if(globalPtr){
1769 #pragma omp barrier
1770 #pragma omp master
1771  {
1772  globalPtr->FunctionExit("ExchangeBuffer");
1773  }
1774  }
1775 
1776  return(0);
1777 
1778  };
1779 
1780 
1781  int ExchangeInviscidFlux(int threadId)
1782  {
1783  return(ExchangeBuffer(fluxMessageId,numEquations,inviscidFluxBuffer,threadId));
1784  };
1785 
1786  int ExchangeState(int threadId)
1787  {
1788  if(globalPtr){
1789  #pragma omp barrier
1790  #pragma omp master
1791  {
1792  globalPtr->FunctionEntry("ExchangeState");
1793  }
1794  }
1795 
1796 #pragma omp master
1797  {
1798  haloPtr->PackMessageBuffers(fluxMessageId,0,1,rhoPtr);
1799  haloPtr->PackMessageBuffers(fluxMessageId,1,numDim,rhoVPtr);
1800  haloPtr->PackMessageBuffers(fluxMessageId,numDim+1,1,rhoEPtr);
1801 
1802  haloPtr->SendMessage(fluxMessageId,gridNeighbors,gridPtr->Communicator());
1803  haloPtr->ReceiveMessage(fluxMessageId,gridNeighbors,gridPtr->Communicator());
1804 
1805  haloPtr->UnPackMessageBuffers(fluxMessageId,0,1,rhoPtr);
1806  haloPtr->UnPackMessageBuffers(fluxMessageId,1,numDim,rhoVPtr);
1807  haloPtr->UnPackMessageBuffers(fluxMessageId,numDim+1,1,rhoEPtr);
1808  }
1809 
1810  if(globalPtr){
1811  #pragma omp barrier
1812  #pragma omp master
1813  {
1814  globalPtr->FunctionExit("ExchangeState");
1815  }
1816  }
1817 
1818  return 0;
1819  };
1820 
1821  // ===== State-related interfaces ====
1822 
1823  // *NOT* thread safe - master only
1824  //
1825  int SetParamState(const StateType &paramState)
1826  {
1827 
1828 #pragma omp barrier
1829  if(globalPtr){
1830 #pragma omp master
1831  globalPtr->FunctionEntry("SetParamState");
1832  }
1833 
1834  //
1835  // get non-dimensional reference parameters
1836  //
1837 
1839  {
1840  int paramHandle = paramState.GetDataIndex("refRe");
1841  if(paramHandle >= 0){
1842  const pcpp::field::dataset::DataBufferType &paramData(paramState.Field(paramHandle));
1843  const double *paramPtr = paramData.Data<double>();
1844  refRe = *paramPtr;
1845  if(refRe < 0)
1846  refRe = 0;
1847  } else {
1848  refRe = 0.0;
1849  }
1850  }
1851 
1852  {
1853  int paramHandle = paramState.GetDataIndex("refPr");
1854  if(paramHandle >= 0){
1855  const pcpp::field::dataset::DataBufferType &paramData(paramState.Field(paramHandle));
1856  const double *paramPtr = paramData.Data<double>();
1857  refPr = *paramPtr;
1858  if(refPr < 0)
1859  refPr = 0.75;
1860  } else {
1861  refPr = 0.75;
1862  }
1863  }
1864 
1865  //
1866  // Define the reference state based on the density, pressure, and temperature
1867  // all other reference parameters derive as follows for a calorically perfect, ideal gas
1868  //
1869  // p_ref = p_inf
1870  // rho_ref = rho_inf
1871  // temperature_ref = temperature_inf
1872  //
 1873  // The user has the option to run in dimensional or non-dimensional mode;
 1874  // dimensional mode calculates the required EOS parameters (i.e. the specific gas constant
 1875  // for a perfect gas) and sets the reference values above to one.
 1876  //
 1877  // The default behavior is to run in non-dimensional mode with STP reference conditions for air.
1878  //
1879  {
1880  int paramHandle = paramState.GetDataIndex("nonDimensional");
1881  std::ostringstream statusMsg;
1882  statusMsg << (paramHandle >= 0 ? "User chose" : "Defaulting to");
1883  if(paramHandle >= 0){
1884  const pcpp::field::dataset::DataBufferType &paramData(paramState.Field(paramHandle));
1885  const int *paramPtr = paramData.Data<int>();
1886  nonDimensionalMode = *paramPtr;
1887  statusMsg << (nonDimensionalMode==1 ? " non-dimensional " :
1888  nonDimensionalMode==2 ? " PlasComCM-non-dimensional " :
1889  " dimensional ");
1890  } else {
1891  nonDimensionalMode = 1;
1892  statusMsg << " non-dimensional ";
1893  }
1894  statusMsg << "mode." << std::endl;
1895  if(globalPtr){
1896  globalPtr->StdOut(statusMsg.str());
1897  } else {
1898  std::cout << statusMsg.str();
1899  }
1900  }
1901 
1902  //
1903  // get the reference state from the parameters
1904  // if it is not defined, or incomplete, we specify a default state
1905  // and warn the user
1906  //
1907  bool haveRefRho = false;
1908  bool haveRefPressure = false;
1909  bool haveRefTemperature = false;
1910  {
1911  int paramHandle = paramState.GetDataIndex("refRho");
1912  if(paramHandle >= 0){
1914  &paramData(paramState.Field(paramHandle));
1915  const double *paramPtr = paramData.Data<double>();
1916  refRho = *paramPtr;
1917  haveRefRho = true;
1918  }
1919  }
1920 
1921  {
1922  int paramHandle = paramState.GetDataIndex("refPressure");
1923  if(paramHandle >= 0){
1924  const pcpp::field::dataset::DataBufferType
1925  &paramData(paramState.Field(paramHandle));
1926  const double *paramPtr = paramData.Data<double>();
1927  refPressure = *paramPtr;
1928  haveRefPressure = true;
1929  }
1930  }
1931 
1932  {
1933  int paramHandle = paramState.GetDataIndex("refTemperature");
1934  if(paramHandle >= 0){
1935  const pcpp::field::dataset::DataBufferType
1936  &paramData(paramState.Field(paramHandle));
1937  const double *paramPtr = paramData.Data<double>();
1938  refTemperature = *paramPtr;
1939  haveRefTemperature = true;
1940  }
1941  }
1942 
1943  if(!(haveRefRho && haveRefPressure && haveRefTemperature)){
1944  refRho = 1.225;
1945  refPressure = 101325;
1946  refTemperature = 288.15;
1947  std::ostringstream statusMessage;
1948  statusMessage << "Warning: EOS reference state missing or incomplete in input. " << std::endl
1949  << "User must specify refRho, refPressure, and refTemperature. " << std::endl
1950  << "Setting to default values: " << std::endl
1951  << "\t refRho=" << refRho << std::endl
1952  << "\t refPressure=" << refPressure << std::endl
1953  << "\t refTemperature=" << refTemperature << std::endl
1954  << std::endl;
1955  if(globalPtr){
1956  globalPtr->StdOut(statusMessage.str());
1957  } else {
1958  std::cout << statusMessage.str();
1959  }
1960  }
1961 
1962  {
1963  int paramHandle = paramState.GetDataIndex("refLength");
1964  if(paramHandle >= 0){
1965  const pcpp::field::dataset::DataBufferType
1966  &paramData(paramState.Field(paramHandle));
1967  const double *paramPtr = paramData.Data<double>();
1968  refLength = *paramPtr;
1969  } else {
1970  refLength = 1.0;
1971  std::ostringstream statusMessage;
1972  statusMessage << "Warning: Could not find refLength in input" << std::endl
1973  << "Setting to default value " << refLength << std::endl;
1974  if(globalPtr){
1975  globalPtr->StdOut(statusMessage.str());
1976  } else {
1977  std::cout << statusMessage.str();
1978  }
1979  }
1980  }
1981 
1982  //
1983  // initialize equation of state object
1984  // get data associated with the chosen equation of state (i.e. gamma)
1985  // and transport laws that may be needed to fully define the reference state
1986  //
1987  if(gasModel == IDEAL) {
1988  eosPtr = new eos::perfect_gas();
1989 
1990  gammaHandle = paramState.GetDataIndex("gamma");
1991  if(gammaHandle < 0) {
1992  std::ostringstream statusMessage;
1993  statusMessage << "Gamma does not exist in input parameter state."
1994  << " Exiting." << std::endl;
1995  if(globalPtr){
1996  globalPtr->StdOut(statusMessage.str());
1997  globalPtr->FunctionExit("SetParamState");
1998  } else {
1999  std::cout << statusMessage.str();
2000  }
2001  return(1);
2002  }
2003 
2004  const pcpp::field::dataset::DataBufferType &gammaData(paramState.Field(gammaHandle));
2005  const double *gammaPtr = gammaData.Data<double>();
2006  if(!gammaPtr) {
2007  std::ostringstream statusMessage;
2008  statusMessage << "Gamma not defined in input parameter state."
2009  << " Exiting." << std::endl;
2010  if(globalPtr){
2011  globalPtr->StdOut(statusMessage.str());
2012  globalPtr->FunctionExit("SetParamState");
2013  } else {
2014  std::cout << statusMessage.str();
2015  }
2016  return(1);
2017  }
2018 
2019  eosPtr->SetGamma(*gammaPtr);
2020  } else {
2021  std::ostringstream statusMessage;
2022  statusMessage << "Error: Unknown gas model selected: " << gasModel
2023  << ". Exiting." << std::endl;
2024  if(globalPtr){
2025  globalPtr->StdOut(statusMessage.str());
2026  globalPtr->FunctionExit("SetParamState");
2027  } else {
2028  std::cout << statusMessage.str();
2029  }
2030  return(1);
2031  }
2032 
2033  if(refRe > 0) {
2034  if(transportModel == POWER) {
2035 
2036  // power
2037  powerHandle = paramState.GetDataIndex("power");
2038  if(powerHandle < 0) {
2039  std::ostringstream statusMessage;
2040  statusMessage << "Power does not exist in input parameter state." << std::endl
2041  << " Required for the Power Law transport model." << std::endl
2042  << " Exiting." << std::endl;
2043  if(globalPtr){
2044  globalPtr->StdOut(statusMessage.str());
2045  globalPtr->FunctionExit("SetParamState");
2046  } else {
2047  std::cout << statusMessage.str();
2048  }
2049  return(1);
2050  }
2051 
2052  const pcpp::field::dataset::DataBufferType
2053  &powerData(paramState.Field(powerHandle));
2054  powerPtr = powerData.Data<double>();
2055  if(!powerPtr) {
2056  std::ostringstream statusMessage;
2057  statusMessage << "Power not defined in input parameter state." << std::endl
2058  << "Required for the Power Law transport model." << std::endl
2059  << "Example: power=2/3 for air." << std::endl
2060  << "Exiting." << std::endl;
2061  if(globalPtr){
2062  globalPtr->StdOut(statusMessage.str());
2063  globalPtr->FunctionExit("SetParamState");
2064  } else {
2065  std::cout << statusMessage.str();
2066  }
2067  return(1);
2068  }
2069  power = *powerPtr;
2070 
2071  // beta
2072  betaHandle = paramState.GetDataIndex("beta");
2073  if(betaHandle < 0) {
2074  std::ostringstream statusMessage;
2075  statusMessage << "beta does not exist in input parameter state." << std::endl
2076  << "Required for the Power Law transport model." << std::endl
2077  << "Exiting." << std::endl;
2078  if(globalPtr){
2079  globalPtr->StdOut(statusMessage.str());
2080  globalPtr->FunctionExit("SetParamState");
2081  } else {
2082  std::cout << statusMessage.str();
2083  }
2084  return(1);
2085  }
2086 
2087  const pcpp::field::dataset::DataBufferType
2088  &betaData(paramState.Field(betaHandle));
2089  betaPtr = betaData.Data<double>();
2090  if(!betaPtr) {
2091  std::ostringstream statusMessage;
2092  statusMessage << "beta not defined in input parameter state." << std::endl
2093  << "Required for the Power Law transport model." << std::endl
2094  << "Example: beta=4.093*10^-7 for air." << std::endl
2095  << "Exiting." << std::endl;
2096  if(globalPtr){
2097  globalPtr->StdOut(statusMessage.str());
2098  globalPtr->FunctionExit("SetParamState");
2099  } else {
2100  std::cout << statusMessage.str();
2101  }
2102  return(1);
2103  }
2104  beta = *betaPtr;
2105 
2106  //bulkViscosity Factor
2107  bulkViscFacHandle = paramState.GetDataIndex("bulkViscFac");
2108  if(bulkViscFacHandle < 0) {
2109  std::ostringstream statusMessage;
2110  statusMessage << "bulkViscFac does not exist in input parameter state." << std::endl
2111  << "Required for the Power Law transport model." << std::endl
2112  << "Exiting." << std::endl;
2113  if(globalPtr){
2114  globalPtr->StdOut(statusMessage.str());
2115  globalPtr->FunctionExit("SetParamState");
2116  } else {
2117  std::cout << statusMessage.str();
2118  }
2119  return(1);
2120  }
2121 
2122  const pcpp::field::dataset::DataBufferType
2123  &bulkViscFacData(paramState.Field(bulkViscFacHandle));
2124  bulkViscFacPtr = bulkViscFacData.Data<double>();
2125  if(!bulkViscFacPtr) {
2126  std::ostringstream statusMessage;
2127  statusMessage << "bulkViscFac not defined in input parameter state." << std::endl
2128  << "Required for the Power Law transport model." << std::endl
2129  << "Example: bulkViscFac=0.6 for air." << std::endl
2130  << "Exiting." << std::endl;
2131  if(globalPtr){
2132  globalPtr->StdOut(statusMessage.str());
2133  globalPtr->FunctionExit("SetParamState");
2134  } else {
2135  std::cout << statusMessage.str();
2136  }
2137  return(1);
2138  }
2139  bulkViscFac = *bulkViscFacPtr;
2140 
2141  } else {
2142  std::ostringstream statusMessage;
2143  statusMessage << "Error: Unknown transport model selected: "
2144  << transportModel << ". Exiting." << std::endl;
2145  if(globalPtr){
2146  globalPtr->StdOut(statusMessage.str());
2147  globalPtr->FunctionExit("SetParamState");
2148  } else {
2149  std::cout << statusMessage.str();
2150  }
2151  return(1);
2152  }
2153  }
2154 
2155  //
2156  // derive the remaining reference state variables
2157  //
2158  if(gasModel == IDEAL) {
2159  double gamma = eosPtr->GetGamma();
2160  if(nonDimensionalMode == 1){
2161  specGasConst = 1.0;
2162  refSpecGasConst = refPressure/refRho/refTemperature;
2163  refVelocity = sqrt(refPressure/refRho);
2164  refSndSpd = refVelocity*sqrt(gamma);
2165  refEnergy = refPressure/(gamma-1.0)/refRho;
2166  refCp = refSpecGasConst*gamma/(gamma-1.0);
2167  refCv = refCp-refSpecGasConst;
2168 
2169 
2170  pressureScale = refPressure;
2171  temperatureScale = refTemperature;
2172  velocityScale = refVelocity;
2173 
2174  } else if(nonDimensionalMode == 2){
2175 
2176  specGasConst = (gamma-1)/gamma;
2177  refSpecGasConst = refPressure/refRho/refTemperature;
2178  refVelocity = sqrt(gamma*refPressure/refRho);
2179  refSndSpd = refVelocity;
2180  refEnergy = refPressure/(gamma-1.0)/refRho;
2181  refCp = refSpecGasConst*gamma/(gamma-1.0);
2182  refCv = refCp-refSpecGasConst;
2183 
2184  pressureScale = refRho*refSndSpd*refSndSpd;
2185  temperatureScale = (gamma-1)*refTemperature;
2186  velocityScale = refSndSpd;
2187 
2188  std::ostringstream statusMessage;
2189  statusMessage << "Enabling legacy PlasComCM nonDimensionalization mode." << std::endl;
2190  if(globalPtr){
2191  globalPtr->StdOut(statusMessage.str());
2192  } else {
2193  std::cout << statusMessage.str();
2194  }
2195 
2196  } else {
2197 
2198  refVelocity = sqrt(refPressure/refRho);
2199  specGasConst = refPressure/refRho/refTemperature;
2200  refSndSpd = refVelocity*sqrt(gamma);
2201  refCp = specGasConst*gamma/(gamma-1.0);
2202  refCv = refCp-specGasConst;
2203  refEnergy = refPressure/(gamma-1.0)/refRho;
2204 
2205  refRho = 1.0;
2206  refPressure = 1.0;
2207  refTemperature = 1.0;
2208  refVelocity = 1.0;
2209  refSpecGasConst = 1.0;
2210 
2211  pressureScale = 1.0;
2212  temperatureScale = 1.0;
2213  velocityScale = 1.0;
2214  }
2215 
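 // Worked example (illustrative, assuming the default STP air reference state above
 // and gamma = 1.4):
 //   refSpecGasConst = 101325/(1.225*288.15) ~ 287.1 J/(kg K)
 //   refVelocity     = sqrt(101325/1.225)    ~ 287.6 m/s
 //   refSndSpd       = 287.6*sqrt(1.4)       ~ 340.3 m/s
 //   refEnergy       = 101325/(0.4*1.225)    ~ 2.07e5 J/kg
 //   refCp           = 287.1*1.4/0.4         ~ 1004.7 J/(kg K)
 //   refCv           = refCp - 287.1         ~ 717.6 J/(kg K)
 // In non-dimensional mode 1 the solution is then scaled by pressureScale = refPressure,
 // temperatureScale = refTemperature, and velocityScale = refVelocity.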
2216  eosPtr->SetSpecificGasConstant(specGasConst);
2217  eosPtr->InitializeMaterialProperties();
2218  }
2219 
2220 
2221  //
2222  // in dimensional mode all the non-dimensional numbers become unity
2223  //
2224  if(refRe>0){
2225  if(nonDimensionalMode != 0){
2226  refMu = refRho*refVelocity*refLength/refRe;
2227  refKappa = refCp*refMu/refPr;
2228  } else {
2229  refRe = 1.0;
2230  refMu = 1.0;
2231  }
2232  }
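 // The reference viscosity and conductivity above follow directly from the definitions
 // Re = rho_ref*V_ref*L_ref/mu_ref and Pr = cp_ref*mu_ref/kappa_ref, so
 // mu_ref = rho_ref*V_ref*L_ref/Re and kappa_ref = cp_ref*mu_ref/Pr.
 // Illustrative numbers (assuming the STP reference state above, L_ref = 1 m,
 // Re = 1e6, Pr = 0.75):
 //   mu_ref    ~ 1.225*287.6*1.0/1e6     ~ 3.5e-4
 //   kappa_ref ~ 1004.7*3.5e-4/0.75      ~ 0.47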
2233 
2234  //
2235  // Scale the input parameters for non-dimensional runs
2236  //
2237  if(refRe>0){
2238  if(nonDimensionalMode == 1){
2239  if(transportModel == POWER){
2240  beta = 1.0;
2241  }
2242  } else if(nonDimensionalMode == 2){
2243  double gamma = eosPtr->GetGamma();
2244  if(transportModel == POWER){
2245  //beta = beta*pow(refTemperature,power)/refMu;
2246  beta = pow(gamma-1,power);
2247  }
2248  }
2249  }
2250 
2251 
2252  //
2253  // Report the current reference state to the user
2254  std::ostringstream refStateMsg;
2255  refStateMsg << "NavierStokesRHS Reference State: " << std::endl
2256  << "Reynolds Number: \t" << refRe << std::endl
2257  << "Prandtl Number: \t" << refPr << std::endl
2258 
2259  << "Length Reference: \t" << refLength << std::endl
2260  << "Density Reference: \t" << refRho << std::endl
2261  << "Pressure Reference: \t" << refPressure << std::endl
2262  << "Temperature Reference: \t" << refTemperature << std::endl
2263 
2264  << "Specific Gas Constant Reference: \t" << refSpecGasConst << std::endl
2265  << "Energy Reference: \t" << refEnergy << std::endl
2266  << "Velocity Reference: \t" << refVelocity << std::endl
2267  << "Speed of Sound Reference: \t" << refSndSpd << std::endl
2268  << "Cp Reference: \t" << refCp << std::endl
2269  << "Cv Reference: \t" << refCv << std::endl;
2270 
2271  if(nonDimensionalMode == 0) {
2272  refStateMsg << "NavierStokesRHS set to run in dimensional mode." << std::endl
2273  << "Specific Gas Constant: \t" << specGasConst << std::endl
2274  << "Speed of sound: \t" << refSndSpd << std::endl
2275  << "Cp: \t" << eosPtr->GetHeatCapacityCp() << std::endl
2276  << "Cv: \t" << eosPtr->GetHeatCapacityCv() << std::endl;
2277  }
2278 
2279  if(globalPtr){
2280  globalPtr->StdOut(refStateMsg.str());
2281  } else {
2282  std::cout << refStateMsg.str();
2283  }
2284 
2285  std::ostringstream paramStatusMsg;
2286 
2287  //
2288  // initialization of other simulation parameters
2289  //
2290  inputCFL = 1.0;
2291  int inputCFLHandle = paramState.GetDataIndex("inputCFL");
2292  if(inputCFLHandle >= 0){
2293  const pcpp::field::dataset::DataBufferType &cflData(paramState.Field(inputCFLHandle));
2294  const double *inCFLPtr = cflData.Data<double>();
2295  inputCFL = *inCFLPtr;
2296  paramStatusMsg << "Input CFL = " << inputCFL << std::endl;
2297  }
2298  if(inputCFL <= 0){
2299  inputCFL = 1.0;
2300  paramStatusMsg << "Defaulting CFL = " << inputCFL << std::endl;
2301  }
2302 
2303 
2304  inputDT = -1.0;
2305  int inputDTHandle = paramState.GetDataIndex("inputDT");
2306  if(inputDTHandle >= 0){
2307  const pcpp::field::dataset::DataBufferType &inputDTData(paramState.Field(inputDTHandle));
2308  const double *inputDTPtr = inputDTData.Data<double>();
2309  inputDT = *inputDTPtr;
2310  paramStatusMsg << "Input DT = " << inputDT << std::endl;
2311  }
2312  if(inputDT < 0){ // inputDT = 0, rhs and advancer still operate, but state doesn't change (useful for testing)
2313  timeSteppingMode = CONSTANT_CFL;
2314  paramStatusMsg << "User-specified DT is invalid, defaulting to CONSTANT_CFL mode."
2315  << std::endl;
2316  }
2317 
2318  sigmaHandle = paramState.GetDataIndex("sigma");
2319  if(sigmaHandle >= 0){
2320  const pcpp::field::dataset::DataBufferType &sigmaData(paramState.Field(sigmaHandle));
2321  sigmaPtr = sigmaData.Data<double>();
2322  inputSigma = *sigmaPtr;
2323  } else {
2324  sigmaPtr = NULL;
2325  inputSigma = 0.0;
2326  }
2327 
2328  int sigmaDilHandle = paramState.GetDataIndex("sigmaDilatation");
2329  if(sigmaDilHandle >= 0){
2330  const pcpp::field::dataset::DataBufferType &sigmaData(paramState.Field(sigmaDilHandle));
2331  sigmaDilPtr = sigmaData.Data<double>();
2332  inputSigmaDil = *sigmaDilPtr;
2333  } else {
2334  sigmaDilPtr = NULL;
2335  inputSigmaDil = 0.0;
2336  }
2337 
2338 
2339  if(inputSigma > 0.0 || inputSigmaDil > 0.0){
2340  artificialDissipation = true;
2341  }
2342  if(artificialDissipation){
2343  if(inputSigma < 0 || inputSigmaDil < 0){
2344  return(1);
2345  }
2346  }
2347 
2348 
2349 
2350  {
2351  int paramHandle = paramState.GetDataIndex("Flags");
2352  if(paramHandle >= 0){
2353  const pcpp::field::dataset::DataBufferType &paramData(paramState.Field(paramHandle));
2354  const int *paramPtr = paramData.Data<int>();
2355  int optionFlags = *paramPtr;
2356  if(*paramPtr > 0){
2357  useAlternateRHS = optionFlags&ALTERNATERHS;
2358  exchangeFlux = optionFlags&EXCHANGEFLUX;
2359  exchangeDV = optionFlags&EXCHANGEDV;
2360  useWENO = optionFlags&USEWENO;
2361  }
2362  }
2363  if(refRe > 0){
2364  paramStatusMsg << "NavierStokesRHS: Viscous flow, enabling DV exchange." << std::endl;
2365  exchangeDV = true;
2366  }
2367  if(artificialDissipation){
2368  paramStatusMsg << "NavierStokesRHS: Artificial dissipation ON.";
2369  if(refRe == 0)
2370  paramStatusMsg << " Enabling DV exchange.";
2371  paramStatusMsg << std::endl;
2372  exchangeDV = true;
2373  }
2374  if(useAlternateRHS){
2375  exchangeFlux = true;
2376  paramStatusMsg << "NavierStokesRHS: Enabling flux exchange." << std::endl;
2377  }
2378  }
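 // The "Flags" parameter is a bitwise OR of the option constants declared at the top
 // of this file; for example (an illustrative input, not a required setting):
 //   Flags = ALTERNATERHS | USEWENO
 // would enable the alternate RHS formulation (which, as above, forces flux exchange)
 // together with the WENO path.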
2379 
2380 #pragma omp barrier
2381  if(globalPtr){
2382 #pragma omp master
2383  {
2384  globalPtr->StdOut(paramStatusMsg.str());
2385  globalPtr->FunctionExit("SetParamState");
2386  }
2387  } else {
2388  std::cout << paramStatusMsg.str();
2389  }
2390 
2391  return(0);
2392  };
2393 
2394  // *NOT* thread-safe, call on master only
2395  int InitializeState(StateType &inState)
2396  {
2397 
2398  if(!gridInitialized)
2399  return(1);
2400 
2401  timeHandle = inState.GetDataIndex("simTime");
2402  if(timeHandle < 0)
2403  return(1);
2404  pcpp::field::dataset::DataBufferType &timeData(inState.Field(timeHandle));
2405  timePtr = timeData.Data<double>();
2406  if(!timePtr)
2407  return(1);
2408  time = *timePtr;
2409 
2410  cflHandle = inState.GetDataIndex("simCFL");
2411  if(cflHandle >= 0){
2412  pcpp::field::dataset::DataBufferType &cflData(inState.Field(cflHandle));
2413  cflPtr = cflData.Data<double>();
2414  }
2415 
2416  dtHandle = inState.GetDataIndex("simDT");
2417  if(dtHandle >= 0){
2418  pcpp::field::dataset::DataBufferType &dtData(inState.Field(dtHandle));
2419  dtPtr = dtData.Data<double>();
2420  }
2421 
2422  rhoHandle = inState.GetDataIndex("rho");
2423  if(rhoHandle < 0)
2424  return(1);
2425  pcpp::field::dataset::DataBufferType &rhoData(inState.Field(rhoHandle));
2426  rhoPtr = rhoData.Data<double>();
2427  if(!rhoPtr)
2428  return(1);
2429 
2430  rhoVHandle = inState.GetDataIndex("rhoV");
2431  if(rhoVHandle < 0)
2432  return(1);
2433  pcpp::field::dataset::DataBufferType &rhoVData(inState.Field(rhoVHandle));
2434  rhoVPtr = rhoVData.Data<double>();
2435  if(!rhoVPtr)
2436  return(1);
2437 
2438  rhoEHandle = inState.GetDataIndex("rhoE");
2439  if(rhoEHandle < 0)
2440  return(1);
2441  pcpp::field::dataset::DataBufferType &rhoEData(inState.Field(rhoEHandle));
2442  rhoEPtr = rhoEData.Data<double>();
2443  if(!rhoEPtr)
2444  return(1);
2445 
2446  pressureHandle = inState.GetDataIndex("pressure");
2447  if(pressureHandle >= 0){
2448  pcpp::field::dataset::DataBufferType &pressureData(inState.Field(pressureHandle));
2449  pressurePtr = pressureData.Data<double>();
2450  }
2451 
2452  temperatureHandle = inState.GetDataIndex("temperature");
2453  if(temperatureHandle >= 0){
2454  pcpp::field::dataset::DataBufferType &temperatureData(inState.Field(temperatureHandle));
2455  temperaturePtr = temperatureData.Data<double>();
2456  }
2457 
2458  // Optional data
2459  rhom1Handle = inState.GetDataIndex("rhom1");
2460  if(rhom1Handle >= 0){
2461  pcpp::field::dataset::DataBufferType &rhom1Data(inState.Field(rhom1Handle));
2462  rhom1Ptr = rhom1Data.Data<double>();
2463  }
2464 
2465  velocityHandle = inState.GetDataIndex("velocity");
2466  if(velocityHandle >= 0){
2467  pcpp::field::dataset::DataBufferType &velocityData(inState.Field(velocityHandle));
2468  velocityPtr = velocityData.Data<double>();
2469  }
2470 
2471  internalEnergyHandle = inState.GetDataIndex("internalEnergy");
2472  if(internalEnergyHandle >= 0){
2473  pcpp::field::dataset::DataBufferType &internalEnergyData(inState.Field(internalEnergyHandle));
2474  internalEnergyPtr = internalEnergyData.Data<double>();
2475  }
2476 
2477  scalarHandle = inState.GetDataIndex("scalarVars");
2478  if(scalarHandle >= 0){
2479  pcpp::field::dataset::DataBufferType &scalarData(inState.Field(scalarHandle));
2480  numScalar = inState.Meta()[scalarHandle].ncomp;
2481  scalarPtr = scalarData.Data<double>();
2482  if(!scalarPtr)
2483  return(1);
2484  } else {
2485  scalarPtr = NULL;
2486  numScalar = 0;
2487  }
2488 
2489 
2490  int stencilConnHandle = inState.GetDataIndex("sconn");
2491  if(stencilConnHandle >= 0){
2492  pcpp::field::dataset::DataBufferType &scData(inState.Field(stencilConnHandle));
2493  stencilConn = scData.Data<int>();
2494  if(!stencilConn)
2495  return(1);
2496  } else {
2497  stencilConn = NULL;
2498  }
2499 
2500  int iMaskHandle = inState.GetDataIndex("imask");
2501  if(iMaskHandle >= 0){
2502  pcpp::field::dataset::DataBufferType &imData(inState.Field(iMaskHandle));
2503  iMask = imData.Data<int>();
2504  if(!iMask)
2505  return(1);
2506  } else {
2507  iMask = NULL;
2508  }
2509 
2510  int dilatationHandle = inState.GetDataIndex("divV");
2511  if(dilatationHandle >= 0){
2512  pcpp::field::dataset::DataBufferType &dilData(inState.Field(dilatationHandle));
2513  velDiv = dilData.Data<double>();
2514  if(!velDiv)
2515  return(1);
2516  } else {
2517  velDiv = NULL;
2518  }
2519 
2520  // dynamic viscosity, mu
2521  muHandle = inState.GetDataIndex("mu");
2522  if(muHandle >= 0){
2523  pcpp::field::dataset::DataBufferType &muData(inState.Field(muHandle));
2524  muPtr = muData.Data<double>();
2525  } else {
2526  muPtr = NULL;
2527  }
2528 
2529  // bulk viscosity, lambda
2530  lambdaHandle = inState.GetDataIndex("lambda");
2531  if(lambdaHandle >= 0 ){
2532  pcpp::field::dataset::DataBufferType &lambdaData(inState.Field(lambdaHandle));
2533  lambdaPtr = lambdaData.Data<double>();
2534  } else {
2535  lambdaPtr = NULL;
2536  }
2537 
2538  // heat transfer coefficient, kappa
2539  kappaHandle = inState.GetDataIndex("kappa");
2540  if(kappaHandle >= 0){
2541  pcpp::field::dataset::DataBufferType &kappaData(inState.Field(kappaHandle));
2542  kappaPtr = kappaData.Data<double>();
2543  } else {
2544  kappaPtr = NULL;
2545  }
2546 
2547  // Number of equations we actually solve
2548  numEquations = numDim+2+numScalar;
2549  myStateBuffers.resize(numEquations+1,NULL); // extra one for scalar transport to keep API happy
2550 
2551  // myStateBuffers.resize(4,NULL);
2552  int iEquation = 0;
2553  myStateBuffers[iEquation++] = rhoPtr;
2554  for(int iDim = 0;iDim < numDim;iDim++)
2555  myStateBuffers[iEquation++] = &rhoVPtr[iDim*numPointsBuffer];
2556  myStateBuffers[iEquation++] = rhoEPtr;
2557  for(int iScalar = 0;iScalar < numScalar;iScalar++)
2558  myStateBuffers[iEquation++] = &scalarPtr[iScalar*numPointsBuffer];
2559  myRHSBuffers.resize(numEquations+1,NULL);
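 // Illustrative layout (assuming numDim = 3 and numScalar = 1): numEquations = 6 and
 // myStateBuffers holds, in order,
 //   [ rho, rhoV_x, rhoV_y, rhoV_z, rhoE, scalar_0 ]
 // with one unused trailing slot kept NULL to satisfy the API noted above.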
2560 
2561  if(dvBuffers.empty()){
2562 
2563  //
2564  numDV = numDim+4; // p T 1/rho V e
2565 
2566  // New shiny
2567  // dvBuffer = new double [numPointsBuffer * numDV];
2568  // fluxComponentPtr = new double [numFluxComponents * numPointsBuffer];
2569 
2570  // Old busted
2571  dvBuffers.resize(numDV); // = new double [6*numPoints];
2572 
2574  scaledPressure = new double [numPointsBuffer];
2575 
2576  if(pressurePtr){
2577  dvBuffers[0] = pressurePtr;
2578  } else {
2579  dvBuffers[0] = new double [numPointsBuffer];
2580  pressurePtr = dvBuffers[0];
2581  }
2582  if(temperaturePtr){
2583  dvBuffers[1] = temperaturePtr;
2584  } else {
2585  dvBuffers[1] = new double [numPointsBuffer];
2586  temperaturePtr = dvBuffers[1];
2587  }
2588  if(rhom1Ptr){
2589  dvBuffers[2] = rhom1Ptr;
2590  } else {
2591  dvBuffers[2] = new double [numPointsBuffer];
2592  rhom1Ptr = dvBuffers[2];
2593  }
2594  if(!velocityPtr)
2595  velocityPtr = new double [numDim*numPointsBuffer];
2596  for(int iDim = 0;iDim < numDim;iDim++)
2597  dvBuffers[iDim+3] = &velocityPtr[iDim*numPointsBuffer];
2598  if(internalEnergyPtr){
2599  dvBuffers[3+numDim] = internalEnergyPtr;
2600  } else {
2601  dvBuffers[3+numDim] = new double [numPointsBuffer];
2602  internalEnergyPtr = dvBuffers[3+numDim];
2603  }
2604  }
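 // Illustrative dependent-variable layout (assuming numDim = 3, so numDV = 7):
 //   dvBuffers[0] = pressure, [1] = temperature, [2] = 1/rho (specific volume),
 //   [3..5] = velocity components, [6] = internal energy.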
2605 
2606  if(tvBuffers.empty()){
2607 
2608  numTV = 3; // mu lambda kappa
2609  tvBuffers.resize(numTV);
2610  scaledTau = new double [numPointsBuffer];
2611 
2612  if(muPtr){
2613  tvBuffers[0] = muPtr;
2614  } else {
2615  tvBuffers[0] = new double [numPointsBuffer];
2616  muPtr = tvBuffers[0];
2617  }
2618  if(lambdaPtr){
2619  tvBuffers[1] = lambdaPtr;
2620  } else {
2621  tvBuffers[1] = new double [numPointsBuffer];
2622  lambdaPtr = tvBuffers[1];
2623  }
2624  if(kappaPtr){
2625  tvBuffers[2] = kappaPtr;
2626  } else {
2627  tvBuffers[2] = new double [numPointsBuffer];
2628  kappaPtr = tvBuffers[2];
2629  }
2630  }
2631 
2632  // initialize the RHS state object
2633  rhsState.AddField("vHat",'n',1,8,""); // only one component of vhat
2634  rhsState.AddField("viscidEnergy",'n',3,8,"");
2635  rhsState.AddField("inviscidFlux",'n',numEquations,8,"");
2636  rhsState.AddField("viscidFlux",'n',numEquations,8,"");
2637  rhsState.AddField("dFlux",'n',numEquations,8,"");
2638  if(refRe > 0 || artificialDissipation){
2639  rhsState.AddField("velGrad",'n',numDim*numDim,8,"");
2640  if(artificialDissipation){
2641  rhsState.AddField("velDiv",'n',1,8,"");
2642  rhsState.AddField("sigmaDiss",'n',1,8,"");
2643  rhsState.AddField("alphaDiss",'n',1,8,"");
2644  }
2645  }
2646  if(refRe > 0){
2647  rhsState.AddField("tau",'n',(numDim*(numDim+1))/2,8,"");
2648  rhsState.AddField("temperatureGrad",'n',numDim,8,"");
2649  rhsState.AddField("heatFlux",'n',numDim,8,"");
2650  }
2651  rhsState.Create(numPointsBuffer,0);
2652  velHat = rhsState.template GetFieldBuffer<double>("vHat");
2653  viscidEnergy = rhsState.template GetFieldBuffer<double>("viscidEnergy");
2654  if(artificialDissipation){
2655  if(velDiv){
2656  rhsState.SetFieldBuffer("velDiv",velDiv);
2657  } else {
2658  velDiv = rhsState.template GetFieldBuffer<double>("velDiv");
2659  }
2660  sigmaDissipation = rhsState.template GetFieldBuffer<double>("sigmaDiss");
2661  alphaDissipation = rhsState.template GetFieldBuffer<double>("alphaDiss");
2662  } else {
2663  sigmaDissipation = NULL;
2664  alphaDissipation = NULL;
2665  }
2666 
2667  velocityHandle = inState.GetDataIndex("velocity");
2668  if(velocityHandle >= 0){
2669  pcpp::field::dataset::DataBufferType &velocityData(inState.Field(velocityHandle));
2670  velocityPtr = velocityData.Data<double>();
2671  }
2672 
2673  // inviscid flux buffers
2674  if(inviscidFluxBuffers.empty()){
2675  inviscidFluxBuffers.resize(numEquations);
2676  }
2677  int inviscidFluxHandle = rhsState.GetDataIndex("inviscidFlux");
2678  inviscidFluxBuffer = rhsState.GetRealFieldBuffer(inviscidFluxHandle);
2679  for(int i=0;i<numEquations;i++){
2680  inviscidFluxBuffers[i] = &inviscidFluxBuffer[i*numPointsBuffer];
2681  }
2682 
2683  // viscid flux buffers
2684  if(viscidFluxBuffers.empty()){
2685  viscidFluxBuffers.resize(numEquations);
2686  }
2687  int viscidFluxHandle = rhsState.GetDataIndex("viscidFlux");
2688  viscidFluxBuffer = rhsState.GetRealFieldBuffer(viscidFluxHandle);
2689  for(int i=0;i<numEquations;i++){
2690  viscidFluxBuffers[i] = &viscidFluxBuffer[i*numPointsBuffer];
2691  }
2692 
2693  // flux derivatives
2694  if(dFluxBuffers.empty()){
2695  dFluxBuffers.resize(numEquations);
2696  }
2697  int dFluxHandle = rhsState.GetDataIndex("dFlux");
2698  dFluxBuffer = rhsState.GetRealFieldBuffer(dFluxHandle);
2699  for(int i=0;i<numEquations;i++){
2700  dFluxBuffers[i] = &dFluxBuffer[i*numPointsBuffer];
2701  }
2702 
2703  if(refRe > 0){
2704  if(velGradBuffers.empty()){
2705  velGradBuffers.resize(numDim);
2706  }
2707  int velGradHandle = rhsState.GetDataIndex("velGrad");
2708  velGradBufferPtr = rhsState.GetRealFieldBuffer(velGradHandle);
2709  for(int i=0;i<numDim;i++){
2710  velGradBuffers[i] = &velGradBufferPtr[i*numDim*numPointsBuffer];
2711  }
2712 
2713  if(tauBuffers.empty()){
2714  tauBuffers.resize(numDim*numDim);
2715  }
2716  int tauHandle = rhsState.GetDataIndex("tau");
2717  tauBufferPtr = rhsState.GetRealFieldBuffer(tauHandle);
2718 
2719  // tau is a symmetric 2-tensor; only the upper half is stored,
2720  // and the lower-half pointers are aliased to the memory of the
2721  // corresponding upper-half components
2722  size_t bufferIndex = 0;
2723  for(int i=0;i<numDim;i++){
2724  for(int j=0;j<numDim;j++){
2725  int index = i*numDim + j;
2726  if (i <= j){
2727  tauBuffers[index] = &tauBufferPtr[bufferIndex];
2728  bufferIndex += numPointsBuffer;
2729  } else {
2730  tauBuffers[index] = tauBuffers[j*numDim+i];
2731  }
2732  }
2733  }
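 // Illustrative aliasing (assuming numDim = 3): only (numDim*(numDim+1))/2 = 6
 // components are actually stored; e.g. tauBuffers[1*3+0] (tau_yx) points at the
 // same memory as tauBuffers[0*3+1] (tau_xy).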
2734 
2735  // thermal conduction storage
2736  if(temperatureGradBuffers.empty()){
2737  temperatureGradBuffers.resize(numDim);
2738  }
2739  int temperatureGradHandle = rhsState.GetDataIndex("temperatureGrad");
2740  temperatureGradBufferPtr = rhsState.GetRealFieldBuffer(temperatureGradHandle);
2741  for(int i=0;i<numDim;i++){
2742  temperatureGradBuffers[i] = &temperatureGradBufferPtr[i*numPointsBuffer];
2743  }
2744 
2745  if(heatFluxBuffers.empty()){
2746  heatFluxBuffers.resize(numDim);
2747  }
2748  int heatFluxHandle = rhsState.GetDataIndex("heatFlux");
2749  heatFluxBufferPtr = rhsState.GetRealFieldBuffer(heatFluxHandle);
2750 
2751  for(int i=0;i<numDim;i++){
2752  heatFluxBuffers[i] = &heatFluxBufferPtr[i*numPointsBuffer];
2753  }
2754 
2755  }
2756 
2757  //
2758  // register needed buffers with the eos object
2759  //
2760  eosPtr->SetupPressureBuffer(pressurePtr);
2761  eosPtr->SetupTemperatureBuffer(temperaturePtr);
2762  eosPtr->SetupSpecificVolumeBuffer(rhom1Ptr);
2763  eosPtr->SetupInternalEnergyBuffer(internalEnergyPtr);
2764 
2765  dScalarHandle = scalarHandle;
2766  stateInitialized = true;
2767 
2768  SetTargetState(inState);
2769 
2770  return(0);
2771  };
2772 
2773  // *NOT* thread-safe, call on master only
2774  int SetTargetState(StateType &inState)
2775  {
2776  if(!stateInitialized)
2777  return(1);
2778 
2779  targetState.Copy(inState);
2780 
2781  targetStateBuffers.resize(0);
2782  int numTargetVars = numEquations+1;
2783  targetStateBuffers.resize(numTargetVars,NULL);
2784 
2785  rhoTargetPtr = targetState.GetRealFieldBuffer(rhoHandle);
2786  rhoVTargetPtr = targetState.GetRealFieldBuffer(rhoVHandle);
2787  rhoETargetPtr = targetState.GetRealFieldBuffer(rhoEHandle);
2788 
2789  int iEquation = 0;
2790  targetStateBuffers[iEquation++] = rhoTargetPtr;
2791  for(int iDim = 0;iDim < numDim;iDim++)
2792  targetStateBuffers[iEquation++] = &rhoVTargetPtr[iDim*numPointsBuffer];
2793  targetStateBuffers[iEquation++] = rhoETargetPtr;
2794 
2795  if(numScalar > 0){
2796  scalarTargetPtr = targetState.GetRealFieldBuffer(scalarHandle);
2797  for(int iScalar = 0;iScalar < numScalar;iScalar++)
2798  targetStateBuffers[iEquation++] = &scalarTargetPtr[iScalar*numPointsBuffer];
2799  }
2800 
2801 
2802  return(0);
2803 
2804  };
2805 
2806 
2807  int SetState(StateType &inState)
2808  {
2809  if(!stateInitialized)
2810  return(1);
2811 
2812  rhoPtr = inState.GetRealFieldBuffer(rhoHandle);
2813  rhoVPtr = inState.GetRealFieldBuffer(rhoVHandle);
2814  rhoEPtr = inState.GetRealFieldBuffer(rhoEHandle);
2815 
2816  int iEquation = 0;
2817  myStateBuffers[iEquation++] = rhoPtr;
2818  for(int iDim = 0;iDim < numDim;iDim++)
2819  myStateBuffers[iEquation++] = &rhoVPtr[iDim*numPointsBuffer];
2820  myStateBuffers[iEquation++] = rhoEPtr;
2821 
2822  if(numScalar > 0){
2823  scalarPtr = inState.GetRealFieldBuffer(scalarHandle);
2824  for(int iScalar = 0;iScalar < numScalar;iScalar++)
2825  myStateBuffers[iEquation++] = &scalarPtr[iScalar*numPointsBuffer];
2826  }
2827 
2828 
2829  timePtr = inState.GetRealFieldBuffer(timeHandle);
2830  time = *timePtr;
2831 
2832  if(dtHandle >= 0){
2833  dtPtr = inState.GetRealFieldBuffer(dtHandle);
2834  }
2835  if(cflHandle >= 0){
2836  cflPtr = inState.GetRealFieldBuffer(cflHandle);
2837  }
2838 
2839  return(0);
2840 
2841  };
2842 
2843  int SetRHSState(StateType &rhsState)
2844  {
2845 
2846  if(dRhoHandle < 0){
2847  dRhoHandle = rhsState.GetDataIndex("rho");
2848  dRhoVHandle = rhsState.GetDataIndex("rhoV");
2849  dRhoEHandle = rhsState.GetDataIndex("rhoE");
2850  if(numScalar > 0)
2851  dScalarHandle = rhsState.GetDataIndex("scalarVars");
2852  }
2853 
2854  rhoRHSPtr = rhsState.GetRealFieldBuffer(dRhoHandle);
2855  rhoVRHSPtr = rhsState.GetRealFieldBuffer(dRhoVHandle);
2856  rhoERHSPtr = rhsState.GetRealFieldBuffer(dRhoEHandle);
2857 
2858  if(numScalar > 0)
2859  scalarRHSPtr = rhsState.GetRealFieldBuffer(dScalarHandle);
2860  else
2861  scalarRHSPtr = NULL;
2862 
2863  int iEquation = 0;
2864  myRHSBuffers[iEquation++] = rhoRHSPtr;
2865  for(int iDim = 0;iDim < numDim;iDim++)
2866  myRHSBuffers[iEquation++] = &rhoVRHSPtr[numPointsBuffer*iDim];
2867  myRHSBuffers[iEquation++] = rhoERHSPtr;
2868  for(int iScalar = 0;iScalar < numScalar;iScalar++)
2869  myRHSBuffers[iEquation++] = &scalarRHSPtr[numPointsBuffer*iScalar];
2870 
2871  return(0);
2872 
2873  };
2874 
2875  // ============= Stencil & Operator utils ========
2876 
2877  // This routine *needs* to be thread-aware - but the calling sequence needs to be
2878  // worked out. Currently it is called before threads are initialized.
2879 
2880  int SetupStencilConnectivities()
2881  {
2882 
2883  if(!operatorInitialized)
2884  return(1);
2885 
2886  int myThreadId = 0;
2887  int numThreads = 1;
2888 
2889  // #ifdef USE_OMP
2890  // myThreadId = omp_get_thread_num();
2891  // numThreads = omp_get_num_threads();
2892  // masterThread = (myThreadId == 0);
2893  // #endif
2894 
2895  // if(masterThread)
2896 
2897 #pragma omp master
2898  {
2899  if(!stencilConn){
2900  stencilConn = new int [numDim*numPointsBuffer];
2901  }
2902  if(artificialDissipation){
2903  artDissConn = new int [numDim*numPointsBuffer];
2904  artDissConnSplit = new int [numDim*numPointsBuffer];
2905  }
2906  if(!iMask){
2907  iMask = new int [numPointsBuffer];
2908  }
2909  }
2910 
2911  //#pragma omp barrier
2912 
2913  // Create stencil connectivity
2914  // if(numThreads == 1){
2915  // Only deals with actual domain boundaries - no holes
2916  plascom2::operators::sbp::CreateStencilConnectivity(numDim,&bufferSizes[0],&opInterval[0],
2917  boundaryDepth,&periodicDirs[0],
2918  stencilConn,true);
2919 
2920  if(artificialDissipation){
2921  plascom2::operators::sbp::CreateStencilConnectivity(numDim,&bufferSizes[0],&opInterval[0],
2922  artDissOperator.boundaryDepth,&periodicDirs[0],
2923  artDissConn,true);
2924 
2925  plascom2::operators::sbp::CreateStencilConnectivity(numDim,&bufferSizes[0],&opInterval[0],
2926  artDissOperatorSplit.boundaryDepth,&periodicDirs[0],
2927  artDissConnSplit,true);
2928  }
2929  // } else {
2930  // plascom2::operators::sbp::CreateStencilConnectivity(numDim,&bufferSizes[0],
2931  // &fortranPartitionBufferIntervals[myThreadId][0],
2932  // boundaryDepth,&periodicDirs[0],
2933  // stencilConn,true);
2934  // }
2935 
2936 
2937  // plascom2::operators::sbp::CreateStencilConnectivity(numDim,&gridSize[0],opInterval,boundaryDepth,stencilConn,true);
2938 
2939  // bug! unlike stencilConn, these need to be thread-specific
2940  // @todo Thread-specific dual stencil connectivities in RHS
2941  // Invert the connectivity
2942  // plascom2::operators::InvertStencilConnectivity(numDim,opInterval,stencilConn,dualStencilConn,true);
2943 
2944 
2945  // dualStencilConn = new size_t [numDim*numPointsBuffer]; // [DIM1[stencil1Points,stencil2Points....stencil(numStencils)Points]...]
2946  // numPointsStencil = new size_t [numDim*numStencils]; // [DIM1[numPoints1,numPoints2....numPoints(numStencils)]...]
2947 
2948  // plascom2::operators::sbp::InvertStencilConnectivity(numDim,&bufferSizes[0],&opInterval[0],numStencils,
2949  // stencilConn,dualStencilConn,numPointsStencil,true);
2950  return(0);
2951 
2952  };
2953 
2954 
2955  int SetupOperators(OperatorType &inOperator)
2956  {
2957 
2958  int myThreadId = 0;
2959  int numThreads = 1;
2960 
2961  // #ifdef USE_OMP
2962  // myThreadId = omp_get_thread_num();
2963  // masterThread = (myThreadId == 0);
2964  // #endif
2965 
2966 #pragma omp master
2967  {
2968  // ==== Main differential operators ===
2969  if(!operatorInitialized){
2970  myOperator.Copy(inOperator);
2971 
2972  numStencils = myOperator.numStencils;
2973  stencilStarts = myOperator.stencilStarts;
2974  stencilOffsets = myOperator.stencilOffsets;
2975  stencilSizes = myOperator.stencilSizes;
2976  stencilWeights = myOperator.stencilWeights;
2977  boundaryDepth = myOperator.boundaryDepth;
2978  boundaryWeight = myOperator.boundaryWeight;
2979  numStencilValues = myOperator.numValues;
2980 
2981  // Forticate the stencil starts
2982  for(int iStencil = 0;iStencil < numStencils;iStencil++)
2983  stencilStarts[iStencil]++;
2984 
2985  // === artificial dissipation operators ===
2986  if(artificialDissipation){
2987  int interiorOrderAD = (myOperator.overallOrder-1)*2;
2988  plascom2::operators::dissipation::Initialize(artDissOperator,artDissOperatorSplit,interiorOrderAD);
2989  int *startPtr = artDissOperator.stencilStarts;
2990  int numStencilsAD = artDissOperator.numStencils;
2991  // Forticate the stencil starts
2992  for(int iStencil = 0;iStencil < numStencilsAD;iStencil++)
2993  startPtr[iStencil]++;
2994  startPtr = artDissOperatorSplit.stencilStarts;
2995  numStencilsAD = artDissOperatorSplit.numStencils;
2996  for(int iStencil = 0;iStencil < numStencilsAD;iStencil++)
2997  startPtr[iStencil]++;
2998  }
2999 
3000  operatorInitialized = true;
3001  }
3002  }
3003  //#pragma omp barrier
3004  if(SetupStencilConnectivities())
3005  return(1);
3006 
3007  gridPtr->SetupDifferentialOperator(&myOperator,&stencilConn[0]);
3008  std::ostringstream metricMessages;
3009  if(gridPtr->ComputeMetrics(metricMessages)){
3010  std::ostringstream failMessage;
3011  failMessage << "navierstokes::rhs::SetupOperators: Error: grid::ComputeMetrics failed "
3012  << "with messages:" << std::endl << metricMessages.str() << std::endl;
3013  if(globalPtr){
3014  globalPtr->ErrOut(failMessage.str());
3015  } else {
3016  std::cout << failMessage.str();
3017  }
3018  return(1);
3019  }
3020  // if(gridType == simulation::grid::RECTILINEAR){
3021 // std::vector<double> &gridMetric(gridPtr->Metric());
3022 // std::vector<double> &gridJacobian(gridPtr->Jacobian());
3023 // std::vector<double> metricDeriv(2*numPointsBuffer,0);
3024 // std::ofstream metricOut;
3025 // metricOut.open("metric.dat");
3026 // pcpp::io::DumpContents(metricOut,gridMetric," ");
3027 // metricOut << std::endl;
3028 // pcpp::io::DumpContents(metricOut,gridJacobian," ");
3029 // metricOut << std::endl;
3030 // metricOut.close();
3031  // ApplyOperator(0,1,&gridMetric[0],&metricDeriv[0],0);
3032  // ApplyOperator(1,1,&gridMetric[numPointsBuffer],&metricDeriv[numPointsBuffer],0);
3033  // std::ofstream Ouf;
3034  // Ouf.open("metricDeriv");
3035  // for(size_t iPoint = 0;iPoint < numPointsBuffer;iPoint++)
3036  // Ouf << metricDeriv[iPoint] << " ";
3037  // for(size_t iPoint = 0;iPoint < numPointsBuffer;iPoint++)
3038  // Ouf << metricDeriv[numPointsBuffer+iPoint] << " ";
3039  // Ouf.close();
3040  // }
3041 
3042 
3043  return(0);
3044 
3045  };
3046 
3047  // ====== Thread set up/handling =====
3048 
3049  int InitThreadIntervals()
3050  {
3051  int myThreadId = 0;
3052  int numThreads = 1;
3053  bool inParallel = false;
3054 
3055 #ifdef USE_OMP
3056  inParallel = omp_in_parallel();
3057  if(inParallel){
3058  myThreadId = omp_get_thread_num();
3059  numThreads = omp_get_num_threads();
3060  }
3061 #endif
3062 
3063 #pragma omp master
3064  {
3065  fortranBufferIntervals.resize(numThreads);
3066  fortranPartitionBufferIntervals.resize(numThreads);
3067  }
3068 
3069  if(inParallel){
3070 #pragma omp barrier
3071  }
3072 
3073  std::vector<pcpp::IndexIntervalType> &threadPartitionBufferIntervals(gridPtr->ThreadPartitionBufferIntervals());
3074  if(threadPartitionBufferIntervals.size() != numThreads)
3075  return(1);
3076 
3077  threadPartitionBufferIntervals[myThreadId].Flatten(fortranPartitionBufferIntervals[myThreadId]);
3078 
3079  std::vector<pcpp::IndexIntervalType> &threadBufferIntervals(gridPtr->ThreadBufferIntervals());
3080  if(threadBufferIntervals.size() != numThreads)
3081  return(1);
3082  threadBufferIntervals[myThreadId].Flatten(fortranBufferIntervals[myThreadId]);
3083 
3084  // Forticate the thread intervals
3085  std::vector<size_t>::iterator opIt = fortranPartitionBufferIntervals[myThreadId].begin();
3086  while(opIt != fortranPartitionBufferIntervals[myThreadId].end()){
3087  *opIt += 1;
3088  opIt++;
3089  }
3090 
3091  std::vector<size_t>::iterator bufIt = fortranBufferIntervals[myThreadId].begin();
3092  while(bufIt != fortranBufferIntervals[myThreadId].end()){
3093  *bufIt += 1;
3094  bufIt++;
3095  }
3096 
3097  return(0);
3098 
3099  };
3100 
3101 
3102  // ====== Grid (and halo) handling =====
3103  int SetGrid(GridType &inGrid)
3104  {
3105 
3106  int numThreads = 1;
3107 
3108 #ifdef USE_OMP
3109  numThreads = omp_get_num_threads();
3110 #endif
3111 
3112  if(gridPtr != &inGrid){
3113 
3114  int myThreadId = 0;
3115 
3116 #ifdef USE_OMP
3117  myThreadId = omp_get_thread_num();
3118 #endif
3119 
3120 
3121 #pragma omp master
3122  {
3123 
3124  gridPtr = &inGrid;
3125  gridSizes = gridPtr->GridSizes();
3126  bufferSizes = gridPtr->BufferSizes();
3127  partitionInterval = gridPtr->PartitionInterval();
3128  partitionBufferInterval = gridPtr->PartitionBufferInterval();
3129  numPointsPartition = partitionInterval.NNodes();
3130  numDim = gridSizes.size();
3131  numPointsBuffer = gridPtr->BufferSize();
3132  gridCommPtr = &gridPtr->Communicator();
3133  subRegionsPtr = &gridPtr->SubRegions();
3134  gridCommPtr->CartNeighbors(gridNeighbors);
3135  gridType = gridPtr->Type();
3136 
3137  // std::vector<double> &gridJacobianVector(gridPtr->Jacobian());
3138  // std::vector<double> &gridMetricVector(gridPtr->Metric());
3139 
3140  // gridJacobian = &gridJacobianVector[0];
3141  // gridMetric = &gridMetricVector[0];
3142  // if(velHat)
3143  // delete [] velHat;
3144  // velHat = new double [numDim*numPointsBuffer];
3145  gridInitialized = true;
3146  gridDX.resize(numDim,1.0);
3147  gridDXM1.resize(numDim,1.0);
3148  gridMetric.resize(numDim,1.0);
3149  gridJacobian = 1.0;
3150  std::vector<double> &dX(inGrid.DX());
3151  std::vector<double>::iterator dXIt = dX.begin();
3152  std::vector<double>::iterator gridDXIt = gridDX.begin();
3153  std::vector<double>::iterator gridDXM1It = gridDXM1.begin();
3154  while(dXIt != dX.end()){
3155  double dx = *dXIt++;
3156  *gridDXIt++ = dx;
3157  *gridDXM1It++ = 1/dx;
3158  gridJacobian *= 1/dx;
3159  }
3160 
3161  dXIt = dX.begin();
3162  gridDXM1It = gridMetric.begin();
3163  while(dXIt != dX.end()){
3164  double dx = *dXIt++;
3165  *gridDXM1It++ = 1.0/(dx*gridJacobian);
3166  }
3167 
3168  partitionBufferInterval.Flatten(opInterval);
3169 
3170  // Forticate the opInterval
3171  std::vector<size_t>::iterator opIt = opInterval.begin();
3172  while(opIt != opInterval.end()){
3173  *opIt += 1;
3174  opIt++;
3175  }
3176  }
3177  // Reset the thread intervals for this grid
3178  InitThreadIntervals();
3179  }
3180 
3181  // else if ((numThreads > 1) && !threadsInitialized) { // JIC not already done
3182  // InitThreadIntervals();
3183  // }
3184  return(0);
3185  };
3186 
3187 
3188  int HaloSetup(HaloType &inHalo)
3189  {
3190 
3191 #pragma omp master
3192  {
3193  SetHalo(inHalo);
3194  periodicDirs = inHalo.PeriodicDirs();
3195  if(periodicDirs.empty()){
3196  periodicDirs.resize(numDim,1);
3197  }
3198  maskMessageId = inHalo.CreateMessageBuffers(1,4);
3199  if(exchangeDV)
3200  dvMessageId = inHalo.CreateMessageBuffers(numDV);
3201  if(exchangeFlux)
3202  fluxMessageId = inHalo.CreateMessageBuffers(numEquations);
3203  }
3204  return(0);
3205  };
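 // Each message id created above is used with the same pack/send/receive/unpack
 // sequence seen elsewhere in this file. A minimal sketch for the dependent-variable
 // exchange (illustrative only, assuming the dvBuffers layout set up in InitializeState):
 //
 //   for(int iDV = 0;iDV < numDV;iDV++)
 //     haloPtr->PackMessageBuffers(dvMessageId,iDV,1,dvBuffers[iDV]);
 //   haloPtr->SendMessage(dvMessageId,gridNeighbors,gridPtr->Communicator());
 //   haloPtr->ReceiveMessage(dvMessageId,gridNeighbors,gridPtr->Communicator());
 //   for(int iDV = 0;iDV < numDV;iDV++)
 //     haloPtr->UnPackMessageBuffers(dvMessageId,iDV,1,dvBuffers[iDV]);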
3206 
3207  int SetHalo(HaloType &inHalo)
3208  {
3209  haloPtr = &inHalo;
3210  return(0);
3211  };
3212 
3213 
3214  // ===== Boundary related stuff ====
3215 
3216  // This function sets the domainBoundaries data structure *and* goes
3217  // through the stencilConnectivity to assign boundary stencils. It is
3218  // redundant for the actual grid/domain boundaries, but boundary stencils
3219  // for internal boundaries are set here.
3220  // Threaded fail
3222  void SetDomainBCs(std::vector<BoundaryType> &domainBoundaries,std::vector<BCType> &domainBCs)
3223  {
3224 
3225  domainBCsPtr = &domainBCs;
3226  domainBoundariesPtr = &domainBoundaries;
3227  assert(subRegionsPtr != NULL);
3228 
3229  std::vector<GridRegionType> &gridRegions(*subRegionsPtr);
3230 
3231  int numBCs = domainBCs.size();
3232  int numGridRegions = gridRegions.size();
3233  int numDomainBoundaries = domainBoundaries.size();
3234  int holeStencil = myOperator.numStencils;
3235  int adHole = artDissOperator.numStencils;
3236  int adSplitHole = artDissOperatorSplit.numStencils;
3237  int adBoundaryDepth = artDissOperator.boundaryDepth;
3238  int adSplitBoundaryDepth = artDissOperatorSplit.boundaryDepth;
3239 
3240  // std::cout << "Boundary stencil processing report: " << std::endl
3241  // << "HoleStencil: " << holeStencil << std::endl
3242  // << "ADHoleStencil: " << adHole << std::endl
3243  // << "ADSplitHole: " << adSplitHole << std::endl
3244  // << "ADBoundaryDepth: " << adBoundaryDepth << std::endl
3245  // << "ADBoundaryDepthS: " << adSplitBoundaryDepth << std::endl;
3246 
3247  if(numBCs == 0)
3248  return;
3249  if(numDomainBoundaries == 0)
3250  return;
3251 
3252 
3254  std::vector<size_t> partitionBufferExtent;
3255  partitionBufferInterval.Flatten(partitionBufferExtent);
3256 
3257  typename std::vector<BoundaryType>::iterator dbIt = domainBoundaries.begin();
3258 
3259  // Initialize the mask to zero
3260  for(size_t iPoint = 0;iPoint < numPointsBuffer;iPoint++)
3261  iMask[iPoint] = 0;
3262 
3263  // First, loop through all holes and create the iMask
3264  dbIt = domainBoundaries.begin();
3265  while(dbIt != domainBoundaries.end()){
3266  BoundaryType &domainBoundary(*dbIt++);
3267  GridRegionType &gridRegion(gridRegions[domainBoundary.GridRegionID()]);
3268 
3269  int boundaryNormalDirection = gridRegion.normalDirection;
3270  pcpp::IndexIntervalType &boundaryPartitionInterval(gridRegion.regionPartitionInterval);
3271 
3272  int returnCode = 0;
3273  BCType &boundaryCondition(domainBoundary.BC());
3274  // unique int id for bc
3275  int bcType = boundaryCondition.BCType();
3276 
3277  if(bcType == navierstokes::HOLE) {
3278  // set hole bits in mask
3280  boundaryPartitionInterval.RelativeTranslation(partitionInterval,
3281  partitionBufferInterval,
3282  opInterval);
3283  if(opInterval.NNodes() > 0) {
3284  std::vector<size_t> opExtents;
3285  opInterval.Flatten(opExtents);
3286  mask::SetMask(bufferSizes,opInterval,holeMask,iMask);
3287  }
3288  }
3289  }
3290 
3291  // MJA
3292  // Bug here? Only needed for artificial dissipation
3293  //
3294  // std::cout << "passed this part" << std::endl;
3295  haloPtr->PackMessageBuffers(maskMessageId,0,1,iMask);
3296  haloPtr->SendMessage(maskMessageId,gridNeighbors,*gridCommPtr);
3297  haloPtr->ReceiveMessage(maskMessageId,gridNeighbors,*gridCommPtr);
3298  haloPtr->UnPackMessageBuffers(maskMessageId,0,1,iMask);
3299 
3300 
3301  // Second, loop through the holes again and tag hole stencils in the connectivities
3302  dbIt = domainBoundaries.begin();
3303  while(dbIt != domainBoundaries.end()){
3304  BoundaryType &domainBoundary(*dbIt++);
3305  GridRegionType &gridRegion(gridRegions[domainBoundary.GridRegionID()]);
3306 
3307  int boundaryNormalDirection = gridRegion.normalDirection;
3308  pcpp::IndexIntervalType &boundaryPartitionInterval(gridRegion.regionPartitionInterval);
3309 
3310  int returnCode = 0;
3311  BCType &boundaryCondition(domainBoundary.BC());
3312  int bcType = boundaryCondition.BCType();
3313 
3314  if(bcType == navierstokes::HOLE) {
3315  returnCode = plascom2::operators::sbp::DetectHoles(numDim,&bufferSizes[0],&partitionBufferExtent[0],
3316  boundaryDepth,holeMask,iMask,stencilConn);
3317 
3318 
3319  if(artificialDissipation){
3320  returnCode = plascom2::operators::sbp::DetectHoles(numDim,&bufferSizes[0],&partitionBufferExtent[0],
3321  adBoundaryDepth,holeMask,iMask,artDissConn);
3322  returnCode = plascom2::operators::sbp::DetectHoles(numDim,&bufferSizes[0],&partitionBufferExtent[0],
3323  adSplitBoundaryDepth,holeMask,iMask,artDissConnSplit);
3324  }
3325  }
3326  }
3327  };
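 // In summary, SetDomainBCs (a) zeroes iMask, (b) sets the hole bit for every HOLE
 // boundary region via mask::SetMask, (c) exchanges the mask through the halo so
 // off-processor holes are visible, and (d) calls DetectHoles to assign boundary
 // stencils around the holes in the stencil connectivity (and, when enabled, in the
 // artificial-dissipation connectivities).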
3328 
3329  // == auxiliary methods ==
3330  int InitStep(GridType &inGrid,StateType &inState)
3331  {
3332  return(0);
3333  };
3334  virtual int ProcessInterior2(GridType &inGrid,StateType &inState,StateType &rhsState)
3335  { return(0); };
3336  virtual int ProcessHalo(HaloType &inHalo,GridType &inGrid,StateType &inState,StateType &rhsState)
3337  { return(0); };
3338  virtual int ProcessInterior(GridType &inGrid,StateType &inState,StateType &rhsState)
3339  { return(0); };
3340  virtual int ProcessBoundary(GridType &inGrid,StateType &inState,StateType &rhsState)
3341  { return(0); };
3342 
3344  {
3345  globalPtr = &inGlobal;
3346  };
3347 
3348  std::vector<double *> &DVBuffers()
3349  { return(dvBuffers); };
3350  std::vector<double *> &TVBuffers()
3351  { return(tvBuffers); };
3352  std::vector<double *> &StateBuffers()
3353  { return(myStateBuffers); };
3354  std::vector<double *> &RHSBuffers()
3355  { return(myRHSBuffers); };
3356  double *InviscidFluxBuffer()
3357  { return(inviscidFluxBuffer); };
3358  double *DFluxBuffer()
3359  { return(dFluxBuffer); };
3361  { return(numEquations); };
3362 
3363 
3364  WENO::CoeffsWENO &WENOCoefficients()
3365  { return(coeffsWENO); };
3366  std::vector<double *> &VelGradBuffers()
3367  { return(velGradBuffers); };
3368  std::vector<double *> &TemperatureGradBuffers()
3369  { return(temperatureGradBuffers); };
3370 
3371  void InitMinSpacing()
3372  {
3373 #pragma omp master
3374  {
3375  minSpacing.resize(numDim,std::numeric_limits<double>::max());
3376  // if(gridType < simulation::grid::RECTILINEAR){
3377  if(true){
3378  for(int iDim = 0;iDim < numDim;iDim++){
3379  minSpacing[iDim] = gridDX[iDim];
3380  }
3381 
3382  // } else {
3383  // std::vector<double> &coordinateData(gridPtr->CoordinateData());
3384  // std::vector<size_t> dimOffset(numDim,0);
3385  // for(int iDim = 0;iDim < numDim;iDim++)
3386  // dimOffset[iDim] = iDim * numPointsBuffer;
3387 
3388  // pcpp::IndexIntervalType globalInterval;
3389  // globalInterval.InitSimple(gridSizes);
3390  // std::vector<size_t> partitionPointIndices;
3391  // globalInterval.GetFlatIndices(partitionBufferInterval,partitionPointIndices);
3392  // std::vector<size_t>::iterator pointIt = partitionPointIndices.begin();
3393  // while(pointIt != partitionPointIndices.end()){
3394  // size_t bufferIndex = *pointIt++;
3395  // for(int iDim = 0;iDim < numDim;iDim++){
3396  // if(coordinateData[bufferIndex+dimOffset[iDim]] < minSpacing[iDim])
3397  // minSpacing[iDim] = coordinateData[bufferIndex+dimOffset[iDim]];
3398  // }
3399  // }
3400  }
3401  }
3402  };
3403 
3404  // Must be called on all threads
3405  double TimeStep(double *outCFL = NULL)
3406  {
3407  if(minSpacing.empty())
3408  InitMinSpacing();
3409  double spaceFactor = 0.0;
3410  for(int iDim = 0;iDim < numDim;iDim++)
3411  spaceFactor += 1.0/(minSpacing[iDim]);
3412 
3413  *dtPtr = 0;
3414  if(timeSteppingMode == CONSTANT_DT){
3415 #pragma omp master
3416  {
3417  if(*dtPtr != inputDT){
3418  *dtPtr = inputDT;
3419  *cflPtr = refSndSpd*(*dtPtr)*spaceFactor;
3420  }
3421  }
3422 #pragma omp barrier
3423  if(outCFL != NULL){
3424  *outCFL = *cflPtr;
3425  }
3426  } else if (timeSteppingMode == CONSTANT_CFL){
3427 
3428 #pragma omp master
3429  {
3430  if(*cflPtr != inputCFL){
3431  *cflPtr = inputCFL;
3432  *dtPtr = *cflPtr/(refSndSpd*spaceFactor);
3433  }
3434  }
3435 #pragma omp barrier
3436 
3437  }
3438  return(*dtPtr);
3439  };
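 // Worked example (illustrative): in CONSTANT_CFL mode with numDim = 2,
 // minSpacing = {0.01, 0.01}, inputCFL = 1, and refSndSpd ~ 340.3,
 //   spaceFactor = 1/0.01 + 1/0.01 = 200
 //   dt = CFL/(refSndSpd*spaceFactor) ~ 1/(340.3*200) ~ 1.5e-5
 // In CONSTANT_DT mode the same relation is used the other way around to report
 // the CFL implied by the fixed inputDT.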
3440 
3441  private:
3442  int Init()
3443  {
3444 
3445  int myThreadId = 0;
3446  int numThreads = 1;
3447 
3448 #ifdef USE_OMP
3449  myThreadId = omp_get_thread_num();
3450  numThreads = omp_get_num_threads();
3451 #endif
3452 
3453 #pragma omp master
3454  {
3455  myCFL = 1.0;
3456  myDT = 0.0;
3457 
3458  // if(masterThread){
3459  // Conserved state
3460  rhoHandle = -1;
3461  rhoVHandle = -1;
3462  rhoEHandle = -1;
3463  scalarHandle = -1;
3464  dRhoHandle = -1;
3465  dRhoVHandle = -1;
3466  dRhoEHandle = -1;
3467  dScalarHandle = -1;
3468 
3469  // Dependent state
3470  temperatureHandle = -1;
3471  pressureHandle = -1;
3472  gradVHandle = -1;
3473  gradTHandle = -1;
3474 
3475  // Parameters
3476  gammaHandle = -1;
3477  cflHandle = -1;
3478  dtHandle = -1;
3479  numScalar = 0;
3480  sigmaHandle = -1;
3481  cHandle = -1;
3482  reHandle = -1;
3483  cpHandle = -1;
3484  gasModel = 0;
3485  transportModel = 0;
3486  gridType = 0;
3487 
3488  // Optional data handles
3489  velocityHandle = -1;
3490  rhom1Handle = -1;
3491  internalEnergyHandle = -1;
3492 
3493  // Misc
3494  numPointsPartition = 0;
3495  numPointsBuffer = 0;
3496  numDim = 0;
3497  numStateFields = 3;
3498  numDV = 6;
3499  numStateVar = 5;
3500  gridPtr = NULL;
3501  cPtr = NULL;
3502  rePtr = NULL;
3503  cpPtr = NULL;
3504  timePtr = NULL;
3505  rhoPtr = NULL;
3506  rhoVPtr = NULL;
3507  rhoEPtr = NULL;
3508  scalarPtr = NULL;
3509  rhoTargetPtr = NULL;
3510  rhoVTargetPtr = NULL;
3511  rhoETargetPtr = NULL;
3512  scalarTargetPtr = NULL;
3513  pressurePtr = NULL;
3514  temperaturePtr = NULL;
3515  rhoRHSPtr = NULL;
3516  rhoVRHSPtr = NULL;
3517  rhoERHSPtr = NULL;
3518  scalarRHSPtr = NULL;
3519  massFluxPtr = NULL;
3520  momentumFluxPtr = NULL;
3521  energyFluxPtr = NULL;
3522  scalarFluxPtr = NULL;
3523  fluxComponentPtr = NULL;
3524  velocityPtr = NULL;
3525  rhom1Ptr = NULL;
3526  internalEnergyPtr = NULL;
3527  massSourcePtr = NULL;
3528  momentumSourcePtr = NULL;
3529  energySourcePtr = NULL;
3530  scalarSourcePtr = NULL;
3531  dvBuffer = NULL;
3532  viscidFluxBuffer = NULL;
3533  inviscidFluxBuffer = NULL;
3534  dFluxBuffer = NULL;
3535  scaledPressure = NULL;
3536  scaledTau = NULL;
3537  velHat = NULL;
3538  viscidEnergy = NULL;
3539  velGradBufferPtr = NULL;
3540  velDiv = NULL;
3541  sigmaDissipation = NULL;
3542  alphaDissipation = NULL;
3543 
3544  // EOS
3545  eosPtr = NULL;
3546 
3547  temperatureGradBufferPtr = NULL;
3548  cflPtr = &myCFL;
3549  dtPtr = &myDT;
3550  timeSteppingMode = CONSTANT_DT;
3551 
3552  // BCs-related
3553  subRegionsPtr = NULL;
3554  domainBoundariesPtr = NULL;
3555  domainBCsPtr = NULL;
3556 
3557  // Operator-related
3558  stencilConn = NULL;
3559  artDissConn = NULL;
3560  artDissConnSplit = NULL;
3561  iMask = NULL;
3562 
3563  // dvBuffers = NULL;
3564  // fluxBuffers = NULL;
3565  stateInitialized = false;
3566  gridInitialized = false;
3567  threadsInitialized = false;
3568  operatorInitialized = false;
3569  errorCode = 0;
3570 
3571  dvMessageId = -1;
3572  fluxMessageId = -1;
3573  maskMessageId = -1;
3574 
3575  }
3576  return 0;
3577  };
3578 
3579  // public:
3580  // ~rhs()
3581  // {
3582  // std::cout << "Enter destructor" << std::endl;
3583  // if(fluxBuffer)
3584  // delete [] fluxBuffer;
3585  // if(dFluxBuffer)
3586  // delete [] dFluxBuffer;
3587  // if(scaledPressure)
3588  // delete [] scaledPressure;
3589  // for(int iDV = 0;iDV < numDV;iDV++)
3590  // if(dvBuffersMine[iDV])
3591  // delete [] dvBuffers[iDV];
3592  // if(velHat)
3593  // delete [] velHat;
3594  // std::cout << "Exit destructor" << std::endl;
3595  // };
3596 
3597  protected:
3598 
3599  // Data handles
3600  int rhoHandle;
3626  int cHandle;
3629 
3630  // Parameters
3631  double time;
3632  double myDT;
3633  double myCFL;
3634  double inputCFL;
3635  double inputDT;
3636 
3638  // transport model specific
3639  double power;
3640  double beta;
3641  double bulkViscFac;
3643  double refPressure;
3644  double refRho;
3645  double refEnergy;
3646  double cfl;
3647  double refSndSpd;
3648  double refVelocity;
3649  double refLength;
3650  double refCp;
3651  double refCv;
3652  double refRe;
3653  double refPr;
3654  double refMu;
3655  double refKappa;
3661  double inputSigma;
3663 
3666  const double *powerPtr;
3667  const double *betaPtr;
3668  const double *bulkViscFacPtr;
3669  const double *timePtr;
3670  // const double *cflPtr;
3671  const double *sigmaPtr;
3672  const double *sigmaDilPtr;
3673  const double *cPtr;
3674  const double *rePtr;
3675  const double *prPtr;
3676  const double *cpPtr;
3677 
3681  int numDV;
3682  int numTV;
3684 
3685  // Grid data
3686  GridType *gridPtr;
3687  // StateType *targetStatePtr;
3688  // StateType targetState;
3690  std::vector<int> gridNeighbors;
3691  int numDim;
3692  std::vector<size_t> gridSizes;
3693  std::vector<size_t> bufferSizes;
3698  std::vector<double> gridDX;
3699  std::vector<double> gridDXM1;
3700  std::vector<double> minSpacing;
3702 
3704  std::vector<double> gridMetric;
3705 
3706  std::vector<int> periodicDirs;
3707  std::vector<size_t> opInterval;
3708  HaloType *haloPtr;
3709  std::vector<std::vector<size_t> > fortranBufferIntervals;
3710  std::vector<std::vector<size_t> > fortranPartitionBufferIntervals;
3711  StateType targetState;
3712  StateType rhsState;
3713 
3714 
3715  // input State information
3716  double *rhoPtr;
3717  double *rhoVPtr;
3718  double *rhoEPtr;
3719  double *scalarPtr;
3720 
3721  double *rhoTargetPtr;
3722  double *rhoVTargetPtr;
3723  double *rhoETargetPtr;
3725  double *pressurePtr;
3727  double *muPtr;
3728  double *lambdaPtr;
3729  double *kappaPtr;
3730 
3731  double *dtPtr;
3732  double *cflPtr;
3733 
3734  // rhs State information
3735  double *rhoRHSPtr;
3736  double *rhoVRHSPtr;
3737  double *rhoERHSPtr;
3738  double *scalarRHSPtr;
3739 
3740  // flux storage
3741  double *massFluxPtr;
3743  double *energyFluxPtr;
3744  double *scalarFluxPtr;
3746 
3747  // Auxiliary data
3748  double *velocityPtr;
3749  double *rhom1Ptr;
3751  double *velHat;
3752  double *viscidEnergy;
3753  double *velDiv;
3756 
3757  // Source data
3758  double *massSourcePtr;
3762 
3763  // Halo data storage
3764  std::vector< std::vector<double *> > haloDVBuffers;
3765  std::vector< std::vector<double *> > haloFluxBuffers;
3766  std::vector<std::vector<double *> > haloStateBuffers;
3767 
3768  // My storage
3769  std::vector<bool> dvBuffersMine;
3770  std::vector<double *> dvBuffers;
3771  std::vector<double *> tvBuffers;
3772  std::vector<double *> inviscidFluxBuffers;
3773  std::vector<double *> viscidFluxBuffers;
3774  std::vector<double *> dFluxBuffers;
3775  std::vector<double *> myStateBuffers;
3776  std::vector<double *> targetStateBuffers;
3777  std::vector<double *> myRHSBuffers;
3778  std::vector<double *> velGradBuffers; // dv_i/dx_j
3779  std::vector<double *> temperatureGradBuffers; // dT/dx_i
3780  std::vector<double *> tauBuffers;
3781  std::vector<double *> heatFluxBuffers;
3782  double *dvBuffer;
3783  double *tvBuffer;
3786  double *dFluxBuffer;
3788  double *scaledTau;
3791  double *tauBufferPtr;
3793 
3794  // Operator
3795  OperatorType myOperator;
3796  OperatorType artDissOperator;
3797  OperatorType artDissOperatorSplit;
3798 
3807  int *iMask;
3812 
3813  // bookkeeping
3818  std::vector<bool> haveHalo;
3820 
3821  // message Ids
3825  // int numDVMessageComponents;
3827 
3828  // Boundary condition support
3829  std::vector<GridRegionType> *subRegionsPtr;
3830  std::vector<BoundaryType> *domainBoundariesPtr;
3831  std::vector<BCType> *domainBCsPtr;
3833 
3834  // Processing options
3839  bool useWENO;
3840 
3841  // WENO data structures
3843 
3844  // equation of state
3848 
3849  };
3850 
3851 }
3852 #endif
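
For orientation, the sketch below shows one plausible way a driver exercises this class. It is illustrative only: MyGrid, MyState, and MyOperator, along with the surrounding variables, are hypothetical placeholders for the real PlasCom2 types; only the class's entry points Initialize, SetupRHS, RHS, and TimeStep are used, and the call order within a step is an assumption rather than something this header prescribes.

    // Illustrative sketch; MyGrid/MyState/MyOperator are hypothetical concrete types.
    navierstokes::rhs<MyGrid, MyState, MyOperator> nsRHS;

    // One-time setup with the grid, initial state, parameter state, and operator.
    if (nsRHS.Initialize(grid, state, paramState, diffOperator) != 0) {
      // non-zero return codes indicate which setup phase failed
    }

    // Point the object at the state/RHS storage for the current (sub)step.
    nsRHS.SetupRHS(grid, stageState, stageRHS);

    // Assemble the Navier-Stokes right-hand side on this thread.
    nsRHS.RHS(threadId);

    // Query a time step, optionally retrieving the resulting CFL number.
    double cflOut = 0.0;
    double dt = nsRHS.TimeStep(&cflOut);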