REMORA
Energy Research and Forecasting: An Atmospheric Modeling Code
REMORAPhysBCFunct Class Reference

#include <REMORA_PhysBCFunct.H>

Collaboration diagram for REMORAPhysBCFunct:

Public Member Functions

 REMORAPhysBCFunct (const int lev, const amrex::Geometry &geom, const amrex::Vector< amrex::BCRec > &domain_bcs_type, const amrex::Gpu::DeviceVector< amrex::BCRec > &domain_bcs_type_d, amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NCONS > bc_extdir_vals)
 
 ~REMORAPhysBCFunct ()
 
void operator() (amrex::MultiFab &mf, int icomp, int ncomp, amrex::IntVect const &nghost, amrex::Real time, int bccomp)
 
void impose_xvel_bcs (const amrex::Array4< amrex::Real > &dest_arr, const amrex::Box &bx, const amrex::Box &domain, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > dxInv, amrex::Real time, int bccomp)
 
void impose_yvel_bcs (const amrex::Array4< amrex::Real > &dest_arr, const amrex::Box &bx, const amrex::Box &domain, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > dxInv, amrex::Real time, int bccomp)
 
void impose_zvel_bcs (const amrex::Array4< amrex::Real > &dest_arr, const amrex::Box &bx, const amrex::Box &domain, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > dx, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > dxInv, amrex::Real time, int bccomp)
 
void impose_cons_bcs (const amrex::Array4< amrex::Real > &mf, const amrex::Box &bx, const amrex::Box &domain, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > dxInv, int icomp, int ncomp, amrex::Real time, int bccomp)
 

Private Attributes

int m_lev
 
amrex::Geometry m_geom
 
amrex::Vector< amrex::BCRec > m_domain_bcs_type
 
amrex::Gpu::DeviceVector< amrex::BCRec > m_domain_bcs_type_d
 
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NCONS > m_bc_extdir_vals
 

Constructor & Destructor Documentation

◆ REMORAPhysBCFunct()

REMORAPhysBCFunct::REMORAPhysBCFunct ( const int  lev,
const amrex::Geometry &  geom,
const amrex::Vector< amrex::BCRec > &  domain_bcs_type,
const amrex::Gpu::DeviceVector< amrex::BCRec > &  domain_bcs_type_d,
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NCONS >  bc_extdir_vals 
)
inline
41  : m_lev(lev), m_geom(geom), m_domain_bcs_type(domain_bcs_type),
42  m_domain_bcs_type_d(domain_bcs_type_d),
43  m_bc_extdir_vals(bc_extdir_vals)
44  {}
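For orientation, a minimal construction sketch follows (not from the source): the Geometry and the two BCRec containers are assumed to have been filled by the caller, with one entry per velocity component and conserved scalar, plus a matching table of external-Dirichlet values.

    // Hedged sketch: build the boundary functor for one level. Names and sizes
    // below are illustrative; REMORA's own setup code fills these containers.
    int lev = 0;
    amrex::Geometry geom;   // assumed to describe the domain at this level
    amrex::Vector<amrex::BCRec> domain_bcs_type(AMREX_SPACEDIM+NCONS);
    amrex::Gpu::DeviceVector<amrex::BCRec> domain_bcs_type_d(AMREX_SPACEDIM+NCONS);
    amrex::Array<amrex::Array<amrex::Real,AMREX_SPACEDIM*2>,AMREX_SPACEDIM+NCONS> bc_extdir_vals{};

    REMORAPhysBCFunct physbc(lev, geom, domain_bcs_type, domain_bcs_type_d, bc_extdir_vals);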

◆ ~REMORAPhysBCFunct()

REMORAPhysBCFunct::~REMORAPhysBCFunct ( )
inline
46 {}

Member Function Documentation

◆ impose_cons_bcs()

void REMORAPhysBCFunct::impose_cons_bcs ( const amrex::Array4< amrex::Real > &  mf,
const amrex::Box &  bx,
const amrex::Box &  domain,
const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM >  dxInv,
int  icomp,
int  ncomp,
amrex::Real  time,
int  bccomp 
)
20 {
21  BL_PROFILE_VAR("impose_cons_bcs()",impose_cons_bcs);
22  const auto& dom_lo = amrex::lbound(domain);
23  const auto& dom_hi = amrex::ubound(domain);
24 
25  // Based on BCRec for the domain, we need to make BCRec for this Box
26  // bccomp is used as starting index for m_domain_bcs_type
27  // 0 is used as starting index for bcrs
28  Vector<BCRec> bcrs(ncomp);
29  amrex::setBC(bx, domain, bccomp, 0, ncomp, m_domain_bcs_type, bcrs);
30 
31  // xlo: ori = 0
32  // ylo: ori = 1
33  // zlo: ori = 2
34  // xhi: ori = 3
35  // yhi: ori = 4
36  // zhi: ori = 5
37 
38  amrex::Gpu::DeviceVector<BCRec> bcrs_d(ncomp);
39 #ifdef AMREX_USE_GPU
40  Gpu::htod_memcpy_async(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
41 #else
42  std::memcpy(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
43 #endif
44  const amrex::BCRec* bc_ptr = bcrs_d.data();
45 
46  GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>,AMREX_SPACEDIM+NCONS> l_bc_extdir_vals_d;
47 
48  for (int i = 0; i < ncomp; i++)
49  for (int ori = 0; ori < 2*AMREX_SPACEDIM; ori++)
50  l_bc_extdir_vals_d[i][ori] = m_bc_extdir_vals[bccomp+i][ori];
51 
52  GeometryData const& geomdata = m_geom.data();
53  bool is_periodic_in_x = geomdata.isPeriodic(0);
54  bool is_periodic_in_y = geomdata.isPeriodic(1);
55 
56  // First do all ext_dir bcs
57  if (!is_periodic_in_x)
58  {
59  Box bx_xlo(bx); bx_xlo.setBig (0,dom_lo.x-1);
60  Box bx_xhi(bx); bx_xhi.setSmall(0,dom_hi.x+1);
61  ParallelFor(
62  bx_xlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
63  if (bc_ptr[n].lo(0) == REMORABCType::ext_dir) {
64  dest_arr(i,j,k,icomp+n) = l_bc_extdir_vals_d[n][0];
65  }
66  },
67  bx_xhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
68  if (bc_ptr[n].hi(0) == REMORABCType::ext_dir) {
69  dest_arr(i,j,k,icomp+n) = l_bc_extdir_vals_d[n][3];
70  }
71  }
72  );
73  }
74 
75  if (!is_periodic_in_y)
76  {
77  Box bx_ylo(bx); bx_ylo.setBig (1,dom_lo.y-1);
78  Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+1);
79  ParallelFor(
80  bx_ylo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
81  if (bc_ptr[n].lo(1) == REMORABCType::ext_dir) {
82  dest_arr(i,j,k,icomp+n) = l_bc_extdir_vals_d[n][1];
83  }
84  },
85  bx_yhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
86  if (bc_ptr[n].hi(1) == REMORABCType::ext_dir) {
87  dest_arr(i,j,k,icomp+n) = l_bc_extdir_vals_d[n][4];
88  }
89  }
90  );
91  }
92 
93  {
94  Box bx_zlo(bx); bx_zlo.setBig (2,dom_lo.z-1);
95  Box bx_zhi(bx); bx_zhi.setSmall(2,dom_hi.z+1);
96  ParallelFor(
97  bx_zlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
98  if (bc_ptr[n].lo(2) == REMORABCType::ext_dir) {
99  dest_arr(i,j,k,icomp+n) = l_bc_extdir_vals_d[n][2];
100  }
101  },
102  bx_zhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
103  if (bc_ptr[n].hi(2) == REMORABCType::ext_dir) {
104  dest_arr(i,j,k,icomp+n) = l_bc_extdir_vals_d[n][5];
105  }
106  }
107  );
108  }
109 
110  // Next do ghost cells in x-direction but not reaching out in y
111  // The corners we miss here will be covered in the y-loop below or by periodicity
112  if (!is_periodic_in_x)
113  {
114  // Populate ghost cells on lo-x and hi-x domain boundaries
115  Box bx_xlo(bx); bx_xlo.setBig (0,dom_lo.x-1);
116  bx_xlo.setSmall(2,std::max(dom_lo.z,bx.smallEnd(2)));
117  bx_xlo.setBig (2,std::min(dom_hi.z,bx.bigEnd(2)));
118  Box bx_xhi(bx); bx_xhi.setSmall(0,dom_hi.x+1);
119  bx_xhi.setSmall(2,std::max(dom_lo.z,bx.smallEnd(2)));
120  bx_xhi.setBig (2,std::min(dom_hi.z,bx.bigEnd(2)));
121  ParallelFor(bx_xlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
122  int iflip = dom_lo.x - 1 - i;
123  if (bc_ptr[n].lo(0) == REMORABCType::foextrap) {
124  dest_arr(i,j,k,icomp+n) = dest_arr(dom_lo.x,j,k,icomp+n);
125  } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_even) {
126  dest_arr(i,j,k,icomp+n) = dest_arr(iflip,j,k,icomp+n);
127  } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_odd) {
128  dest_arr(i,j,k,icomp+n) = -dest_arr(iflip,j,k,icomp+n);
129  }
130  },
131  bx_xhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
132  int iflip = 2*dom_hi.x + 1 - i;
133  if (bc_ptr[n].hi(0) == REMORABCType::foextrap) {
134  dest_arr(i,j,k,icomp+n) = dest_arr(dom_hi.x,j,k,icomp+n);
135  } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_even) {
136  dest_arr(i,j,k,icomp+n) = dest_arr(iflip,j,k,icomp+n);
137  } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_odd) {
138  dest_arr(i,j,k,icomp+n) = -dest_arr(iflip,j,k,icomp+n);
139  }
140  }
141  );
142  }
143 
144  if (!is_periodic_in_y)
145  {
146  // Populate ghost cells on lo-y and hi-y domain boundaries
147  Box bx_ylo(bx); bx_ylo.setBig (1,dom_lo.y-1);
148  bx_ylo.setSmall(2,std::max(dom_lo.z,bx.smallEnd(2)));
149  bx_ylo.setBig (2,std::min(dom_hi.z,bx.bigEnd(2)));
150  Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+1);
151  bx_yhi.setSmall(2,std::max(dom_lo.z,bx.smallEnd(2)));
152  bx_yhi.setBig (2,std::min(dom_hi.z,bx.bigEnd(2)));
153  ParallelFor(
154  bx_ylo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
155  int jflip = dom_lo.y - 1 - j;
156  if (bc_ptr[n].lo(1) == REMORABCType::foextrap) {
157  dest_arr(i,j,k,icomp+n) = dest_arr(i,dom_lo.y,k,icomp+n);
158  } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_even) {
159  dest_arr(i,j,k,icomp+n) = dest_arr(i,jflip,k,icomp+n);
160  } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_odd) {
161  dest_arr(i,j,k,icomp+n) = -dest_arr(i,jflip,k,icomp+n);
162  }
163  },
164  bx_yhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
165  int jflip = 2*dom_hi.y + 1 - j;
166  if (bc_ptr[n].hi(1) == REMORABCType::foextrap) {
167  dest_arr(i,j,k,icomp+n) = dest_arr(i,dom_hi.y,k,icomp+n);
168  } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_even) {
169  dest_arr(i,j,k,icomp+n) = dest_arr(i,jflip,k,icomp+n);
170  } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_odd) {
171  dest_arr(i,j,k,icomp+n) = -dest_arr(i,jflip,k,icomp+n);
172  }
173  }
174  );
175  }
176 
177  {
178  Box bx_zlo(bx); bx_zlo.setBig (2,std::max(dom_lo.z-1,bx.smallEnd(2)));
179  Box bx_zhi(bx); bx_zhi.setSmall(2,std::min(dom_hi.z+1,bx.bigEnd(2)));
180  // Populate ghost cells on lo-z and hi-z domain boundaries
181 
182  if (bx_zlo.ok()) {
183  ParallelFor(bx_zlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
184  {
185  int kflip = dom_lo.z - 1 - k;
186  if (bc_ptr[n].lo(2) == REMORABCType::foextrap) {
187  dest_arr(i,j,k,icomp+n) = dest_arr(i,j,dom_lo.z,icomp+n);
188  } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_even) {
189  dest_arr(i,j,k,icomp+n) = dest_arr(i,j,kflip,icomp+n);
190  } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_odd) {
191  dest_arr(i,j,k,icomp+n) = -dest_arr(i,j,kflip,icomp+n);
192  }
193  });
194  }
195 
196  if (bx_zhi.ok()) {
197  ParallelFor(bx_zhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
198  {
199  int kflip = 2*dom_hi.z + 1 - k;
200  if (bc_ptr[n].hi(2) == REMORABCType::foextrap) {
201  dest_arr(i,j,k,icomp+n) = dest_arr(i,j,dom_hi.z,icomp+n);
202  } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_even) {
203  dest_arr(i,j,k,icomp+n) = dest_arr(i,j,kflip,icomp+n);
204  } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_odd) {
205  dest_arr(i,j,k,icomp+n) = -dest_arr(i,j,kflip,icomp+n);
206  }
207  });
208  }
209  }
210 
211  Gpu::streamSynchronize();
212 }
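The second index of l_bc_extdir_vals_d follows the orientation convention noted in the listing (xlo = 0, ylo = 1, zlo = 2, xhi = 3, yhi = 4, zhi = 5). As a hedged sketch, a caller might fill one row of the bc_extdir_vals table passed to the constructor like this (component index and value are illustrative only):

    // Hypothetical: hold one component of the conserved state at a fixed value
    // on the low-x and high-x boundaries; other orientations keep their defaults.
    int comp = 0;                     // illustrative component index into the table
    bc_extdir_vals[comp][0] = 35.0;   // xlo, ori = 0
    bc_extdir_vals[comp][3] = 35.0;   // xhi, ori = 3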

◆ impose_xvel_bcs()

void REMORAPhysBCFunct::impose_xvel_bcs ( const amrex::Array4< amrex::Real > &  dest_arr,
const amrex::Box &  bx,
const amrex::Box &  domain,
const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM >  dxInv,
amrex::Real  time,
int  bccomp 
)
15 {
16  BL_PROFILE_VAR("impose_xvel_bcs()",impose_xvel_bcs);
17  const auto& dom_lo = amrex::lbound(domain);
18  const auto& dom_hi = amrex::ubound(domain);
19 
20  // Based on BCRec for the domain, we need to make BCRec for this Box
21  // bccomp is used as starting index for m_domain_bcs_type
22  // 0 is used as starting index for bcrs
23  int ncomp = 1;
24  Vector<BCRec> bcrs(ncomp);
25  amrex::setBC(bx, domain, bccomp, 0, ncomp, m_domain_bcs_type, bcrs);
26 
27  // xlo: ori = 0
28  // ylo: ori = 1
29  // zlo: ori = 2
30  // xhi: ori = 3
31  // yhi: ori = 4
32  // zhi: ori = 5
33 
34  amrex::Gpu::DeviceVector<BCRec> bcrs_d(ncomp);
35 #ifdef AMREX_USE_GPU
36  Gpu::htod_memcpy_async(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
37 #else
38  std::memcpy(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
39 #endif
40  const amrex::BCRec* bc_ptr = bcrs_d.data();
41 
42  GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>,AMREX_SPACEDIM+NCONS> l_bc_extdir_vals_d;
43 
44  for (int i = 0; i < ncomp; i++)
45  for (int ori = 0; ori < 2*AMREX_SPACEDIM; ori++)
46  l_bc_extdir_vals_d[i][ori] = m_bc_extdir_vals[bccomp+i][ori];
47 
48  GeometryData const& geomdata = m_geom.data();
49  bool is_periodic_in_x = geomdata.isPeriodic(0);
50  bool is_periodic_in_y = geomdata.isPeriodic(1);
51 
52  // First do all ext_dir bcs
53  if (!is_periodic_in_x)
54  {
55  Box bx_xlo(bx); bx_xlo.setBig (0,dom_lo.x-1);
56  Box bx_xhi(bx); bx_xhi.setSmall(0,dom_hi.x+2);
57  Box bx_xlo_face(bx); bx_xlo_face.setSmall(0,dom_lo.x ); bx_xlo_face.setBig(0,dom_lo.x );
58  Box bx_xhi_face(bx); bx_xhi_face.setSmall(0,dom_hi.x+1); bx_xhi_face.setBig(0,dom_hi.x+1);
59  ParallelFor(
60  bx_xlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
61  int iflip = dom_lo.x - i;
62  if (bc_ptr[n].lo(0) == REMORABCType::ext_dir) {
63  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][0];
64  } else if (bc_ptr[n].lo(0) == REMORABCType::foextrap) {
65  dest_arr(i,j,k) = dest_arr(dom_lo.x,j,k);
66  } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_even) {
67  dest_arr(i,j,k) = dest_arr(iflip,j,k);
68  } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_odd) {
69  dest_arr(i,j,k) = -dest_arr(iflip,j,k);
70  }
71  },
72  // We only set the values on the domain faces themselves if EXT_DIR
73  bx_xlo_face, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
74  if (bc_ptr[n].lo(0) == REMORABCType::ext_dir)
75  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][0];
76  }
77  );
78  ParallelFor(
79  bx_xhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
80  int iflip = 2*(dom_hi.x + 1) - i;
81  if (bc_ptr[n].hi(0) == REMORABCType::ext_dir) {
82  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][3];
83  } else if (bc_ptr[n].hi(0) == REMORABCType::foextrap) {
84  dest_arr(i,j,k) = dest_arr(dom_hi.x+1,j,k);
85  } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_even) {
86  dest_arr(i,j,k) = dest_arr(iflip,j,k);
87  } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_odd) {
88  dest_arr(i,j,k) = -dest_arr(iflip,j,k);
89  }
90  },
91  // We only set the values on the domain faces themselves if EXT_DIR
92  bx_xhi_face, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
93  if (bc_ptr[n].hi(0) == REMORABCType::ext_dir)
94  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][3];
95  }
96  );
97  } // not periodic in x
98 
99  if (!is_periodic_in_y)
100  {
101  // Populate ghost cells on lo-y and hi-y domain boundaries
102  Box bx_ylo(bx); bx_ylo.setBig (1,dom_lo.y-1);
103  Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+1);
104  ParallelFor(
105  bx_ylo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
106  int jflip = dom_lo.y - 1 - j;
107  if (bc_ptr[n].lo(1) == REMORABCType::ext_dir) {
108  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][1];
109  } else if (bc_ptr[n].lo(1) == REMORABCType::foextrap) {
110  dest_arr(i,j,k) = dest_arr(i,dom_lo.y,k);
111  } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_even) {
112  dest_arr(i,j,k) = dest_arr(i,jflip,k);
113  } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_odd) {
114  dest_arr(i,j,k) = -dest_arr(i,jflip,k);
115  }
116  },
117  bx_yhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
118  int jflip = 2*dom_hi.y + 1 - j;
119  if (bc_ptr[n].hi(1) == REMORABCType::ext_dir) {
120  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][4];
121  } else if (bc_ptr[n].hi(1) == REMORABCType::foextrap) {
122  dest_arr(i,j,k) = dest_arr(i,dom_hi.y,k);
123  } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_even) {
124  dest_arr(i,j,k) = dest_arr(i,jflip,k);
125  } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_odd) {
126  dest_arr(i,j,k) = -dest_arr(i,jflip,k);
127  }
128  }
129  );
130  } // not periodic in y
131 
132  {
133  // Populate ghost cells on lo-z and hi-z domain boundaries
134  Box bx_zlo(bx); bx_zlo.setBig (2,dom_lo.z-1);
135  Box bx_zhi(bx); bx_zhi.setSmall(2,dom_hi.z+1);
136  ParallelFor(
137  bx_zlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
138  int kflip = dom_lo.z - 1 - k;
139  if (bc_ptr[n].lo(2) == REMORABCType::ext_dir) {
140  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][2];
141  } else if (bc_ptr[n].lo(2) == REMORABCType::foextrap) {
142  dest_arr(i,j,k) = dest_arr(i,j,dom_lo.z);
143  } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_even) {
144  dest_arr(i,j,k) = dest_arr(i,j,kflip);
145  } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_odd) {
146  dest_arr(i,j,k) = -dest_arr(i,j,kflip);
147  }
148  },
149  bx_zhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
150  int kflip = 2*dom_hi.z + 1 - k;
151  if (bc_ptr[n].hi(2) == REMORABCType::ext_dir) {
152  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][5];
153  } else if (bc_ptr[n].hi(2) == REMORABCType::foextrap) {
154  dest_arr(i,j,k) = dest_arr(i,j,dom_hi.z);
155  } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_even) {
156  dest_arr(i,j,k) = dest_arr(i,j,kflip);
157  } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_odd) {
158  dest_arr(i,j,k) = -dest_arr(i,j,kflip);
159  }
160  }
161  );
162  } // z
163 
164 
165  Gpu::streamSynchronize();
166 }
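Note that the reflection index for this x-face-centered field, iflip = dom_lo.x - i, mirrors about the boundary face at i = dom_lo.x, whereas the cell-centered impose_cons_bcs uses dom_lo.x - 1 - i to mirror about the face between the first ghost and first interior cells. A small worked example (illustrative values, not from the source):

    // With dom_lo.x = 0: a face-centered ghost index i = -1 reflects to
    // iflip = 0 - (-1) = 1 (the first interior x-face), while a cell-centered
    // ghost cell i = -1 reflects to 0 - 1 - (-1) = 0 (the first interior cell).
    int dom_lo_x   = 0;
    int i_ghost    = -1;
    int iflip_face = dom_lo_x - i_ghost;        // == 1
    int iflip_cell = dom_lo_x - 1 - i_ghost;    // == 0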

◆ impose_yvel_bcs()

void REMORAPhysBCFunct::impose_yvel_bcs ( const amrex::Array4< amrex::Real > &  dest_arr,
const amrex::Box &  bx,
const amrex::Box &  domain,
const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM >  dxInv,
amrex::Real  time,
int  bccomp 
)
15 {
16  BL_PROFILE_VAR("impose_yvel_bcs()",impose_yvel_bcs);
17  const auto& dom_lo = amrex::lbound(domain);
18  const auto& dom_hi = amrex::ubound(domain);
19 
20  // Based on BCRec for the domain, we need to make BCRec for this Box
21  // bccomp is used as starting index for m_domain_bcs_type
22  // 0 is used as starting index for bcrs
23  int ncomp = 1;
24  Vector<BCRec> bcrs(ncomp);
25  amrex::setBC(bx, domain, bccomp, 0, ncomp, m_domain_bcs_type, bcrs);
26 
27  // xlo: ori = 0
28  // ylo: ori = 1
29  // zlo: ori = 2
30  // xhi: ori = 3
31  // yhi: ori = 4
32  // zhi: ori = 5
33 
34  amrex::Gpu::DeviceVector<BCRec> bcrs_d(ncomp);
35 #ifdef AMREX_USE_GPU
36  Gpu::htod_memcpy_async(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
37 #else
38  std::memcpy(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
39 #endif
40  const amrex::BCRec* bc_ptr = bcrs_d.data();
41 
42  GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>, AMREX_SPACEDIM+NCONS> l_bc_extdir_vals_d;
43 
44  for (int i = 0; i < ncomp; i++)
45  for (int ori = 0; ori < 2*AMREX_SPACEDIM; ori++)
46  l_bc_extdir_vals_d[i][ori] = m_bc_extdir_vals[bccomp+i][ori];
47 
48  GeometryData const& geomdata = m_geom.data();
49  bool is_periodic_in_x = geomdata.isPeriodic(0);
50  bool is_periodic_in_y = geomdata.isPeriodic(1);
51 
52  // First do all ext_dir bcs
53  if (!is_periodic_in_x)
54  {
55  // Populate ghost cells on lo-x and hi-x domain boundaries
56  Box bx_xlo(bx); bx_xlo.setBig (0,dom_lo.x-1);
57  Box bx_xhi(bx); bx_xhi.setSmall(0,dom_hi.x+1);
58  ParallelFor(
59  bx_xlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
60  int iflip = dom_lo.x - 1 - i;
61  if (bc_ptr[n].lo(0) == REMORABCType::ext_dir) {
62  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][0];
63  } else if (bc_ptr[n].lo(0) == REMORABCType::foextrap) {
64  dest_arr(i,j,k) = dest_arr(dom_lo.x,j,k);
65  } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_even) {
66  dest_arr(i,j,k) = dest_arr(iflip,j,k);
67  } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_odd) {
68  dest_arr(i,j,k) = -dest_arr(iflip,j,k);
69  }
70  },
71  bx_xhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
72  int iflip = 2*dom_hi.x + 1 - i;
73  if (bc_ptr[n].hi(0) == REMORABCType::ext_dir) {
74  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][3];
75  } else if (bc_ptr[n].hi(0) == REMORABCType::foextrap) {
76  dest_arr(i,j,k) = dest_arr(dom_hi.x,j,k);
77  } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_even) {
78  dest_arr(i,j,k) = dest_arr(iflip,j,k);
79  } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_odd) {
80  dest_arr(i,j,k) = -dest_arr(iflip,j,k);
81  }
82  }
83  );
84  }
85 
86  if (!is_periodic_in_y)
87  {
88  // Populate ghost cells on lo-y and hi-y domain boundaries
89  Box bx_ylo(bx); bx_ylo.setBig (1,dom_lo.y-1);
90  Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+2);
91  Box bx_ylo_face(bx); bx_ylo_face.setSmall(1,dom_lo.y ); bx_ylo_face.setBig(1,dom_lo.y );
92  Box bx_yhi_face(bx); bx_yhi_face.setSmall(1,dom_hi.y+1); bx_yhi_face.setBig(1,dom_hi.y+1);
93 
94  ParallelFor(
95  bx_ylo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
96  int jflip = dom_lo.y - j;
97  if (bc_ptr[n].lo(1) == REMORABCType::ext_dir) {
98  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][1];
99  } else if (bc_ptr[n].lo(1) == REMORABCType::foextrap) {
100  dest_arr(i,j,k) = dest_arr(i,dom_lo.y,k);
101  } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_even) {
102  dest_arr(i,j,k) = dest_arr(i,jflip,k);
103  } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_odd) {
104  dest_arr(i,j,k) = -dest_arr(i,jflip,k);
105  }
106  },
107  // We only set the values on the domain faces themselves if EXT_DIR
108  bx_ylo_face, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
109  if (bc_ptr[n].lo(1) == REMORABCType::ext_dir)
110  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][1];
111  }
112  );
113  ParallelFor(
114  bx_yhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
115  int jflip = 2*(dom_hi.y + 1) - j;
116  if (bc_ptr[n].hi(1) == REMORABCType::ext_dir) {
117  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][4];
118  } else if (bc_ptr[n].hi(1) == REMORABCType::foextrap) {
119  dest_arr(i,j,k) = dest_arr(i,dom_hi.y+1,k);
120  } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_even) {
121  dest_arr(i,j,k) = dest_arr(i,jflip,k);
122  } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_odd) {
123  dest_arr(i,j,k) = -dest_arr(i,jflip,k);
124  }
125  },
126  // We only set the values on the domain faces themselves if EXT_DIR
127  bx_yhi_face, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
128  if (bc_ptr[n].hi(1) == REMORABCType::ext_dir)
129  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][4];
130  }
131  );
132  }
133 
134  {
135  // Populate ghost cells on lo-z and hi-z domain boundaries
136  Box bx_zlo(bx); bx_zlo.setBig (2,dom_lo.z-1);
137  Box bx_zhi(bx); bx_zhi.setSmall(2,dom_hi.z+1);
138  ParallelFor(
139  bx_zlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
140  int kflip = dom_lo.z - 1 - k;
141  if (bc_ptr[n].lo(2) == REMORABCType::ext_dir) {
142  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][2];
143  } else if (bc_ptr[n].lo(2) == REMORABCType::foextrap) {
144  dest_arr(i,j,k) = dest_arr(i,j,dom_lo.z);
145  } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_even) {
146  dest_arr(i,j,k) = dest_arr(i,j,kflip);
147  } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_odd) {
148  dest_arr(i,j,k) = -dest_arr(i,j,kflip);
149  }
150  },
151  bx_zhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
152  int kflip = 2*dom_hi.z + 1 - k;
153  if (bc_ptr[n].hi(2) == REMORABCType::ext_dir) {
154  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][5];
155  } else if (bc_ptr[n].hi(2) == REMORABCType::foextrap) {
156  dest_arr(i,j,k) = dest_arr(i,j,dom_hi.z);
157  } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_even) {
158  dest_arr(i,j,k) = dest_arr(i,j,kflip);
159  } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_odd) {
160  dest_arr(i,j,k) = -dest_arr(i,j,kflip);
161  }
162  }
163  );
164  }
165 
166  Gpu::streamSynchronize();
167 }

◆ impose_zvel_bcs()

void REMORAPhysBCFunct::impose_zvel_bcs ( const amrex::Array4< amrex::Real > &  dest_arr,
const amrex::Box &  bx,
const amrex::Box &  domain,
const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM >  dx,
const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM >  dxInv,
amrex::Real  time,
int  bccomp 
)

Based on the BCRec for the domain, we need to make a BCRec for this Box.

17 {
18  const auto& dom_lo = amrex::lbound(domain);
19  const auto& dom_hi = amrex::ubound(domain);
20 
21  // Based on BCRec for the domain, we need to make BCRec for this Box
22  // bccomp is used as starting index for m_domain_bcs_type
23  // 0 is used as starting index for bcrs
24  int ncomp = 1;
25  Vector<BCRec> bcrs(ncomp);
26  amrex::setBC(bx, domain, bccomp, 0, ncomp, m_domain_bcs_type, bcrs);
27 
28  // xlo: ori = 0
29  // ylo: ori = 1
30  // zlo: ori = 2
31  // xhi: ori = 3
32  // yhi: ori = 4
33  // zhi: ori = 5
34 
35  //! Based on BCRec for the domain, we need to make BCRec for this Box
36  // bccomp is used as starting index for m_domain_bcs_type
37  // 0 is used as starting index for bcrs
38  amrex::setBC(bx, domain, bccomp, 0, ncomp, m_domain_bcs_type, bcrs);
39 
40  amrex::Gpu::DeviceVector<BCRec> bcrs_d(ncomp);
41 #ifdef AMREX_USE_GPU
42  Gpu::htod_memcpy_async(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
43 #else
44  std::memcpy(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
45 #endif
46  const amrex::BCRec* bc_ptr = bcrs_d.data();
47 
48  GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>,AMREX_SPACEDIM+NCONS> l_bc_extdir_vals_d;
49 
50  for (int i = 0; i < ncomp; i++)
51  for (int ori = 0; ori < 2*AMREX_SPACEDIM; ori++)
52  l_bc_extdir_vals_d[i][ori] = m_bc_extdir_vals[bccomp+i][ori];
53 
54  GeometryData const& geomdata = m_geom.data();
55  bool is_periodic_in_x = geomdata.isPeriodic(0);
56  bool is_periodic_in_y = geomdata.isPeriodic(1);
57 
58  // First do all ext_dir bcs
59  if (!is_periodic_in_x)
60  {
61  Box bx_xlo(bx); bx_xlo.setBig (0,dom_lo.x-1);
62  Box bx_xhi(bx); bx_xhi.setSmall(0,dom_hi.x+1);
63  ParallelFor(
64  bx_xlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
65  int iflip = dom_lo.x - 1 - i;
66  if (bc_ptr[n].lo(0) == REMORABCType::ext_dir) {
67  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][0];
68  } else if (bc_ptr[n].lo(0) == REMORABCType::foextrap) {
69  dest_arr(i,j,k) = dest_arr(dom_lo.x,j,k);
70  } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_even) {
71  dest_arr(i,j,k) = dest_arr(iflip,j,k);
72  } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_odd) {
73  dest_arr(i,j,k) = -dest_arr(iflip,j,k);
74  }
75  },
76  bx_xhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
77  int iflip = 2*dom_hi.x + 1 - i;
78  if (bc_ptr[n].hi(0) == REMORABCType::ext_dir) {
79  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][3];
80  } else if (bc_ptr[n].hi(0) == REMORABCType::foextrap) {
81  dest_arr(i,j,k) = dest_arr(dom_hi.x,j,k);
82  } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_even) {
83  dest_arr(i,j,k) = dest_arr(iflip,j,k);
84  } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_odd) {
85  dest_arr(i,j,k) = -dest_arr(iflip,j,k);
86  }
87  }
88  );
89  }
90 
91  // First do all ext_dir bcs
92  if (!is_periodic_in_y)
93  {
94  Box bx_ylo(bx); bx_ylo.setBig (1,dom_lo.y-1);
95  Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+1);
96  ParallelFor(bx_ylo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
97  int jflip = dom_lo.y - 1 - j;
98  if (bc_ptr[n].lo(1) == REMORABCType::ext_dir) {
99  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][1];
100  } else if (bc_ptr[n].lo(1) == REMORABCType::foextrap) {
101  dest_arr(i,j,k) = dest_arr(i,dom_lo.y,k);
102  } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_even) {
103  dest_arr(i,j,k) = dest_arr(i,jflip,k);
104  } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_odd) {
105  dest_arr(i,j,k) = -dest_arr(i,jflip,k);
106  }
107  },
108  bx_yhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
109  int jflip = 2*dom_hi.y + 1 - j;
110  if (bc_ptr[n].hi(1) == REMORABCType::ext_dir) {
111  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][4];
112  } else if (bc_ptr[n].hi(1) == REMORABCType::foextrap) {
113  dest_arr(i,j,k) = dest_arr(i,dom_hi.y,k);
114  } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_even) {
115  dest_arr(i,j,k) = dest_arr(i,jflip,k);
116  } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_odd) {
117  dest_arr(i,j,k) = -dest_arr(i,jflip,k);
118  }
119  });
120  }
121 
122  {
123  Box bx_zlo(bx); bx_zlo.setBig (2,dom_lo.z-1);
124  Box bx_zhi(bx); bx_zhi.setSmall(2,dom_hi.z+2);
125 
126  // Populate face values on z-boundaries themselves only if EXT_DIR
127  Box bx_zlo_face(bx); bx_zlo_face.setSmall(2,dom_lo.z ); bx_zlo_face.setBig(2,dom_lo.z );
128  Box bx_zhi_face(bx); bx_zhi_face.setSmall(2,dom_hi.z+1); bx_zhi_face.setBig(2,dom_hi.z+1);
129 
130  ParallelFor(bx_zlo, [=] AMREX_GPU_DEVICE (int i, int j, int k) {
131  int n = 0;
132  int kflip = dom_lo.z - k;
133  if (bc_ptr[n].lo(2) == REMORABCType::ext_dir) {
134  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][2];
135  } else if (bc_ptr[n].lo(2) == REMORABCType::foextrap) {
136  dest_arr(i,j,k) = dest_arr(i,j,dom_lo.z);
137  } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_even) {
138  dest_arr(i,j,k) = dest_arr(i,j,kflip);
139  } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_odd) {
140  dest_arr(i,j,k) = -dest_arr(i,j,kflip);
141  }
142  });
143 
144  ParallelFor(bx_zlo_face, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
145  if (bc_ptr[n].lo(2) == REMORABCType::ext_dir) {
146  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][2];
147  }
148  }
149  );
150 
151  ParallelFor(
152  bx_zhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
153  int kflip = 2*(dom_hi.z + 1) - k;
154  if (bc_ptr[n].hi(2) == REMORABCType::ext_dir) {
155  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][5];
156  } else if (bc_ptr[n].hi(2) == REMORABCType::foextrap) {
157  dest_arr(i,j,k) = dest_arr(i,j,dom_hi.z+1);
158  } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_even) {
159  dest_arr(i,j,k) = dest_arr(i,j,kflip);
160  } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_odd) {
161  dest_arr(i,j,k) = -dest_arr(i,j,kflip);
162  }
163 
164  },
165  bx_zhi_face, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
166  if (bc_ptr[n].hi(2) == REMORABCType::ext_dir) {
167  dest_arr(i,j,k) = l_bc_extdir_vals_d[n][5];
168  }
169  }
170  );
171  }
172 
173  Gpu::streamSynchronize();
174 }

◆ operator()()

void REMORAPhysBCFunct::operator() ( amrex::MultiFab &  mf,
int  icomp,
int  ncomp,
amrex::IntVect const &  nghost,
amrex::Real  time,
int  bccomp 
)
21 {
22  if (m_geom.isAllPeriodic()) return;
23 
24  BL_PROFILE("REMORAPhysBCFunct::()");
25 
26  const auto& domain = m_geom.Domain();
27  const auto dxInv = m_geom.InvCellSizeArray();
28 
29  // Create a grown domain box containing valid + periodic cells
30  Box gdomain = amrex::convert(domain, mf.boxArray().ixType());
31  for (int i = 0; i < AMREX_SPACEDIM; ++i) {
32  if (m_geom.isPeriodic(i)) {
33  gdomain.grow(i, nghost[i]);
34  }
35  }
36 
37 #ifdef AMREX_USE_OMP
38 #pragma omp parallel if (Gpu::notInLaunchRegion())
39 #endif
40  {
41  if (mf.boxArray()[0].ixType() == IndexType(IntVect(0,0,0))) {
42 
43  // Cell-centered arrays only
44  for (MFIter mfi(mf); mfi.isValid(); ++mfi)
45  {
46  const Array4<Real>& dest_arr = mf.array(mfi);
47  Box bx = mfi.validbox(); bx.grow(nghost);
48 
49  if (!gdomain.contains(bx)) {
50  AMREX_ALWAYS_ASSERT(icomp == 0 && icomp+ncomp <= NCONS);
51  impose_cons_bcs(dest_arr,bx,domain,dxInv,icomp,ncomp,time,bccomp);
52  }
53  } // mfi
54 
55  } else {
56 
57  // Face-based arrays only
58  for (MFIter mfi(mf); mfi.isValid(); ++mfi)
59  {
60  Box bx = mfi.validbox(); bx.grow(nghost);
61  if (!gdomain.contains(bx)) {
62  if(bx.ixType() == IndexType(IntVect(1,0,0))) {
63  const Array4<Real>& dest_arr = mf.array(mfi,0);
64  impose_xvel_bcs(dest_arr,bx,domain,dxInv,time,bccomp);
65  } else if (bx.ixType() == IndexType(IntVect(0,1,0))) {
66  const Array4<Real>& dest_arr = mf.array(mfi,0);
67  impose_yvel_bcs(dest_arr,bx,domain,dxInv,time,bccomp);
68  }
69  }
70  } // mfi
71  } // box type
72  } // OpenMP
73 } // operator()
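A hedged usage sketch follows, assuming a REMORAPhysBCFunct named physbc built as in the constructor sketch above; mf is the caller's cell-centered state MultiFab and time comes from the caller, while icomp = 0 and bccomp = 0 are illustrative values.

    // Fill physical-boundary ghost cells of a cell-centered MultiFab; the same
    // call on a face-centered (velocity) MultiFab dispatches to
    // impose_xvel_bcs / impose_yvel_bcs instead.
    physbc(mf, 0, mf.nComp(), mf.nGrowVect(), time, 0);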

Member Data Documentation

◆ m_bc_extdir_vals

amrex::Array<amrex::Array<amrex::Real, AMREX_SPACEDIM*2>,AMREX_SPACEDIM+NCONS> REMORAPhysBCFunct::m_bc_extdir_vals
private

◆ m_domain_bcs_type

amrex::Vector<amrex::BCRec> REMORAPhysBCFunct::m_domain_bcs_type
private

◆ m_domain_bcs_type_d

amrex::Gpu::DeviceVector<amrex::BCRec> REMORAPhysBCFunct::m_domain_bcs_type_d
private

◆ m_geom

amrex::Geometry REMORAPhysBCFunct::m_geom
private

◆ m_lev

int REMORAPhysBCFunct::m_lev
private

The documentation for this class was generated from the following files: