REMORA
Regional Modeling of Oceans Refined Adaptively
Loading...
Searching...
No Matches
REMORA_BoundaryConditions_cons.cpp
Go to the documentation of this file.
1#include "AMReX_PhysBCFunct.H"
3
4using namespace amrex;
5
6/**
7 * @param[inout] dest_arr data on which to apply BCs
8 * @param[in ] bx box to update on
9 * @param[in ] valid_bx valid box
10 * @param[in ] domain domain box
11 * @param[in ] dxInv pm or pn
12 * @param[in ] mskr land-sea mask on rho-points
13 * @param[in ] msku land-sea mask on u-points
14 * @param[in ] mskv land-sea mask on v-points
15 * @param[in ] calc_arr data to use in the RHS of calculations
16 * @param[in ] icomp component to update
17 * @param[in ] ncomp number of components to update, starting from icomp
18 * @param[in ] time current time
19 * @param[in ] bccomp index into both domain_bcs_type_bcr and bc_extdir_vals for icomp=0
 20 * @param[in    ] n_not_fill perimeter of cells in x and y where BCs are not applied for non-ext_dir conditions
21 */
22void REMORAPhysBCFunct::impose_cons_bcs (const Array4<Real>& dest_arr, const Box& bx, const Box& valid_bx, const Box& domain,
23 const GpuArray<Real,AMREX_SPACEDIM> /*dxInv*/, const Array4<const Real>& mskr,
24 const Array4<const Real>& msku, const Array4<const Real>& mskv,
25 const Array4<const Real>& calc_arr,
26 int icomp, int ncomp, Real /*time*/, int bccomp, int n_not_fill)
27{
28 BL_PROFILE_VAR("impose_cons_bcs()",impose_cons_bcs);
29 const auto& dom_lo = amrex::lbound(domain);
30 const auto& dom_hi = amrex::ubound(domain);
31
32 // Based on BCRec for the domain, we need to make BCRec for this Box
33 // bccomp is used as starting index for m_domain_bcs_type
34 // 0 is used as starting index for bcrs
35 Vector<BCRec> bcrs(ncomp);
36 amrex::setBC(bx, domain, bccomp, 0, ncomp, m_domain_bcs_type, bcrs);
37
38 // xlo: ori = 0
39 // ylo: ori = 1
40 // zlo: ori = 2
41 // xhi: ori = 3
42 // yhi: ori = 4
43 // zhi: ori = 5
44
45 amrex::Gpu::DeviceVector<BCRec> bcrs_d(ncomp);
46#ifdef AMREX_USE_GPU
47 Gpu::htod_memcpy_async(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
48#else
49 std::memcpy(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
50#endif
51 const amrex::BCRec* bc_ptr = bcrs_d.data();
52
53 GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>,AMREX_SPACEDIM+NCONS+8> l_bc_extdir_vals_d;
54
55 for (int i = 0; i < ncomp; i++) {
56 for (int ori = 0; ori < 2*AMREX_SPACEDIM; ori++) {
57 l_bc_extdir_vals_d[i][ori] = m_bc_extdir_vals[bccomp+i][ori];
58 }
59 }
60
61 GeometryData const& geomdata = m_geom.data();
62 bool is_periodic_in_x = geomdata.isPeriodic(0);
63 bool is_periodic_in_y = geomdata.isPeriodic(1);
64 const Real eps= 1.0e-20_rt;
65
66 // If we're doing zeta, then calc_arr only has a single component
67 // corresponding to the component to be used in calculating the boundary
68 // value. If it's another variable, either we aren't using calc_arr
69 // or the components correspond to salt, temp, etc and we loop over ncomp
70 // so we leave icomp as is
71 int icomp_calc = (bccomp == BCVars::zeta_bc) ? 0 : icomp;
72
73 Box dest_arr_box = Box(dest_arr);
74 // First do all ext_dir bcs
75 if (!is_periodic_in_x)
76 {
77 Box bx_xlo(bx); bx_xlo.setBig (0,dom_lo.x-1);
78 bx_xlo.setSmall(1,std::max(valid_bx.smallEnd(1)-1,dom_lo.y)); bx_xlo.setBig(1,std::min(valid_bx.bigEnd(1)+1,dom_hi.y));
79 Box bx_xhi(bx); bx_xhi.setSmall(0,dom_hi.x+1);
80 bx_xhi.setSmall(1,std::max(valid_bx.smallEnd(1)-1,dom_lo.y)); bx_xhi.setBig(1,std::min(valid_bx.bigEnd(1)+1,dom_hi.y));
81 ParallelFor(
82 bx_xlo & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
83 if (bc_ptr[n].lo(0) == REMORABCType::ext_dir) {
84 dest_arr(i,j,k,icomp+n) = l_bc_extdir_vals_d[n][0] * mskr(i,j,0);
85 } else if (bc_ptr[n].lo(0) == REMORABCType::orlanski_rad) {
86 Real grad_lo = (calc_arr(dom_lo.x ,j ,k,icomp_calc+n) - calc_arr(dom_lo.x ,j-1,k,icomp_calc+n)) * mskv(i,j,0);
87 Real grad_lo_jp1 = (calc_arr(dom_lo.x ,j+1,k,icomp_calc+n) - calc_arr(dom_lo.x ,j ,k,icomp_calc+n)) * mskv(i,j,0);
88 Real dTdt = calc_arr(dom_lo.x,j,k,icomp_calc+n) - dest_arr(dom_lo.x ,j,k,icomp+n);
89 Real dTdx = dest_arr(dom_lo.x,j,k,icomp+n) - dest_arr(dom_lo.x+1,j,k,icomp+n);
90 if (dTdt*dTdx < 0.0_rt) dTdt = 0.0_rt;
91 Real dTde = (dTdt * (grad_lo+grad_lo_jp1) > 0.0_rt) ? grad_lo : grad_lo_jp1;
92 Real cff = std::max(dTdx*dTdx+dTde*dTde,eps);
93 Real Cx = dTdt * dTdx;
94 dest_arr(i,j,k,icomp+n) = (cff * calc_arr(dom_lo.x-1,j,k,icomp_calc+n) + Cx * dest_arr(dom_lo.x,j,k,icomp+n)) * mskr(i,j,0) / (cff+Cx);
95 }
96 },
97 bx_xhi & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
98 if (bc_ptr[n].hi(0) == REMORABCType::ext_dir) {
99 dest_arr(i,j,k,icomp+n) = l_bc_extdir_vals_d[n][3] * mskr(i,j,0);
100 } else if (bc_ptr[n].hi(0) == REMORABCType::orlanski_rad) {
101 Real grad_hi = (calc_arr(dom_hi.x ,j ,k,icomp_calc+n) - calc_arr(dom_hi.x ,j-1,k,icomp_calc+n)) * mskv(i,j,0);
102 Real grad_hi_jp1 = (calc_arr(dom_hi.x ,j+1,k,icomp_calc+n) - calc_arr(dom_hi.x ,j ,k,icomp_calc+n)) * mskv(i,j,0);
103 Real dTdt = calc_arr(dom_hi.x,j,k,icomp_calc+n) - dest_arr(dom_hi.x ,j,k,icomp+n);
104 Real dTdx = dest_arr(dom_hi.x,j,k,icomp+n) - dest_arr(dom_hi.x-1,j,k,icomp+n);
105 if (dTdt * dTdx < 0.0_rt) dTdt = 0.0_rt;
106 Real dTde = (dTdt * (grad_hi + grad_hi_jp1) > 0.0_rt) ? grad_hi : grad_hi_jp1;
107 Real cff = std::max(dTdx*dTdx + dTde*dTde,eps);
108 Real Cx = dTdt * dTdx;
109 dest_arr(i,j,k,icomp+n) = (cff * calc_arr(dom_hi.x+1,j,k,icomp_calc+n) + Cx * dest_arr(dom_hi.x,j,k,icomp+n)) * mskr(i,j,0) / (cff+Cx);
110 }
111 }
112 );
113 }
114
115 if (!is_periodic_in_y)
116 {
117 Box bx_ylo(bx); bx_ylo.setBig (1,dom_lo.y-1);
118 bx_ylo.setSmall(0,std::max(valid_bx.smallEnd(0)-1,dom_lo.x)); bx_ylo.setBig(0,std::min(valid_bx.bigEnd(0)+1,dom_hi.x));
119 Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+1);
120 bx_yhi.setSmall(0,std::max(valid_bx.smallEnd(0)-1,dom_lo.x)); bx_yhi.setBig(0,std::min(valid_bx.bigEnd(0)+1,dom_hi.x));
121 ParallelFor(
122 bx_ylo & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
123 if (bc_ptr[n].lo(1) == REMORABCType::ext_dir) {
124 dest_arr(i,j,k,icomp+n) = l_bc_extdir_vals_d[n][1] * mskr(i,j,0);
125 } else if (bc_ptr[n].lo(1) == REMORABCType::orlanski_rad) {
126 Real grad_lo = (calc_arr(i ,dom_lo.y, k,icomp_calc+n) - calc_arr(i-1,dom_lo.y ,k,icomp_calc+n)) * msku(i,j,0);
127 Real grad_lo_ip1 = (calc_arr(i+1,dom_lo.y ,k,icomp_calc+n) - calc_arr(i ,dom_lo.y ,k,icomp_calc+n)) * msku(i,j,0);
128 Real dTdt = calc_arr(i,dom_lo.y,k,icomp_calc+n) - dest_arr(i,dom_lo.y ,k,icomp+n);
129 Real dTde = dest_arr(i,dom_lo.y,k,icomp+n) - dest_arr(i,dom_lo.y+1,k,icomp+n);
130 if (dTdt * dTde < 0.0_rt) dTdt = 0.0_rt;
131 Real dTdx = (dTdt * (grad_lo + grad_lo_ip1) > 0.0_rt) ? grad_lo : grad_lo_ip1;
132 Real cff = std::max(dTdx*dTdx + dTde*dTde, eps);
133 Real Ce = dTdt*dTde;
134 dest_arr(i,j,k,icomp+n) = (cff * calc_arr(i,dom_lo.y-1,k,icomp_calc+n) + Ce * dest_arr(i,dom_lo.y,k,icomp+n)) * mskr(i,j,0) / (cff+Ce);
135 }
136 },
137 bx_yhi & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
138 if (bc_ptr[n].hi(1) == REMORABCType::ext_dir) {
139 dest_arr(i,j,k,icomp+n) = l_bc_extdir_vals_d[n][4] * mskr(i,j,0);
140 } else if (bc_ptr[n].hi(1) == REMORABCType::orlanski_rad) {
141 Real grad_hi = (calc_arr(i ,dom_hi.y ,k,icomp_calc+n) - calc_arr(i-1,dom_hi.y ,k,icomp_calc+n)) * msku(i,j,0);
142 Real grad_hi_ip1 = (calc_arr(i+1,dom_hi.y ,k,icomp_calc+n) - calc_arr(i ,dom_hi.y ,k,icomp_calc+n)) * msku(i,j,0);
143 Real dTdt = calc_arr(i,dom_hi.y,k,icomp_calc+n) - dest_arr(i,dom_hi.y ,k,icomp+n);
144 Real dTde = dest_arr(i,dom_hi.y,k,icomp+n) - dest_arr(i,dom_hi.y-1,k,icomp+n);
145 if (dTdt * dTde < 0.0_rt) dTdt = 0.0_rt;
146 Real dTdx = (dTdt * (grad_hi + grad_hi_ip1) > 0.0_rt) ? grad_hi : grad_hi_ip1;
147 Real cff = std::max(dTdx*dTdx + dTde*dTde, eps);
148 Real Ce = dTdt*dTde;
149 dest_arr(i,j,k,icomp+n) = (cff*calc_arr(i,dom_hi.y+1,k,icomp_calc+n) + Ce*dest_arr(i,dom_hi.y,k,icomp+n)) * mskr(i,j,0) / (cff+Ce);
150 }
151 }
152 );
153 }
154
155 {
156 Box bx_zlo(bx); bx_zlo.setBig (2,dom_lo.z-1);
157 Box bx_zhi(bx); bx_zhi.setSmall(2,dom_hi.z+1);
158 ParallelFor(
159 bx_zlo & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
160 if (bc_ptr[n].lo(2) == REMORABCType::ext_dir) {
161 dest_arr(i,j,k,icomp+n) = l_bc_extdir_vals_d[n][2] * mskr(i,j,0);
162 }
163 },
164 bx_zhi & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
165 if (bc_ptr[n].hi(2) == REMORABCType::ext_dir) {
166 dest_arr(i,j,k,icomp+n) = l_bc_extdir_vals_d[n][5] * mskr(i,j,0);
167 }
168 }
169 );
170 }
171
172 Box bx_xlo(bx); bx_xlo.setBig (0,dom_lo.x-1-n_not_fill);
173 bx_xlo.setSmall(2,std::max(dom_lo.z,bx.smallEnd(2)));
174 bx_xlo.setBig (2,std::min(dom_hi.z,bx.bigEnd(2)));
175 Box bx_xhi(bx); bx_xhi.setSmall(0,dom_hi.x+1+n_not_fill);
176 bx_xhi.setSmall(2,std::max(dom_lo.z,bx.smallEnd(2)));
177 bx_xhi.setBig (2,std::min(dom_hi.z,bx.bigEnd(2)));
178 Box bx_ylo(bx); bx_ylo.setBig (1,dom_lo.y-1-n_not_fill);
179 bx_ylo.setSmall(2,std::max(dom_lo.z,bx.smallEnd(2)));
180 bx_ylo.setBig (2,std::min(dom_hi.z,bx.bigEnd(2)));
181 Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+1+n_not_fill);
182 bx_yhi.setSmall(2,std::max(dom_lo.z,bx.smallEnd(2)));
183 bx_yhi.setBig (2,std::min(dom_hi.z,bx.bigEnd(2)));
184 // Calculate intersections for corners before adjusting to exclude them
185 Box xlo_ylo = bx_xlo & bx_ylo;
186 Box xhi_ylo = bx_xhi & bx_ylo;
187 Box xlo_yhi = bx_xlo & bx_yhi;
188 Box xhi_yhi = bx_xhi & bx_yhi;
189// bx_xlo.setSmall(1,valid_bx.smallEnd(1)); bx_xlo.setBig(1,valid_bx.bigEnd(1));
190// bx_xhi.setSmall(1,valid_bx.smallEnd(1)); bx_xhi.setBig(1,valid_bx.bigEnd(1));
191// bx_ylo.setSmall(0,valid_bx.smallEnd(0)); bx_ylo.setBig(0,valid_bx.bigEnd(0));
192// bx_yhi.setSmall(0,valid_bx.smallEnd(0)); bx_yhi.setBig(0,valid_bx.bigEnd(0));
193 // Next do ghost cells in x-direction but not reaching out in y
194 // The corners we miss here will be covered in the y-loop below or by periodicity
195 if (!is_periodic_in_x or bccomp==BCVars::foextrap_bc)
196 {
197 // Populate ghost cells on lo-x and hi-x domain boundaries
198 ParallelFor(bx_xlo & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
199 int iflip = dom_lo.x - 1 - i;
200 int inner = (bc_ptr[n].lo(0) == REMORABCType::orlanski_rad) ? 1 : 0;
201 if (bc_ptr[n].lo(0) == REMORABCType::foextrap || bc_ptr[n].lo(0) == REMORABCType::clamped || bc_ptr[n].lo(0) == REMORABCType::chapman || bc_ptr[n].lo(0) == REMORABCType::orlanski_rad ||
202 bc_ptr[n].lo(0) == REMORABCType::orlanski_rad_nudge) {
203 dest_arr(i,j,k,icomp+n) = dest_arr(dom_lo.x-n_not_fill-inner,j,k,icomp+n);
204 } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_even) {
205 dest_arr(i,j,k,icomp+n) = dest_arr(iflip,j,k,icomp+n);
206 } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_odd) {
207 dest_arr(i,j,k,icomp+n) = -dest_arr(iflip,j,k,icomp+n);
208 }
209 },
210 bx_xhi & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
211 int iflip = 2*dom_hi.x + 1 - i;
212 int inner = (bc_ptr[n].hi(0) == REMORABCType::orlanski_rad) ? 1 : 0;
213 if (bc_ptr[n].hi(0) == REMORABCType::foextrap || bc_ptr[n].hi(0) == REMORABCType::clamped || bc_ptr[n].hi(0) == REMORABCType::chapman || bc_ptr[n].hi(0) == REMORABCType::orlanski_rad ||
214 bc_ptr[n].hi(0) == REMORABCType::orlanski_rad_nudge) {
215 dest_arr(i,j,k,icomp+n) = dest_arr(dom_hi.x+n_not_fill+inner,j,k,icomp+n);
216 } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_even) {
217 dest_arr(i,j,k,icomp+n) = dest_arr(iflip,j,k,icomp+n);
218 } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_odd) {
219 dest_arr(i,j,k,icomp+n) = -dest_arr(iflip,j,k,icomp+n);
220 }
221 }
222 );
223 }
224
225 if (!is_periodic_in_y or bccomp==BCVars::foextrap_bc)
226 {
227 // Populate ghost cells on lo-y and hi-y domain boundaries
228 ParallelFor(
229 bx_ylo & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
230 int jflip = dom_lo.y - 1 - j;
231 int inner = (bc_ptr[n].lo(1) == REMORABCType::orlanski_rad) ? 1 : 0;
232 if (bc_ptr[n].lo(1) == REMORABCType::foextrap || bc_ptr[n].lo(1) == REMORABCType::clamped || bc_ptr[n].lo(1) == REMORABCType::chapman || bc_ptr[n].lo(1) == REMORABCType::orlanski_rad ||
233 bc_ptr[n].lo(1) == REMORABCType::orlanski_rad_nudge) {
234 dest_arr(i,j,k,icomp+n) = dest_arr(i,dom_lo.y-n_not_fill-inner,k,icomp+n);
235 } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_even) {
236 dest_arr(i,j,k,icomp+n) = dest_arr(i,jflip,k,icomp+n);
237 } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_odd) {
238 dest_arr(i,j,k,icomp+n) = -dest_arr(i,jflip,k,icomp+n);
239 }
240 },
241 bx_yhi & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
242 int jflip = 2*dom_hi.y + 1 - j;
243 int inner = (bc_ptr[n].hi(1) == REMORABCType::orlanski_rad) ? 1 : 0;
244 if (bc_ptr[n].hi(1) == REMORABCType::foextrap || bc_ptr[n].hi(1) == REMORABCType::clamped || bc_ptr[n].hi(1) == REMORABCType::chapman || bc_ptr[n].lo(1) == REMORABCType::orlanski_rad ||
245 bc_ptr[n].hi(1) == REMORABCType::orlanski_rad_nudge) {
246 dest_arr(i,j,k,icomp+n) = dest_arr(i,dom_hi.y+n_not_fill+inner,k,icomp+n);
247 } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_even) {
248 dest_arr(i,j,k,icomp+n) = dest_arr(i,jflip,k,icomp+n);
249 } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_odd) {
250 dest_arr(i,j,k,icomp+n) = -dest_arr(i,jflip,k,icomp+n);
251 }
252 }
253 );
254 }
255
256 {
257 Box bx_zlo(bx); bx_zlo.setBig (2,std::max(dom_lo.z-1,bx.smallEnd(2)));
258 Box bx_zhi(bx); bx_zhi.setSmall(2,std::min(dom_hi.z+1,bx.bigEnd(2)));
259 // Populate ghost cells on lo-z and hi-z domain boundaries
260
261 if (bx_zlo.ok()) {
262 ParallelFor(bx_zlo & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
263 {
264 int kflip = dom_lo.z - 1 - i;
265 if (bc_ptr[n].lo(2) == REMORABCType::foextrap) {
266 dest_arr(i,j,k,icomp+n) = dest_arr(i,j,dom_lo.z,icomp+n);
267 } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_even) {
268 dest_arr(i,j,k,icomp+n) = dest_arr(i,j,kflip,icomp+n);
269 } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_odd) {
270 dest_arr(i,j,k,icomp+n) = -dest_arr(i,j,kflip,icomp+n);
271 }
272 });
273 }
274
275 if (bx_zlo.ok()) {
276 ParallelFor(bx_zhi & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
277 {
278 int kflip = 2*dom_hi.z + 1 - i;
279 if (bc_ptr[n].hi(2) == REMORABCType::foextrap) {
280 dest_arr(i,j,k,icomp+n) = dest_arr(i,j,dom_hi.z,icomp+n);
281 } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_even) {
282 dest_arr(i,j,k,icomp+n) = dest_arr(i,j,kflip,icomp+n);
283 } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_odd) {
284 dest_arr(i,j,k,icomp+n) = -dest_arr(i,j,kflip,icomp+n);
285 }
286 });
287 }
288 }
289 if ((!is_periodic_in_x && !is_periodic_in_y) or bccomp==BCVars::foextrap_bc) {
290 // If we've applied boundary conditions to either side, update the corner
291 if (!xlo_ylo.isEmpty()) {
292 ParallelFor(xlo_ylo & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
293 {
294 if (!(bc_ptr[n].lo(0) == REMORABCType::clamped || bc_ptr[n].lo(0) == REMORABCType::flather ||
295 bc_ptr[n].lo(0) == REMORABCType::chapman || bc_ptr[n].lo(0) == REMORABCType::orlanski_rad_nudge)
296 && !(bc_ptr[n].lo(1) == REMORABCType::clamped || bc_ptr[n].lo(1) == REMORABCType::flather ||
297 bc_ptr[n].lo(1) == REMORABCType::chapman || bc_ptr[n].lo(1) == REMORABCType::orlanski_rad_nudge)) {
298 dest_arr(i,j,k,icomp+n) = 0.5 * (dest_arr(i,dom_lo.y,k,icomp+n)
299 + dest_arr(dom_lo.x,j,k,icomp+n));
300 }
301 });
302 }
303 if (!xlo_yhi.isEmpty()) {
304 ParallelFor(xlo_yhi & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
305 {
306 if (!(bc_ptr[n].lo(0) == REMORABCType::clamped || bc_ptr[n].lo(0) == REMORABCType::flather ||
307 bc_ptr[n].lo(0) == REMORABCType::chapman || bc_ptr[n].lo(0) == REMORABCType::orlanski_rad_nudge)
308 && !(bc_ptr[n].hi(1) == REMORABCType::clamped || bc_ptr[n].hi(1) == REMORABCType::flather ||
309 bc_ptr[n].hi(1) == REMORABCType::chapman || bc_ptr[n].hi(1) == REMORABCType::orlanski_rad_nudge)) {
310 dest_arr(i,j,k,icomp+n) = 0.5 * (dest_arr(i,dom_hi.y,k,icomp+n)
311 + dest_arr(dom_lo.x,j,k,icomp+n));
312 }
313 });
314 }
315 if (!xhi_ylo.isEmpty()) {
316 ParallelFor(xhi_ylo & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
317 {
318 if (!(bc_ptr[n].hi(0) == REMORABCType::clamped || bc_ptr[n].hi(0) == REMORABCType::flather ||
319 bc_ptr[n].hi(0) == REMORABCType::chapman || bc_ptr[n].hi(0) == REMORABCType::orlanski_rad_nudge)
320 && !(bc_ptr[n].lo(1) == REMORABCType::clamped || bc_ptr[n].lo(1) == REMORABCType::flather ||
321 bc_ptr[n].lo(1) == REMORABCType::chapman || bc_ptr[n].lo(1) == REMORABCType::orlanski_rad_nudge)) {
322 dest_arr(i,j,k,icomp+n) = 0.5 * (dest_arr(i,dom_lo.y,k,icomp+n)
323 + dest_arr(dom_hi.x,j,k,icomp+n));
324 }
325 });
326 }
327 if (!xhi_yhi.isEmpty()) {
328 ParallelFor(xhi_yhi & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
329 {
330 if (!(bc_ptr[n].hi(0) == REMORABCType::clamped || bc_ptr[n].hi(0) == REMORABCType::flather ||
331 bc_ptr[n].hi(0) == REMORABCType::chapman || bc_ptr[n].hi(0) == REMORABCType::orlanski_rad_nudge)
332 && !(bc_ptr[n].hi(1) == REMORABCType::clamped || bc_ptr[n].hi(1) == REMORABCType::flather ||
333 bc_ptr[n].hi(1) == REMORABCType::chapman || bc_ptr[n].hi(1) == REMORABCType::orlanski_rad_nudge)) {
334 dest_arr(i,j,k,icomp+n) = 0.5 * (dest_arr(i,dom_hi.y,k,icomp+n)
335 + dest_arr(dom_hi.x,j,k,icomp+n));
336 }
337 });
338 }
339 }
340
341 Gpu::streamSynchronize();
342}
#define NCONS
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NCONS+8 > m_bc_extdir_vals
amrex::Vector< amrex::BCRec > m_domain_bcs_type
void impose_cons_bcs(const amrex::Array4< amrex::Real > &mf, const amrex::Box &bx, const amrex::Box &valid_bx, const amrex::Box &domain, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > dxInv, const amrex::Array4< const amrex::Real > &mskr, const amrex::Array4< const amrex::Real > &msku, const amrex::Array4< const amrex::Real > &mskv, const amrex::Array4< const amrex::Real > &calc_arr, int icomp, int ncomp, amrex::Real time, int bccomp, int n_not_fill)
apply scalar type boundary conditions
amrex::Geometry m_geom