REMORA
Regional Modeling of Oceans Refined Adaptively
REMORA_BoundaryConditions_xvel.cpp
#include "AMReX_PhysBCFunct.H"

using namespace amrex;

/**
 * Apply x-velocity-type boundary conditions.
 *
 * @param[inout] dest_arr data on which to apply BCs
 * @param[in   ] bx       box to update on
 * @param[in   ] domain   domain box
 * @param[in   ] dxInv    inverse grid spacing (pm or pn); unused here
 * @param[in   ] msku     land-sea mask on u-points
 * @param[in   ] calc_arr data to use in the RHS of calculations
 * @param[in   ] time     current time; unused here
 * @param[in   ] bccomp   index into both m_domain_bcs_type and m_bc_extdir_vals for icomp=0
 */
void REMORAPhysBCFunct::impose_xvel_bcs (const Array4<Real>& dest_arr, const Box& bx, const Box& domain,
                                         const GpuArray<Real,AMREX_SPACEDIM> /*dxInv*/, const Array4<const Real>& msku,
                                         const Array4<const Real>& calc_arr,
                                         Real /*time*/, int bccomp)
{
    BL_PROFILE_VAR("impose_xvel_bcs()",impose_xvel_bcs);
    const auto& dom_lo = amrex::lbound(domain);
    const auto& dom_hi = amrex::ubound(domain);

    // Based on BCRec for the domain, we need to make BCRec for this Box
    // bccomp is used as starting index for m_domain_bcs_type
    // 0 is used as starting index for bcrs
    int ncomp = 1;
    Vector<BCRec> bcrs(ncomp);
    amrex::setBC(bx, domain, bccomp, 0, ncomp, m_domain_bcs_type, bcrs);

    // xlo: ori = 0
    // ylo: ori = 1
    // zlo: ori = 2
    // xhi: ori = 3
    // yhi: ori = 4
    // zhi: ori = 5

    amrex::Gpu::DeviceVector<BCRec> bcrs_d(ncomp);
#ifdef AMREX_USE_GPU
    Gpu::htod_memcpy_async(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
#else
    std::memcpy(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
#endif
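    // Note: on GPU builds the copy above is asynchronous, so bcrs (host) and bcrs_d (device)
    // must stay alive until the Gpu::streamSynchronize() at the end of this routine.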
    const amrex::BCRec* bc_ptr = bcrs_d.data();

    GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>,AMREX_SPACEDIM+NCONS+8> l_bc_extdir_vals_d;

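    // Copy the relevant ext_dir values into a local GpuArray so they can be captured by
    // value in the device lambdas below (capturing the class member directly would only
    // capture the host-side `this` pointer).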
    for (int i = 0; i < ncomp; i++)
        for (int ori = 0; ori < 2*AMREX_SPACEDIM; ori++)
            l_bc_extdir_vals_d[i][ori] = m_bc_extdir_vals[bccomp+i][ori];

    GeometryData const& geomdata = m_geom.data();
    bool is_periodic_in_x = geomdata.isPeriodic(0);
    bool is_periodic_in_y = geomdata.isPeriodic(1);
    const Real eps = 1.0e-20_rt;

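    // Index range actually occupied by dest_arr, retyped as x-faces: Box(dest_arr) is
    // cell-type, so converting to IntVect(1,0,0) adds one index on the high side and
    // growHi(0,-1) removes it again. Intersecting the boundary boxes with this box
    // below keeps every write inside the allocated array.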
    Box dest_arr_box = growHi(convert(Box(dest_arr),IntVect(1,0,0)),0,-1);
    // First do all ext_dir bcs
    if (!is_periodic_in_x or bccomp==BCVars::foextrap_bc)
    {
        Box bx_xlo(bx);      bx_xlo.setBig  (0,dom_lo.x-1);
        Box bx_xhi(bx);      bx_xhi.setSmall(0,dom_hi.x+2);
        Box bx_xlo_face(bx); bx_xlo_face.setSmall(0,dom_lo.x  ); bx_xlo_face.setBig(0,dom_lo.x  );
        Box bx_xhi_face(bx); bx_xhi_face.setSmall(0,dom_hi.x+1); bx_xhi_face.setBig(0,dom_hi.x+1);
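        // On the boundary face itself the orlanski_rad branch below applies an
        // Orlanski-type radiation condition,
        //     u(face) = (cff * calc_arr(face) + Cx * u(first interior)) / (cff + Cx),
        // where Cx = dUdt*dUdx, cff = max(dUdx^2 + dUde^2, eps), dUdt and dUdx are built
        // from the first two interior u-columns, and dUde is an upwinded tangential
        // difference. Every result is multiplied by msku so masked (land) points are zeroed.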
        ParallelFor(
            // We only set the values on the domain faces themselves if EXT_DIR or actual outflow
            grow(bx_xlo_face,IntVect(0,-1,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                if (bc_ptr[n].lo(0) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][0]*msku(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(dom_lo.x+1,j,k)*msku(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::orlanski_rad) {
                    Real grad_lo_ip1  = calc_arr(dom_lo.x+1,j  ,k) - calc_arr(dom_lo.x+1,j-1,k);
                    Real grad_lo_ijp1 = calc_arr(dom_lo.x+1,j+1,k) - calc_arr(dom_lo.x+1,j  ,k);
                    Real dUdt = calc_arr(dom_lo.x+1,j,k) - dest_arr(dom_lo.x+1,j,k);
                    Real dUdx = dest_arr(dom_lo.x+1,j,k) - dest_arr(dom_lo.x+2,j,k);
                    if (dUdt * dUdx < 0.0_rt) dUdt = 0.0_rt;
                    Real dUde = (dUdt * (grad_lo_ip1 + grad_lo_ijp1) > 0.0_rt) ? grad_lo_ip1 : grad_lo_ijp1;
                    Real cff = std::max(dUdx*dUdx+dUde*dUde,eps);
                    Real Cx = dUdt*dUdx;
                    dest_arr(i,j,k) = (cff * calc_arr(i,j,k) + Cx * dest_arr(dom_lo.x+1,j,k)) * msku(i,j,0) / (cff+Cx);
                }
            });
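        // Fill the remaining ghost faces west of the boundary: foextrap copies the first
        // interior value (dom_lo.x+1), clamped and orlanski_rad copy the boundary face value,
        // and reflect_even/odd mirror interior values (with a sign flip for reflect_odd).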
        ParallelFor(
            grow(bx_xlo,IntVect(0,-1,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int inner = (bc_ptr[n].lo(0) == REMORABCType::foextrap) ? 1 : 0;
                int iflip = dom_lo.x - i;
                if (bc_ptr[n].lo(0) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][0]*msku(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::foextrap || bc_ptr[n].lo(0) == REMORABCType::clamped ||
                           bc_ptr[n].lo(0) == REMORABCType::orlanski_rad) {
                    dest_arr(i,j,k) = dest_arr(dom_lo.x+inner,j,k)*msku(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(iflip,j,k)*msku(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(iflip,j,k)*msku(i,j,0);
                }
            });
        // We only set the values on the domain faces themselves if EXT_DIR or actual outflow
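        // The high-x boundary mirrors the low-x treatment, with the boundary face at
        // dom_hi.x+1 and the interior gradients taken at dom_hi.x.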
        ParallelFor(
            grow(bx_xhi_face,IntVect(0,-1,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                if (bc_ptr[n].hi(0) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][3]*msku(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(dom_hi.x,j,k)*msku(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::orlanski_rad) {
                    Real grad_hi     = calc_arr(dom_hi.x  ,j  ,k) - calc_arr(dom_hi.x  ,j-1,k);
                    Real grad_hi_jp1 = calc_arr(dom_hi.x  ,j+1,k) - calc_arr(dom_hi.x  ,j  ,k);
                    Real dUdt = calc_arr(dom_hi.x,j,k) - dest_arr(dom_hi.x  ,j,k);
                    Real dUdx = dest_arr(dom_hi.x,j,k) - dest_arr(dom_hi.x-1,j,k);
                    if (dUdt * dUdx < 0.0_rt) dUdt = 0.0_rt;
                    Real dUde = (dUdt * (grad_hi + grad_hi_jp1) > 0.0_rt) ? grad_hi : grad_hi_jp1;
                    Real cff = std::max(dUdx*dUdx+dUde*dUde,eps);
                    Real Cx = dUdt * dUdx;
                    dest_arr(i,j,k) = (cff * calc_arr(dom_hi.x+1,j,k) + Cx * dest_arr(dom_hi.x,j,k)) * msku(i,j,0) / (cff + Cx);
                }
            });
        ParallelFor(
            grow(bx_xhi,IntVect(0,-1,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int iflip = 2*(dom_hi.x + 1) - i;
                int inner = (bc_ptr[n].hi(0) == REMORABCType::foextrap) ? 1 : 0;
                if (bc_ptr[n].hi(0) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][3]*msku(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::foextrap || bc_ptr[n].hi(0) == REMORABCType::clamped ||
                           bc_ptr[n].hi(0) == REMORABCType::orlanski_rad) {
                    dest_arr(i,j,k) = dest_arr(dom_hi.x+1-inner,j,k)*msku(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(iflip,j,k)*msku(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(iflip,j,k)*msku(i,j,0);
                }
            });
    } // not periodic in x

    if (!is_periodic_in_y or bccomp==BCVars::foextrap_bc)
    {
        // Populate ghost cells on lo-y and hi-y domain boundaries
        Box bx_ylo(bx); bx_ylo.setBig  (1,dom_lo.y-1);
        Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+1);
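        // For u the y boundaries are tangential: ext_dir/foextrap/clamped behave as in x,
        // while orlanski_rad radiates in y with Ce = dUdt*dUde and an upwinded tangential
        // (x) gradient.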
        ParallelFor(
            grow(bx_ylo,IntVect(-1,0,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int jflip = dom_lo.y - 1 - j;
                if (bc_ptr[n].lo(1) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][1]*msku(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::foextrap || bc_ptr[n].lo(1) == REMORABCType::clamped) {
                    dest_arr(i,j,k) = dest_arr(i,dom_lo.y,k)*msku(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::orlanski_rad) {
                    Real grad_lo     = calc_arr(i+1,dom_lo.y  ,k) - calc_arr(i  ,dom_lo.y  ,k);
                    Real grad_lo_im1 = calc_arr(i  ,dom_lo.y  ,k) - calc_arr(i-1,dom_lo.y  ,k);
                    Real dUdt = calc_arr(i,dom_lo.y,k) - dest_arr(i,dom_lo.y  ,k);
                    Real dUde = dest_arr(i,dom_lo.y,k) - dest_arr(i,dom_lo.y+1,k);
                    if (dUdt * dUde < 0.0_rt) dUdt = 0.0_rt;
                    Real dUdx = (dUdt * (grad_lo_im1 + grad_lo) > 0.0_rt) ? grad_lo_im1 : grad_lo;
                    Real cff = std::max(dUdx * dUdx + dUde * dUde, eps);
                    Real Ce = dUdt * dUde;
                    dest_arr(i,j,k) = (cff * calc_arr(i,dom_lo.y-1,k) + Ce * dest_arr(i,dom_lo.y,k)) * msku(i,j,0) / (cff + Ce);
                } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(i,jflip,k)*msku(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,jflip,k)*msku(i,j,0);
                }
            },
            grow(bx_yhi,IntVect(-1,0,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int jflip = 2*dom_hi.y + 1 - j;
                if (bc_ptr[n].hi(1) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][4]*msku(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::foextrap || bc_ptr[n].hi(1) == REMORABCType::clamped) {
                    dest_arr(i,j,k) = dest_arr(i,dom_hi.y,k)*msku(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::orlanski_rad) {
                    Real grad_hi     = calc_arr(i+1,dom_hi.y  ,k) - calc_arr(i  ,dom_hi.y  ,k);
                    Real grad_hi_im1 = calc_arr(i  ,dom_hi.y  ,k) - calc_arr(i-1,dom_hi.y  ,k);
                    Real dUdt = calc_arr(i,dom_hi.y,k) - dest_arr(i,dom_hi.y  ,k);
                    Real dUde = dest_arr(i,dom_hi.y,k) - dest_arr(i,dom_hi.y-1,k);
                    if (dUdt * dUde < 0.0_rt) dUdt = 0.0_rt;
                    Real dUdx = (dUdt * (grad_hi_im1 + grad_hi) > 0.0_rt) ? grad_hi_im1 : grad_hi;
                    Real cff = std::max(dUdx*dUdx+dUde*dUde,eps);
                    Real Ce = dUdt * dUde;
                    dest_arr(i,j,k) = (cff * calc_arr(i,dom_hi.y+1,k) + Ce * dest_arr(i,dom_hi.y,k)) * msku(i,j,0) / (cff+Ce);
                } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(i,jflip,k)*msku(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,jflip,k)*msku(i,j,0);
                }
            }
        );
    } // not periodic in y

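    // Vertical (z) boundaries: no radiation option here; only ext_dir, foextrap, and
    // reflect_even/odd are applied, again multiplied by the u-point mask.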
    {
        // Populate ghost cells on lo-z and hi-z domain boundaries
        Box bx_zlo(bx); bx_zlo.setBig  (2,dom_lo.z-1);
        Box bx_zhi(bx); bx_zhi.setSmall(2,dom_hi.z+1);
        ParallelFor(
            bx_zlo & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int kflip = dom_lo.z - 1 - k;
                if (bc_ptr[n].lo(2) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][2]*msku(i,j,0);
                } else if (bc_ptr[n].lo(2) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(i,j,dom_lo.z)*msku(i,j,0);
                } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(i,j,kflip)*msku(i,j,0);
                } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,j,kflip)*msku(i,j,0);
                }
            },
            bx_zhi & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int kflip = 2*dom_hi.z + 1 - k;
                if (bc_ptr[n].hi(2) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][5]*msku(i,j,0);
                } else if (bc_ptr[n].hi(2) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(i,j,dom_hi.z)*msku(i,j,0);
                } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(i,j,kflip)*msku(i,j,0);
                } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,j,kflip)*msku(i,j,0);
                }
            }
        );
    } // z

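    // Corner ghost values where two non-periodic boundaries meet are set to the average
    // of the neighboring values from the two adjoining sides, unless either side is
    // clamped (presumably because clamped values are prescribed externally and should
    // not be overwritten).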
    if ((!is_periodic_in_x or bccomp==BCVars::foextrap_bc) and
        (!is_periodic_in_y or bccomp==BCVars::foextrap_bc)) {
        Box xlo(bx); xlo.setBig  (0,dom_lo.x  );
        Box xhi(bx); xhi.setSmall(0,dom_hi.x+1);
        Box ylo(bx); ylo.setBig  (1,dom_lo.y-1);
        Box yhi(bx); yhi.setSmall(1,dom_hi.y+1);
        Box xlo_ylo = xlo & ylo;
        Box xlo_yhi = xlo & yhi;
        Box xhi_ylo = xhi & ylo;
        Box xhi_yhi = xhi & yhi;

        const bool clamp_west  = m_domain_bcs_type[bccomp].lo(0) == REMORABCType::clamped;
        const bool clamp_east  = m_domain_bcs_type[bccomp].hi(0) == REMORABCType::clamped;
        const bool clamp_south = m_domain_bcs_type[bccomp].lo(1) == REMORABCType::clamped;
        const bool clamp_north = m_domain_bcs_type[bccomp].hi(1) == REMORABCType::clamped;

        if (!clamp_west && !clamp_south) {
            ParallelFor(xlo_ylo & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_lo.y,k) + dest_arr(dom_lo.x+1,j,k));
            });
        }
        if (!clamp_west && !clamp_north) {
            ParallelFor(xlo_yhi & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_hi.y,k) + dest_arr(dom_lo.x+1,j,k));
            });
        }
        if (!clamp_east && !clamp_south) {
            ParallelFor(xhi_ylo & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_lo.y,k) + dest_arr(dom_hi.x,j,k));
            });
        }
        if (!clamp_east && !clamp_north) {
            ParallelFor(xhi_yhi & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_hi.y,k) + dest_arr(dom_hi.x,j,k));
            });
        }
    }

    Gpu::streamSynchronize();
}