REMORA — Regional Modeling of Oceans Refined Adaptively
Source listing: REMORA_BoundaryConditions_xvel.cpp
1#include "AMReX_PhysBCFunct.H"
3
4using namespace amrex;
5
/**
 * Impose physical boundary conditions on x-velocity (u), which lives on
 * x-faces. Fills ghost cells (and, for open boundaries, the domain-face
 * values themselves) on the lo/hi x, y, and z sides, then patches the
 * four x-y edge/corner regions. Every assigned value is multiplied by the
 * land-sea mask msku so land points stay zero.
 *
 * @param[inout] dest_arr data on which to apply BCs
 * @param[in   ] bx       box to update on
 * @param[in   ] domain   domain box
 * @param[in   ] dxInv    pm or pn (unused here)
 * @param[in   ] msku     land-sea mask on u-points
 * @param[in   ] calc_arr data to use in the RHS of calculations (e.g. the
 *                        previous-time-level field for radiation BCs)
 * @param[in   ] time     current time (unused here)
 * @param[in   ] bccomp   index into both m_domain_bcs_type and
 *                        m_bc_extdir_vals_d for icomp=0
 */
void REMORAPhysBCFunct::impose_xvel_bcs (const Array4<Real>& dest_arr, const Box& bx, const Box& domain,
                                         const GpuArray<Real,AMREX_SPACEDIM> /*dxInv*/, const Array4<const Real>& msku,
                                         const Array4<const Real>& calc_arr,
                                         Real /*time*/, int bccomp)
{
    BL_PROFILE_VAR("impose_xvel_bcs()",impose_xvel_bcs);
    const auto& dom_lo = amrex::lbound(domain);
    const auto& dom_hi = amrex::ubound(domain);

    // Based on BCRec for the domain, we need to make BCRec for this Box
    // bccomp is used as starting index for m_domain_bcs_type
    //      0 is used as starting index for bcrs
    int ncomp = 1;
    Vector<BCRec> bcrs(ncomp);
    amrex::setBC(bx, domain, bccomp, 0, ncomp, m_domain_bcs_type, bcrs);

    // Orientation convention used for the bc_extdir_vals second index:
    // xlo: ori = 0
    // ylo: ori = 1
    // zlo: ori = 2
    // xhi: ori = 3
    // yhi: ori = 4
    // zhi: ori = 5

    // Copy the per-box BCRec to the device so the GPU lambdas can read it.
    amrex::Gpu::DeviceVector<BCRec> bcrs_d(ncomp);
#ifdef AMREX_USE_GPU
    Gpu::htod_memcpy_async(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
#else
    std::memcpy(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
#endif
    const amrex::BCRec* bc_ptr = bcrs_d.data();
    const auto* bc_extdir_vals_ptr = m_bc_extdir_vals_d.data();

    GeometryData const& geomdata = m_geom.data();
    bool is_periodic_in_x = geomdata.isPeriodic(0);
    bool is_periodic_in_y = geomdata.isPeriodic(1);
    // Floor for the radiation-BC denominator so we never divide by zero.
    const Real eps= 1.0e-20_rt;


    // Valid index space of dest_arr as an x-face-centered box. Box(dest_arr)
    // is node-centered in x for a u-array, so convert(...,IntVect(1,0,0))
    // followed by growHi(0,-1) recovers the face-centered extent. All fill
    // boxes below are intersected with this so we never write out of bounds.
    Box dest_arr_box = growHi(convert(Box(dest_arr),IntVect(1,0,0)),0,-1);
    // First do all ext_dir bcs
    // NOTE(review): the "or bccomp == BCVars::foextrap_bc(m_ncons)" clause
    // appears to force filling for one special foextrap component even when
    // the direction is periodic — confirm against BCVars.
    if (!is_periodic_in_x or bccomp == BCVars::foextrap_bc(m_ncons))
    {
        // Ghost regions strictly outside the domain in x ...
        Box bx_xlo(bx);      bx_xlo.setBig  (0,dom_lo.x-1);
        Box bx_xhi(bx);      bx_xhi.setSmall(0,dom_hi.x+2);
        // ... and the single layer of u-faces lying ON the domain boundary
        // (lo face at dom_lo.x, hi face at dom_hi.x+1).
        Box bx_xlo_face(bx); bx_xlo_face.setSmall(0,dom_lo.x  ); bx_xlo_face.setBig(0,dom_lo.x  );
        Box bx_xhi_face(bx); bx_xhi_face.setSmall(0,dom_hi.x+1); bx_xhi_face.setBig(0,dom_hi.x+1);
        ParallelFor(
            // We only set the values on the domain faces themselves if EXT_DIR or actual outflow.
            // The box is shrunk by one in y (grow by -1) so the x-y corner
            // columns are left for the dedicated corner pass at the end.
            grow(bx_xlo_face,IntVect(0,-1,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                if (bc_ptr[n].lo(0) == REMORABCType::ext_dir) {
                    // Prescribed inflow value for the xlo side (ori=0).
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][0]*msku(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::foextrap) {
                    // Zero-gradient: copy the first interior u-face.
                    dest_arr(i,j,k) = dest_arr(dom_lo.x+1,j,k)*msku(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::orlanski_rad) {
                    // Orlanski-type radiation BC: estimate the local phase
                    // speed from the time tendency (dUdt) and the normal
                    // gradient (dUdx) at the first interior face, then
                    // advect the boundary value outward. grad_* are
                    // tangential (y) differences used to pick dUde.
                    Real grad_lo_ip1  = calc_arr(dom_lo.x+1,j  ,k) - calc_arr(dom_lo.x+1,j-1,k);
                    Real grad_lo_ijp1 = calc_arr(dom_lo.x+1,j+1,k) - calc_arr(dom_lo.x+1,j  ,k);
                    Real dUdt = calc_arr(dom_lo.x+1,j,k) - dest_arr(dom_lo.x+1,j,k);
                    Real dUdx = dest_arr(dom_lo.x+1,j,k) - dest_arr(dom_lo.x+2,j,k);
                    // Suppress inward-propagating signals.
                    if (dUdt * dUdx < 0.0_rt) dUdt = 0.0_rt;
                    // Upwind choice of the tangential gradient.
                    Real dUde = (dUdt * (grad_lo_ip1 + grad_lo_ijp1) > 0.0_rt) ? grad_lo_ip1 : grad_lo_ijp1;
                    Real cff = std::max(dUdx*dUdx+dUde*dUde,eps);
                    Real Cx = dUdt*dUdx;   // >= 0 after the clamp above
                    // Blend old boundary value with the interior value; here
                    // i == dom_lo.x so calc_arr(i,j,k) is the old boundary value.
                    dest_arr(i,j,k) = (cff * calc_arr(i,j,k) + Cx * dest_arr(dom_lo.x+1,j,k)) * msku(i,j,0) / (cff+Cx);
                }
            });
        // Fill the xlo ghost faces strictly outside the domain.
        ParallelFor(
            grow(bx_xlo,IntVect(0,-1,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                // For foextrap the source is one face inside the boundary
                // face; for clamped/orlanski the boundary face itself.
                int inner = (bc_ptr[n].lo(0) == REMORABCType::foextrap) ? 1 : 0;
                // Reflected index across the lo boundary face.
                // NOTE(review): dom_lo.x - i equals the reflection
                // 2*dom_lo.x - i only when dom_lo.x == 0 (true for AMReX
                // domains in practice) — confirm if that invariant changes.
                int iflip = dom_lo.x - i;
                if (bc_ptr[n].lo(0) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][0]*msku(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::foextrap || bc_ptr[n].lo(0) == REMORABCType::clamped ||
                           bc_ptr[n].lo(0) == REMORABCType::orlanski_rad) {
                    dest_arr(i,j,k) = dest_arr(dom_lo.x+inner,j,k)*msku(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(iflip,j,k)*msku(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(iflip,j,k)*msku(i,j,0);
                }
            });
        // We only set the values on the domain faces themselves if EXT_DIR or actual outflow
        ParallelFor(
            grow(bx_xhi_face,IntVect(0,-1,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                if (bc_ptr[n].hi(0) == REMORABCType::ext_dir) {
                    // Prescribed value for the xhi side (ori=3).
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][3]*msku(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(dom_hi.x,j,k)*msku(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::orlanski_rad) {
                    // Mirror image of the xlo radiation BC using the last
                    // interior face at dom_hi.x.
                    Real grad_hi     = calc_arr(dom_hi.x  ,j  ,k) - calc_arr(dom_hi.x  ,j-1,k);
                    Real grad_hi_jp1 = calc_arr(dom_hi.x  ,j+1,k) - calc_arr(dom_hi.x  ,j  ,k);
                    Real dUdt = calc_arr(dom_hi.x,j,k) - dest_arr(dom_hi.x  ,j,k);
                    Real dUdx = dest_arr(dom_hi.x,j,k) - dest_arr(dom_hi.x-1,j,k);
                    if (dUdt * dUdx < 0.0_rt) dUdt = 0.0_rt;
                    Real dUde = (dUdt * (grad_hi + grad_hi_jp1) > 0.0_rt) ? grad_hi : grad_hi_jp1;
                    Real cff = std::max(dUdx*dUdx+dUde*dUde,eps);
                    Real Cx = dUdt * dUdx;
                    dest_arr(i,j,k) = (cff * calc_arr(dom_hi.x+1,j,k) + Cx * dest_arr(dom_hi.x,j,k)) * msku(i,j,0) / (cff + Cx);
                }
            });
        // Fill the xhi ghost faces strictly outside the domain.
        ParallelFor(
            grow(bx_xhi,IntVect(0,-1,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                // Reflect across the hi boundary face at dom_hi.x+1.
                int iflip = 2*(dom_hi.x + 1) - i;
                int inner = (bc_ptr[n].hi(0) == REMORABCType::foextrap) ? 1 : 0;
                if (bc_ptr[n].hi(0) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][3]*msku(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::foextrap || bc_ptr[n].hi(0) == REMORABCType::clamped ||
                           bc_ptr[n].hi(0) == REMORABCType::orlanski_rad) {
                    dest_arr(i,j,k) = dest_arr(dom_hi.x+1-inner,j,k)*msku(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(iflip,j,k)*msku(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(iflip,j,k)*msku(i,j,0);
                }
            });
    } // not periodic in x

    if (!is_periodic_in_y or bccomp == BCVars::foextrap_bc(m_ncons))
    {
        // Populate ghost cells on lo-y and hi-y domain boundaries.
        // Boxes shrunk by one in x (grow by -1) so x-y corners are handled
        // by the final corner pass.
        Box bx_ylo(bx); bx_ylo.setBig  (1,dom_lo.y-1);
        Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+1);
        ParallelFor(
            grow(bx_ylo,IntVect(-1,0,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                // Reflection for cell-centered y (boundary between cells).
                int jflip = dom_lo.y - 1 - j;
                if (bc_ptr[n].lo(1) == REMORABCType::ext_dir) {
                    // Prescribed value for the ylo side (ori=1).
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][1]*msku(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::foextrap || bc_ptr[n].lo(1) == REMORABCType::clamped) {
                    dest_arr(i,j,k) = dest_arr(i,dom_lo.y,k)*msku(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::orlanski_rad) {
                    // Radiation BC on the tangential (ylo) boundary: normal
                    // direction is now y (dUde), tangential is x (dUdx).
                    Real grad_lo     = calc_arr(i+1,dom_lo.y  ,k) - calc_arr(i  ,dom_lo.y  ,k);
                    Real grad_lo_im1 = calc_arr(i  ,dom_lo.y  ,k) - calc_arr(i-1,dom_lo.y  ,k);
                    Real dUdt = calc_arr(i,dom_lo.y,k) - dest_arr(i,dom_lo.y  ,k);
                    Real dUde = dest_arr(i,dom_lo.y,k) - dest_arr(i,dom_lo.y+1,k);
                    if (dUdt * dUde < 0.0_rt) dUdt = 0.0_rt;
                    Real dUdx = (dUdt * (grad_lo_im1 + grad_lo) > 0.0_rt) ? grad_lo_im1 : grad_lo;
                    Real cff = std::max(dUdx * dUdx + dUde * dUde, eps);
                    Real Ce = dUdt * dUde;
                    dest_arr(i,j,k) = (cff * calc_arr(i,dom_lo.y-1,k) + Ce * dest_arr(i,dom_lo.y,k)) * msku(i,j,0) / (cff + Ce);
                } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(i,jflip,k)*msku(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,jflip,k)*msku(i,j,0);
                }
            },
            grow(bx_yhi,IntVect(-1,0,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int jflip = 2*dom_hi.y + 1 - j;
                if (bc_ptr[n].hi(1) == REMORABCType::ext_dir) {
                    // Prescribed value for the yhi side (ori=4).
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][4]*msku(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::foextrap || bc_ptr[n].hi(1) == REMORABCType::clamped) {
                    dest_arr(i,j,k) = dest_arr(i,dom_hi.y,k)*msku(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::orlanski_rad) {
                    // Mirror image of the ylo radiation BC.
                    Real grad_hi     = calc_arr(i+1,dom_hi.y  ,k) - calc_arr(i  ,dom_hi.y  ,k);
                    Real grad_hi_im1 = calc_arr(i  ,dom_hi.y  ,k) - calc_arr(i-1,dom_hi.y  ,k);
                    Real dUdt = calc_arr(i,dom_hi.y,k) - dest_arr(i,dom_hi.y  ,k);
                    Real dUde = dest_arr(i,dom_hi.y,k) - dest_arr(i,dom_hi.y-1,k);
                    if (dUdt * dUde < 0.0_rt) dUdt = 0.0_rt;
                    Real dUdx = (dUdt * (grad_hi_im1 + grad_hi) > 0.0_rt) ? grad_hi_im1 : grad_hi;
                    Real cff = std::max(dUdx*dUdx+dUde*dUde,eps);
                    Real Ce = dUdt * dUde;
                    dest_arr(i,j,k) = (cff * calc_arr(i,dom_hi.y+1,k) + Ce * dest_arr(i,dom_hi.y,k)) * msku(i,j,0) / (cff+Ce);
                } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(i,jflip,k)*msku(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,jflip,k)*msku(i,j,0);
                }
            }
        );
    } // not periodic in y

    {
        // Populate ghost cells on lo-z and hi-z domain boundaries.
        // z is never periodic here, so this block runs unconditionally;
        // no radiation BC is supported in the vertical.
        Box bx_zlo(bx); bx_zlo.setBig  (2,dom_lo.z-1);
        Box bx_zhi(bx); bx_zhi.setSmall(2,dom_hi.z+1);
        ParallelFor(
            bx_zlo & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int kflip = dom_lo.z - 1 - k;
                if (bc_ptr[n].lo(2) == REMORABCType::ext_dir) {
                    // Prescribed value for the zlo side (ori=2).
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][2]*msku(i,j,0);
                } else if (bc_ptr[n].lo(2) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(i,j,dom_lo.z)*msku(i,j,0);
                } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(i,j,kflip)*msku(i,j,0);
                } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,j,kflip)*msku(i,j,0);
                }
            },
            bx_zhi & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int kflip = 2*dom_hi.z + 1 - k;
                if (bc_ptr[n].hi(2) == REMORABCType::ext_dir) {
                    // Prescribed value for the zhi side (ori=5).
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][5]*msku(i,j,0);
                } else if (bc_ptr[n].hi(2) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(i,j,dom_hi.z)*msku(i,j,0);
                } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(i,j,kflip)*msku(i,j,0);
                } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,j,kflip)*msku(i,j,0);
                }
            }
        );
    } // z

    // Corner pass: the x and y sweeps above each skipped one layer in the
    // other direction, so fill the four x-y corner columns by averaging the
    // adjacent x- and y-boundary values — unless that side is clamped, in
    // which case the clamped value set above must be left untouched.
    if ((!is_periodic_in_x or bccomp == BCVars::foextrap_bc(m_ncons)) and
        (!is_periodic_in_y or bccomp == BCVars::foextrap_bc(m_ncons))) {
        Box xlo(bx);  xlo.setBig  (0,dom_lo.x  );
        Box xhi(bx);  xhi.setSmall(0,dom_hi.x+1);
        Box ylo(bx);  ylo.setBig  (1,dom_lo.y-1);
        Box yhi(bx);  yhi.setSmall(1,dom_hi.y+1);
        Box xlo_ylo = xlo & ylo;
        Box xlo_yhi = xlo & yhi;
        Box xhi_ylo = xhi & ylo;
        Box xhi_yhi = xhi & yhi;

        const bool clamp_west  = m_domain_bcs_type[bccomp].lo(0) == REMORABCType::clamped;
        const bool clamp_east  = m_domain_bcs_type[bccomp].hi(0) == REMORABCType::clamped;
        const bool clamp_south = m_domain_bcs_type[bccomp].lo(1) == REMORABCType::clamped;
        const bool clamp_north = m_domain_bcs_type[bccomp].hi(1) == REMORABCType::clamped;

        if (!clamp_west && !clamp_south) {
            ParallelFor(xlo_ylo & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_lo.y,k) + dest_arr(dom_lo.x+1,j,k));
            });
        }
        if (!clamp_west && !clamp_north) {
            ParallelFor(xlo_yhi & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_hi.y,k) + dest_arr(dom_lo.x+1,j,k));
            });
        }
        if (!clamp_east && !clamp_south) {
            ParallelFor(xhi_ylo & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_lo.y,k) + dest_arr(dom_hi.x,j,k));
            });
        }
        if (!clamp_east && !clamp_north) {
            ParallelFor(xhi_yhi & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_hi.y,k) + dest_arr(dom_hi.x,j,k));
            });
        }
    }


    // Wait for the async htod copy and all ParallelFor kernels before bcrs_d
    // (whose data bc_ptr aliases) goes out of scope.
    Gpu::streamSynchronize();
}
Cross references (from the documentation index):
- impose_xvel_bcs(const amrex::Array4<amrex::Real>& dest_arr, const amrex::Box& bx, const amrex::Box& domain, const amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> dxInv, const amrex::Array4<const amrex::Real>& msku, const amrex::Array4<const amrex::Real>& calc_arr, amrex::Real time, int bccomp) — apply x-velocity-type boundary conditions
- m_domain_bcs_type : amrex::Vector<amrex::BCRec>
- m_geom : amrex::Geometry
- m_bc_extdir_vals_d : amrex::Gpu::DeviceVector<amrex::GpuArray<amrex::Real, AMREX_SPACEDIM*2>>
- BCVars::foextrap_bc(int ncons) noexcept : int