REMORA
Regional Modeling of Oceans Refined Adaptively
REMORA_BoundaryConditions_yvel.cpp
#include "AMReX_PhysBCFunct.H"
#include "REMORA_PhysBCFunct.H"

using namespace amrex;

/**
 * Apply y-velocity type boundary conditions.
 *
 * @param[inout] dest_arr data on which to apply BCs
 * @param[in   ] bx       box to update on
 * @param[in   ] domain   domain box
 * @param[in   ] dxInv    pm or pn
 * @param[in   ] mskv     land-sea mask on v-points
 * @param[in   ] calc_arr data to use in the RHS of calculations
 * @param[in   ] time     current time
 * @param[in   ] bccomp   index into both domain_bcs_type_bcr and bc_extdir_vals for icomp=0
 */
void REMORAPhysBCFunct::impose_yvel_bcs (const Array4<Real>& dest_arr, const Box& bx, const Box& domain,
                                         const GpuArray<Real,AMREX_SPACEDIM> /*dxInv*/, const Array4<const Real>& mskv,
                                         const Array4<const Real>& calc_arr,
                                         Real /*time*/, int bccomp)
{
    BL_PROFILE_VAR("impose_yvel_bcs()",impose_yvel_bcs);
    const auto& dom_lo = amrex::lbound(domain);
    const auto& dom_hi = amrex::ubound(domain);

    // Based on BCRec for the domain, we need to make BCRec for this Box
    // bccomp is used as starting index for m_domain_bcs_type
    //      0 is used as starting index for bcrs
    int ncomp = 1;
    Vector<BCRec> bcrs(ncomp);
    amrex::setBC(bx, domain, bccomp, 0, ncomp, m_domain_bcs_type, bcrs);

    // xlo: ori = 0
    // ylo: ori = 1
    // zlo: ori = 2
    // xhi: ori = 3
    // yhi: ori = 4
    // zhi: ori = 5

    amrex::Gpu::DeviceVector<BCRec> bcrs_d(ncomp);
#ifdef AMREX_USE_GPU
    Gpu::htod_memcpy_async(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
#else
    std::memcpy(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
#endif
    const amrex::BCRec* bc_ptr = bcrs_d.data();

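    // Copy the Dirichlet (ext_dir) boundary values into a stack-allocated GpuArray
    // so the device lambdas below can capture them by value.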
    GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>, AMREX_SPACEDIM+NCONS+8> l_bc_extdir_vals_d;

    for (int i = 0; i < ncomp; i++)
        for (int ori = 0; ori < 2*AMREX_SPACEDIM; ori++)
            l_bc_extdir_vals_d[i][ori] = m_bc_extdir_vals[bccomp+i][ori];

    GeometryData const& geomdata = m_geom.data();
    bool is_periodic_in_x = geomdata.isPeriodic(0);
    bool is_periodic_in_y = geomdata.isPeriodic(1);
    const Real eps = 1.0e-20_rt;

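    // Box actually covered by dest_arr, re-typed as y-faces: convert() adds one extra
    // face on the high side in y, which growHi(...,1,-1) removes. The kernels below are
    // intersected with this box so they never index outside the allocated data.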
    Box dest_arr_box = growHi(convert(Box(dest_arr),IntVect(0,1,0)),1,-1);
    // First do all ext_dir bcs
    if (!is_periodic_in_x or bccomp==BCVars::foextrap_bc)
    {
        // Populate ghost cells on lo-x and hi-x domain boundaries
        Box bx_xlo(bx); bx_xlo.setBig  (0,dom_lo.x-1);
        Box bx_xhi(bx); bx_xhi.setSmall(0,dom_hi.x+1);
        ParallelFor(
            grow(bx_xlo,IntVect(0,-1,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int iflip = dom_lo.x - 1 - i;
                if (bc_ptr[n].lo(0) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][0]*mskv(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::foextrap || bc_ptr[n].lo(0) == REMORABCType::clamped) {
                    dest_arr(i,j,k) = dest_arr(dom_lo.x,j,k)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::orlanski_rad) {
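                    // Orlanski-type radiation condition: dVdt approximates the time
                    // tendency at the first interior face, dVdx the outward-normal (x)
                    // gradient and dVde the tangential (y) gradient. Cx = dVdt*dVdx acts
                    // as a phase speed that advects the boundary value outward; eps
                    // bounds cff away from zero so the weighted average is well defined.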
                    Real grad_lo     = calc_arr(dom_lo.x  ,j+1,k) - calc_arr(dom_lo.x  ,j  ,k);
                    Real grad_lo_jm1 = calc_arr(dom_lo.x  ,j  ,k) - calc_arr(dom_lo.x  ,j-1,k);
                    Real dVdt = calc_arr(dom_lo.x,j,k) - dest_arr(dom_lo.x  ,j,k);
                    Real dVdx = dest_arr(dom_lo.x,j,k) - dest_arr(dom_lo.x+1,j,k);
                    if (dVdt * dVdx < 0.0_rt) dVdt = 0.0_rt;
                    Real dVde = (dVdt * (grad_lo_jm1 + grad_lo) > 0.0_rt) ? grad_lo_jm1 : grad_lo;
                    Real cff = std::max(dVdx*dVdx + dVde*dVde, eps);
                    Real Cx = dVdt * dVdx;
                    dest_arr(i,j,k) = (cff * calc_arr(dom_lo.x-1,j,k) + Cx * dest_arr(dom_lo.x,j,k)) * mskv(i,j,0) / (cff + Cx);
                } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(iflip,j,k)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(iflip,j,k)*mskv(i,j,0);
                }
            },
            grow(bx_xhi,IntVect(0,-1,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int iflip = 2*dom_hi.x + 1 - i;
                if (bc_ptr[n].hi(0) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][3]*mskv(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::foextrap || bc_ptr[n].hi(0) == REMORABCType::clamped) {
                    dest_arr(i,j,k) = dest_arr(dom_hi.x,j,k)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::orlanski_rad) {
                    Real grad_hi     = calc_arr(dom_hi.x  ,j+1,k) - calc_arr(dom_hi.x  ,j  ,k);
                    Real grad_hi_jm1 = calc_arr(dom_hi.x  ,j  ,k) - calc_arr(dom_hi.x  ,j-1,k);
                    Real dVdt = calc_arr(dom_hi.x,j,k) - dest_arr(dom_hi.x  ,j,k);
                    Real dVdx = dest_arr(dom_hi.x,j,k) - dest_arr(dom_hi.x-1,j,k);
                    if (dVdt*dVdx < 0.0_rt) dVdt = 0.0_rt;
                    Real dVde = (dVdt * (grad_hi_jm1 + grad_hi) > 0.0_rt) ? grad_hi_jm1 : grad_hi;
                    Real cff = std::max(dVdx*dVdx + dVde*dVde, eps);
                    Real Cx = dVdt * dVdx;
                    dest_arr(i,j,k) = (cff * calc_arr(dom_hi.x+1,j,k) + Cx * dest_arr(dom_hi.x,j,k)) * mskv(i,j,0) / (cff + Cx);
                } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(iflip,j,k)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(iflip,j,k)*mskv(i,j,0);
                }
            }
        );
    }

    if (!is_periodic_in_y or bccomp==BCVars::foextrap_bc)
    {
        // Populate ghost cells on lo-y and hi-y domain boundaries
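        // v is face-centered in y: j = dom_lo.y and j = dom_hi.y+1 are the physical
        // boundary faces (handled by the *_face boxes below), while the strictly
        // exterior ghost faces are j <= dom_lo.y-1 and j >= dom_hi.y+2.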
        Box bx_ylo(bx);      bx_ylo.setBig  (1,dom_lo.y-1);
        Box bx_yhi(bx);      bx_yhi.setSmall(1,dom_hi.y+2);
        Box bx_ylo_face(bx); bx_ylo_face.setSmall(1,dom_lo.y  ); bx_ylo_face.setBig(1,dom_lo.y  );
        Box bx_yhi_face(bx); bx_yhi_face.setSmall(1,dom_hi.y+1); bx_yhi_face.setBig(1,dom_hi.y+1);

        ParallelFor(
            // We only set the values on the domain faces themselves if EXT_DIR or outflow
            grow(bx_ylo_face,IntVect(-1,0,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                if (bc_ptr[n].lo(1) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][1]*mskv(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(i,dom_lo.y+1,k)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::orlanski_rad) {
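                    // Radiation condition on the boundary face itself: here y is the
                    // outward-normal direction, so dVde is the normal gradient, dVdx
                    // the tangential (x) gradient, and Ce the implied phase speed in y.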
                    Real grad_lo_jp1  = calc_arr(i  ,dom_lo.y+1,k) - calc_arr(i-1,dom_lo.y+1,k);
                    Real grad_lo_ijp1 = calc_arr(i+1,dom_lo.y+1,k) - calc_arr(i  ,dom_lo.y+1,k);
                    Real dVdt = calc_arr(i,dom_lo.y+1,k) - dest_arr(i,dom_lo.y+1,k);
                    Real dVde = dest_arr(i,dom_lo.y+1,k) - dest_arr(i,dom_lo.y+2,k);
                    if (dVdt*dVde < 0.0_rt) dVdt = 0.0_rt;
                    Real dVdx = (dVdt * (grad_lo_jp1 + grad_lo_ijp1) > 0.0_rt) ? grad_lo_jp1 : grad_lo_ijp1;
                    Real cff = std::max(dVdx*dVdx + dVde*dVde, eps);
                    Real Ce = dVdt * dVde;
                    dest_arr(i,j,k) = (cff * calc_arr(i,dom_lo.y,k) + Ce * dest_arr(i,dom_lo.y+1,k)) * mskv(i,j,0) / (cff + Ce);
                }
            });
        ParallelFor(
            grow(bx_ylo,IntVect(-1,0,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int jflip = dom_lo.y-j;
                int inner = (bc_ptr[n].lo(1) == REMORABCType::foextrap) ? 1 : 0;
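                // For foextrap the exterior ghost faces copy the first interior face
                // at dom_lo.y+1; for clamped and orlanski_rad they copy the boundary
                // face value at dom_lo.y.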
                if (bc_ptr[n].lo(1) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][1]*mskv(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::foextrap || bc_ptr[n].lo(1) == REMORABCType::clamped ||
                           bc_ptr[n].lo(1) == REMORABCType::orlanski_rad) {
                    dest_arr(i,j,k) = dest_arr(i,dom_lo.y+inner,k)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(i,jflip,k)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,jflip,k)*mskv(i,j,0);
                }
            });
        ParallelFor(
            // We only set the values on the domain faces themselves if EXT_DIR or outflow
            grow(bx_yhi_face,IntVect(-1,0,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                if (bc_ptr[n].hi(1) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][4]*mskv(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(i,dom_hi.y,k)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::orlanski_rad) {
                    Real grad_hi     = calc_arr(i  ,dom_hi.y  ,k) - calc_arr(i-1,dom_hi.y  ,k);
                    Real grad_hi_ip1 = calc_arr(i+1,dom_hi.y  ,k) - calc_arr(i  ,dom_hi.y  ,k);
                    Real dVdt = calc_arr(i,dom_hi.y,k) - dest_arr(i,dom_hi.y  ,k);
                    Real dVde = dest_arr(i,dom_hi.y,k) - dest_arr(i,dom_hi.y-1,k);
                    if (dVdt*dVde < 0.0_rt) dVdt = 0.0_rt;
                    Real dVdx = (dVdt * (grad_hi + grad_hi_ip1) > 0.0_rt) ? grad_hi : grad_hi_ip1;
                    Real cff = std::max(dVdx*dVdx + dVde*dVde, eps);
                    Real Ce = dVdt * dVde;
                    dest_arr(i,j,k) = (cff * calc_arr(i,dom_hi.y+1,k) + Ce * dest_arr(i,dom_hi.y,k)) * mskv(i,j,0) / (cff + Ce);
                }
            });
        ParallelFor(
            grow(bx_yhi,IntVect(-1,0,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int jflip = 2*(dom_hi.y + 1) - j;
                int inner = (bc_ptr[n].hi(1) == REMORABCType::foextrap) ? 1 : 0;
                if (bc_ptr[n].hi(1) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][4]*mskv(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::foextrap || bc_ptr[n].hi(1) == REMORABCType::clamped ||
                           bc_ptr[n].hi(1) == REMORABCType::orlanski_rad) {
                    dest_arr(i,j,k) = dest_arr(i,dom_hi.y+1-inner,k)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(i,jflip,k)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,jflip,k)*mskv(i,j,0);
                }
            });
    }

    {
        // Populate ghost cells on lo-z and hi-z domain boundaries
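        // In the vertical only simple conditions apply (Dirichlet, extrapolation,
        // reflection); there is no radiation condition in z.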
        Box bx_zlo(bx); bx_zlo.setBig  (2,dom_lo.z-1);
        Box bx_zhi(bx); bx_zhi.setSmall(2,dom_hi.z+1);
        ParallelFor(
            bx_zlo & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int kflip = dom_lo.z - 1 - k;
                if (bc_ptr[n].lo(2) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][2]*mskv(i,j,0);
                } else if (bc_ptr[n].lo(2) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(i,j,dom_lo.z)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(i,j,kflip)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,j,kflip)*mskv(i,j,0);
                }
            },
            bx_zhi & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                int kflip = 2*dom_hi.z + 1 - k;
                if (bc_ptr[n].hi(2) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = l_bc_extdir_vals_d[n][5]*mskv(i,j,0);
                } else if (bc_ptr[n].hi(2) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(i,j,dom_hi.z)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) = dest_arr(i,j,kflip)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,j,kflip)*mskv(i,j,0);
                }
            }
        );
    }

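    // Corner regions lying outside the domain in both x and y are filled with the
    // average of the two one-sided values set above, unless either adjacent side is
    // clamped, in which case the corner is left as already filled.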
    if ((!is_periodic_in_x or bccomp==BCVars::foextrap_bc) and
        (!is_periodic_in_y or bccomp==BCVars::foextrap_bc)) {
        Box xlo(bx); xlo.setBig  (0,dom_lo.x-1);
        Box xhi(bx); xhi.setSmall(0,dom_hi.x+1);
        Box ylo(bx); ylo.setBig  (1,dom_lo.y  );
        Box yhi(bx); yhi.setSmall(1,dom_hi.y+1);
        Box xlo_ylo = xlo & ylo;
        Box xlo_yhi = xlo & yhi;
        Box xhi_ylo = xhi & ylo;
        Box xhi_yhi = xhi & yhi;
        const bool clamp_west  = m_domain_bcs_type[bccomp].lo(0) == REMORABCType::clamped;
        const bool clamp_east  = m_domain_bcs_type[bccomp].hi(0) == REMORABCType::clamped;
        const bool clamp_south = m_domain_bcs_type[bccomp].lo(1) == REMORABCType::clamped;
        const bool clamp_north = m_domain_bcs_type[bccomp].hi(1) == REMORABCType::clamped;

        if (!clamp_west && !clamp_south) {
            ParallelFor(xlo_ylo & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_lo.y+1,k) + dest_arr(dom_lo.x,j,k));
            });
        }
        if (!clamp_west && !clamp_north) {
            ParallelFor(xlo_yhi & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_hi.y,k) + dest_arr(dom_lo.x,j,k));
            });
        }
        if (!clamp_east && !clamp_south) {
            ParallelFor(xhi_ylo & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_lo.y+1,k) + dest_arr(dom_hi.x,j,k));
            });
        }
        if (!clamp_east && !clamp_north) {
            ParallelFor(xhi_yhi & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_hi.y,k) + dest_arr(dom_hi.x,j,k));
            });
        }
    }

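    // Make sure the asynchronous host-to-device copy and the kernels above have
    // finished before bcrs_d goes out of scope.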
    Gpu::streamSynchronize();
}