REMORA
Regional Modeling of Oceans Refined Adaptively
Loading...
Searching...
No Matches
REMORA_BoundaryConditions_yvel.cpp
Go to the documentation of this file.
1#include "AMReX_PhysBCFunct.H"
3
4using namespace amrex;
5
/**
 * Impose physical boundary conditions on the y-face (v-point) velocity.
 *
 * @param[inout] dest_arr data on which to apply BCs
 * @param[in   ] bx       box to update on
 * @param[in   ] domain   domain box
 * @param[in   ] dxInv    pm or pn (unused in this routine)
 * @param[in   ] mskv     land-sea mask on v-points (every assignment below is
 *                        multiplied by it, so land points are zeroed)
 * @param[in   ] calc_arr data to use in the RHS of calculations
 * @param[in   ] time     current time (unused in this routine)
 * @param[in   ] bccomp   index into both domain_bcs_type_bcr and bc_extdir_vals for icomp=0
 */
void REMORAPhysBCFunct::impose_yvel_bcs (const Array4<Real>& dest_arr, const Box& bx, const Box& domain,
                                         const GpuArray<Real,AMREX_SPACEDIM> /*dxInv*/, const Array4<const Real>& mskv,
                                         const Array4<const Real>& calc_arr,
                                         Real /*time*/, int bccomp)
{
    BL_PROFILE_VAR("impose_yvel_bcs()",impose_yvel_bcs);
    const auto& dom_lo = amrex::lbound(domain);
    const auto& dom_hi = amrex::ubound(domain);

    // Based on BCRec for the domain, we need to make BCRec for this Box.
    // bccomp is used as starting index for m_domain_bcs_type
    //      0 is used as starting index for bcrs
    int ncomp = 1;
    Vector<BCRec> bcrs(ncomp);
    amrex::setBC(bx, domain, bccomp, 0, ncomp, m_domain_bcs_type, bcrs);

    // Orientation indices used into bc_extdir_vals_ptr[...][ori]:
    // xlo: ori = 0
    // ylo: ori = 1
    // zlo: ori = 2
    // xhi: ori = 3
    // yhi: ori = 4
    // zhi: ori = 5

    // Copy the per-component BCRec to device memory so the GPU lambdas can read it.
    amrex::Gpu::DeviceVector<BCRec> bcrs_d(ncomp);
#ifdef AMREX_USE_GPU
    Gpu::htod_memcpy_async(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
#else
    std::memcpy(bcrs_d.data(), bcrs.data(), sizeof(BCRec)*ncomp);
#endif
    const amrex::BCRec* bc_ptr = bcrs_d.data();
    const auto* bc_extdir_vals_ptr = m_bc_extdir_vals_d.data();

    GeometryData const& geomdata = m_geom.data();
    bool is_periodic_in_x = geomdata.isPeriodic(0);
    bool is_periodic_in_y = geomdata.isPeriodic(1);
    // Floor for the Orlanski denominator so we never divide by zero.
    const Real eps= 1.0e-20_rt;

    // Valid index space of dest_arr as a y-face-centered box, shrunk by one on
    // the high y side; all fill boxes below are intersected with this so we
    // never write outside the allocated array.
    Box dest_arr_box = growHi(convert(Box(dest_arr),IntVect(0,1,0)),1,-1);

    // First do all ext_dir bcs
    if (!is_periodic_in_x or bccomp == BCVars::foextrap_bc(m_ncons))
    {
        // Populate ghost cells on lo-x and hi-x domain boundaries
        Box bx_xlo(bx);  bx_xlo.setBig  (0,dom_lo.x-1);
        Box bx_xhi(bx);  bx_xhi.setSmall(0,dom_hi.x+1);
        ParallelFor(
            // The boxes are shrunk by one in y so the j+1/j-1 stencils below stay in range.
            grow(bx_xlo,IntVect(0,-1,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                // Mirror index across the lo-x boundary (used by the reflect_* cases).
                int iflip = dom_lo.x - 1- i;
                if (bc_ptr[n].lo(0) == REMORABCType::ext_dir) {
                    // Dirichlet: use the stored external value for the xlo face (ori=0).
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][0]*mskv(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::foextrap || bc_ptr[n].lo(0) == REMORABCType::clamped) {
                    // Zero-gradient: copy the first interior value.
                    dest_arr(i,j,k) = dest_arr(dom_lo.x,j,k)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::orlanski_rad) {
                    // Orlanski-type radiation condition built from one-sided differences
                    // of calc_arr (old-time data) and dest_arr at the boundary.
                    Real grad_lo     = calc_arr(dom_lo.x  ,j+1,k) - calc_arr(dom_lo.x  ,j  ,k);
                    Real grad_lo_jm1 = calc_arr(dom_lo.x  ,j  ,k) - calc_arr(dom_lo.x  ,j-1,k);
                    Real dVdt = calc_arr(dom_lo.x,j,k) - dest_arr(dom_lo.x  ,j,k);
                    Real dVdx = dest_arr(dom_lo.x,j,k) - dest_arr(dom_lo.x+1,j,k);
                    // Only radiate outgoing signals: zero the time tendency if the
                    // phase speed would be directed into the domain.
                    if (dVdt * dVdx < 0.0_rt) dVdt = 0.0_rt;
                    // Upstream-biased choice of the tangential gradient.
                    Real dVde = (dVdt * (grad_lo_jm1 + grad_lo) > 0.0_rt) ? grad_lo_jm1 : grad_lo;
                    Real cff = std::max(dVdx*dVdx + dVde*dVde,eps);
                    Real Cx = dVdt * dVdx;
                    // Cx >= 0 here (dVdt was zeroed otherwise) and cff >= eps, so the
                    // denominator is strictly positive.
                    dest_arr(i,j,k) = (cff * calc_arr(dom_lo.x-1,j,k) + Cx * dest_arr(dom_lo.x,j,k)) * mskv(i,j,0) / (cff + Cx);
                } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) =  dest_arr(iflip,j,k)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(0) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(iflip,j,k)*mskv(i,j,0);
                }
            },
            grow(bx_xhi,IntVect(0,-1,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                // Mirror index across the hi-x boundary.
                int iflip = 2*dom_hi.x + 1 - i;
                if (bc_ptr[n].hi(0) == REMORABCType::ext_dir) {
                    // Dirichlet: stored external value for the xhi face (ori=3).
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][3]*mskv(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::foextrap || bc_ptr[n].hi(0) == REMORABCType::clamped) {
                    dest_arr(i,j,k) = dest_arr(dom_hi.x,j,k)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::orlanski_rad) {
                    // Mirror image of the xlo radiation condition (stencil offsets flipped in x).
                    Real grad_hi     = calc_arr(dom_hi.x  ,j+1,k) - calc_arr(dom_hi.x  ,j  ,k);
                    Real grad_hi_jm1 = calc_arr(dom_hi.x  ,j  ,k) - calc_arr(dom_hi.x  ,j-1,k);
                    Real dVdt = calc_arr(dom_hi.x,j,k) - dest_arr(dom_hi.x  ,j,k);
                    Real dVdx = dest_arr(dom_hi.x,j,k) - dest_arr(dom_hi.x-1,j,k);
                    if (dVdt*dVdx < 0.0_rt) dVdt = 0.0_rt;
                    Real dVde = (dVdt * (grad_hi_jm1 + grad_hi) > 0.0_rt) ? grad_hi_jm1 : grad_hi;
                    Real cff = std::max(dVdx*dVdx+dVde*dVde,eps);
                    Real Cx = dVdt * dVdx;
                    dest_arr(i,j,k) = (cff * calc_arr(dom_hi.x+1,j,k) + Cx * dest_arr(dom_hi.x,j,k)) * mskv(i,j,0) / (cff + Cx);
                } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) =  dest_arr(iflip,j,k)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(0) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(iflip,j,k)*mskv(i,j,0);
                }
            }
        );
    }

    if (!is_periodic_in_y or bccomp == BCVars::foextrap_bc(m_ncons))
    {
        // Populate ghost cells on lo-y and hi-y domain boundaries.
        // v is staggered in y, so the physical boundary faces themselves are
        // j = dom_lo.y and j = dom_hi.y+1; the ghost regions lie beyond them.
        Box bx_ylo(bx);  bx_ylo.setBig  (1,dom_lo.y-1);
        Box bx_yhi(bx);  bx_yhi.setSmall(1,dom_hi.y+2);
        Box bx_ylo_face(bx);  bx_ylo_face.setSmall(1,dom_lo.y  );  bx_ylo_face.setBig(1,dom_lo.y  );
        Box bx_yhi_face(bx);  bx_yhi_face.setSmall(1,dom_hi.y+1);  bx_yhi_face.setBig(1,dom_hi.y+1);

        ParallelFor(
            // We only set the values on the domain faces themselves if EXT_DIR or outflow
            // (boxes shrunk by one in x so the i+1/i-1 stencils stay in range)
            grow(bx_ylo_face,IntVect(-1,0,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                if (bc_ptr[n].lo(1) == REMORABCType::ext_dir) {
                    // Dirichlet: stored external value for the ylo face (ori=1).
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][1]*mskv(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::foextrap) {
                    // Zero-gradient: copy the first interior v-face.
                    dest_arr(i,j,k) = dest_arr(i,dom_lo.y+1,k)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::orlanski_rad) {
                    // Radiation in the normal (y) direction; tangential gradients taken in x.
                    Real grad_lo_jp1  = calc_arr(i  ,dom_lo.y+1,k) - calc_arr(i-1,dom_lo.y+1,k);
                    Real grad_lo_ijp1 = calc_arr(i+1,dom_lo.y+1,k) - calc_arr(i  ,dom_lo.y+1,k);
                    Real dVdt = calc_arr(i,dom_lo.y+1,k) - dest_arr(i,dom_lo.y+1,k);
                    Real dVde = dest_arr(i,dom_lo.y+1,k) - dest_arr(i,dom_lo.y+2,k);
                    // Only radiate outgoing signals.
                    if (dVdt*dVde < 0.0_rt) dVdt = 0.0_rt;
                    Real dVdx = (dVdt * (grad_lo_jp1 + grad_lo_ijp1) > 0.0_rt) ? grad_lo_jp1 : grad_lo_ijp1;
                    Real cff = std::max(dVdx*dVdx + dVde*dVde, eps);
                    Real Ce = dVdt * dVde;
                    // Ce >= 0 and cff >= eps, so the denominator is strictly positive.
                    dest_arr(i,j,k) = (cff * calc_arr(i,dom_lo.y,k) + Ce * dest_arr(i,dom_lo.y+1,k)) * mskv(i,j,0) / (cff + Ce);
                }
            });
        ParallelFor(
            // Ghost cells below the lo-y boundary face.
            grow(bx_ylo,IntVect(-1,0,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                // Mirror index across the boundary face at j = dom_lo.y.
                int jflip = dom_lo.y-j;
                // foextrap copies from the first interior face (dom_lo.y+1); clamped and
                // orlanski_rad copy from the boundary face itself (dom_lo.y), which was
                // just set by the face loop above.
                int inner = (bc_ptr[n].lo(1) == REMORABCType::foextrap) ? 1 : 0;
                if (bc_ptr[n].lo(1) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][1]*mskv(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::foextrap || bc_ptr[n].lo(1) == REMORABCType::clamped ||
                           bc_ptr[n].lo(1) == REMORABCType::orlanski_rad) {
                    dest_arr(i,j,k) = dest_arr(i,dom_lo.y+inner,k)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) =  dest_arr(i,jflip,k)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(1) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,jflip,k)*mskv(i,j,0);
                }
            });
        ParallelFor(
            // We only set the values on the domain faces themselves if EXT_DIR or outflow
            grow(bx_yhi_face,IntVect(-1,0,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                if (bc_ptr[n].hi(1) == REMORABCType::ext_dir) {
                    // Dirichlet: stored external value for the yhi face (ori=4).
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][4]*mskv(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(i,dom_hi.y,k)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::orlanski_rad) {
                    // Mirror image of the ylo radiation condition.
                    Real grad_hi     = calc_arr(i  ,dom_hi.y  ,k) - calc_arr(i-1,dom_hi.y  ,k);
                    Real grad_hi_ip1 = calc_arr(i+1,dom_hi.y  ,k) - calc_arr(i  ,dom_hi.y  ,k);
                    Real dVdt = calc_arr(i,dom_hi.y,k) - dest_arr(i,dom_hi.y  ,k);
                    Real dVde = dest_arr(i,dom_hi.y,k) - dest_arr(i,dom_hi.y-1,k);
                    if (dVdt*dVde < 0.0_rt) dVdt = 0.0_rt;
                    Real dVdx = (dVdt * (grad_hi + grad_hi_ip1) > 0.0_rt) ? grad_hi : grad_hi_ip1;
                    Real cff = std::max(dVdx*dVdx + dVde*dVde, eps);
                    Real Ce = dVdt * dVde;
                    dest_arr(i,j,k) = (cff * calc_arr(i,dom_hi.y+1,k) + Ce * dest_arr(i,dom_hi.y,k)) * mskv(i,j,0) / (cff + Ce);
                }
            });
        ParallelFor(
            // Ghost cells beyond the hi-y boundary face.
            grow(bx_yhi,IntVect(-1,0,0)) & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                // Mirror index across the boundary face at j = dom_hi.y+1.
                int jflip = 2*(dom_hi.y + 1) - j;
                // See the lo-y ghost loop: foextrap reads the first interior face,
                // clamped/orlanski_rad read the boundary face set above.
                int inner = (bc_ptr[n].hi(1) == REMORABCType::foextrap) ? 1 : 0;
                if (bc_ptr[n].hi(1) == REMORABCType::ext_dir) {
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][4]*mskv(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::foextrap || bc_ptr[n].hi(1) == REMORABCType::clamped ||
                           bc_ptr[n].hi(1) == REMORABCType::orlanski_rad) {
                    dest_arr(i,j,k) = dest_arr(i,dom_hi.y+1-inner,k)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) =  dest_arr(i,jflip,k)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(1) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,jflip,k)*mskv(i,j,0);
                }
            });
    }

    {
        // Populate ghost cells on lo-z and hi-z domain boundaries
        // (always done; z is never periodic here)
        Box bx_zlo(bx);  bx_zlo.setBig  (2,dom_lo.z-1);
        Box bx_zhi(bx);  bx_zhi.setSmall(2,dom_hi.z+1);
        ParallelFor(
            bx_zlo & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                // Mirror index across the lo-z boundary.
                int kflip = dom_lo.z - 1 - k;
                if (bc_ptr[n].lo(2) == REMORABCType::ext_dir) {
                    // Dirichlet: stored external value for the zlo face (ori=2).
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][2]*mskv(i,j,0);
                } else if (bc_ptr[n].lo(2) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(i,j,dom_lo.z)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) =  dest_arr(i,j,kflip)*mskv(i,j,0);
                } else if (bc_ptr[n].lo(2) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,j,kflip)*mskv(i,j,0);
                }
            },
            bx_zhi & dest_arr_box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) {
                // Mirror index across the hi-z boundary.
                int kflip = 2*dom_hi.z + 1 - k;
                if (bc_ptr[n].hi(2) == REMORABCType::ext_dir) {
                    // Dirichlet: stored external value for the zhi face (ori=5).
                    dest_arr(i,j,k) = bc_extdir_vals_ptr[bccomp+n][5]*mskv(i,j,0);
                } else if (bc_ptr[n].hi(2) == REMORABCType::foextrap) {
                    dest_arr(i,j,k) = dest_arr(i,j,dom_hi.z)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_even) {
                    dest_arr(i,j,k) =  dest_arr(i,j,kflip)*mskv(i,j,0);
                } else if (bc_ptr[n].hi(2) == REMORABCType::reflect_odd) {
                    dest_arr(i,j,k) = -dest_arr(i,j,kflip)*mskv(i,j,0);
                }
            }
        );
    }

    // Fill the four x/y corner ghost regions with the average of the two
    // adjacent edge values, but leave a corner alone if either adjoining side
    // is clamped (its values were already prescribed above). Note these
    // averages are not masked by mskv; presumably the mask is applied by the
    // callers that consume these corner ghost values — TODO confirm.
    if ((!is_periodic_in_x or bccomp == BCVars::foextrap_bc(m_ncons)) and
        (!is_periodic_in_y or bccomp == BCVars::foextrap_bc(m_ncons))) {
        Box xlo(bx);  xlo.setBig  (0,dom_lo.x-1);
        Box xhi(bx);  xhi.setSmall(0,dom_hi.x+1);
        Box ylo(bx);  ylo.setBig  (1,dom_lo.y  );
        Box yhi(bx);  yhi.setSmall(1,dom_hi.y+1);
        Box xlo_ylo = xlo & ylo;
        Box xlo_yhi = xlo & yhi;
        Box xhi_ylo = xhi & ylo;
        Box xhi_yhi = xhi & yhi;
        const bool clamp_west  = m_domain_bcs_type[bccomp].lo(0) == REMORABCType::clamped;
        const bool clamp_east  = m_domain_bcs_type[bccomp].hi(0) == REMORABCType::clamped;
        const bool clamp_south = m_domain_bcs_type[bccomp].lo(1) == REMORABCType::clamped;
        const bool clamp_north = m_domain_bcs_type[bccomp].hi(1) == REMORABCType::clamped;

        if (!clamp_west && !clamp_south) {
            ParallelFor(xlo_ylo & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_lo.y+1,k) + dest_arr(dom_lo.x,j,k));
            });
        }
        if (!clamp_west && !clamp_north) {
            ParallelFor(xlo_yhi & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_hi.y,k) + dest_arr(dom_lo.x,j,k));
            });
        }
        if (!clamp_east && !clamp_south) {
            ParallelFor(xhi_ylo & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_lo.y+1,k) + dest_arr(dom_hi.x,j,k));
            });
        }
        if (!clamp_east && !clamp_north) {
            ParallelFor(xhi_yhi & dest_arr_box, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                dest_arr(i,j,k) = 0.5 * (dest_arr(i,dom_hi.y,k) + dest_arr(dom_hi.x,j,k));
            });
        }
    }

    // Ensure all device work (including the async htod copy of bcrs_d, whose
    // buffer goes out of scope here) has completed before returning.
    Gpu::streamSynchronize();
}
amrex::Vector< amrex::BCRec > m_domain_bcs_type
amrex::Geometry m_geom
void impose_yvel_bcs(const amrex::Array4< amrex::Real > &dest_arr, const amrex::Box &bx, const amrex::Box &domain, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > dxInv, const amrex::Array4< const amrex::Real > &mskv, const amrex::Array4< const amrex::Real > &calc_arr, amrex::Real time, int bccomp)
Apply boundary conditions to the y-velocity (v-point) component.
amrex::Gpu::DeviceVector< amrex::GpuArray< amrex::Real, AMREX_SPACEDIM *2 > > m_bc_extdir_vals_d
int foextrap_bc(int ncons) noexcept