REMORA
Regional Modeling of Oceans Refined Adaptively
REMORA_prestep_t_advection.cpp
#include <REMORA.H>

using namespace amrex;

/**
 * Prestep advection calculations for the tracers.
 *
 * @param[in   ] tbx           tile box
 * @param[in   ] gbx           grown tile box
 * @param[in   ] tempold       scalar at the last time step
 * @param[in   ] tempcache     scalar value cached at the current time step
 * @param[in   ] Hz            vertical cell height
 * @param[in   ] Huon          u-volume flux
 * @param[in   ] Hvom          v-volume flux
 * @param[inout] W             vertical velocity
 * @param       DC             temporary
 * @param       FC             temporary
 * @param[out  ] tempstore     scratch space for calculations on scalars
 * @param[in   ] z_w           z coordinates at w-points
 * @param[in   ] h             bathymetry
 * @param[in   ] pm            1/dx
 * @param[in   ] pn            1/dy
 * @param[in   ] msku          land-sea mask on u-points
 * @param[in   ] mskv          land-sea mask on v-points
 * @param[in   ] river_pos     river positions
 * @param[in   ] river_source  river source data to add, if using
 * @param[in   ] iic           current time step index
 * @param[in   ] ntfirst       index of the first time step
 * @param[in   ] nrhs          index of the RHS component
 * @param[in   ] N             number of vertical levels
 * @param[in   ] dt_lev        time step at this level
 */

void
REMORA::prestep_t_advection (const Box& tbx, const Box& gbx,
                             const Array4<Real      >& tempold,
                             const Array4<Real      >& tempcache,
                             const Array4<Real      >& Hz,
                             const Array4<Real      >& Huon,
                             const Array4<Real      >& Hvom,
                             const Array4<Real      >& W,
                             const Array4<Real      >& DC,
                             const Array4<Real      >& FC,
                             const Array4<Real      >& tempstore,
                             const Array4<Real const>& z_w,
                             const Array4<Real const>& h,
                             const Array4<Real const>& pm,
                             const Array4<Real const>& pn,
                             const Array4<Real const>& msku,
                             const Array4<Real const>& mskv,
                             const Array4<int  const>& river_pos,
                             const Array4<Real const>& river_source,
                             int iic, int ntfirst, int nrhs, int N,
                             Real dt_lev)
{
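    // The domain box and its periodicity flags are used below to extrapolate the
    // tracer differences FX and FE one-sidedly at non-periodic physical boundaries.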
    const Box& domain = geom[0].Domain();
    const auto dlo = amrex::lbound(domain);
    const auto dhi = amrex::ubound(domain);

    GeometryData const& geomdata = geom[0].data();
    bool is_periodic_in_x = geomdata.isPeriodic(0);
    bool is_periodic_in_y = geomdata.isPeriodic(1);

    // Copies of the tile box, grown below
    Box gbx1  = tbx;
    Box gbx2  = tbx;
    Box tbxp1 = tbx;
    Box tbxp2 = tbx;

    tbxp1.grow(IntVect(NGROW-1,NGROW-1,0));
    tbxp2.grow(IntVect(NGROW,NGROW,0));
    FArrayBox fab_FX(tbxp2,1,amrex::The_Async_Arena());   // 3D
    FArrayBox fab_FE(tbxp2,1,amrex::The_Async_Arena());   // 3D
    FArrayBox fab_curv(tbxp2,1,amrex::The_Async_Arena()); // fab_curv.setVal(0.0_rt);
    FArrayBox fab_grad(tbxp2,1,amrex::The_Async_Arena()); // fab_grad.setVal(0.0_rt);

    auto FX   = fab_FX.array();
    auto FE   = fab_FE.array();
    auto curv = fab_curv.array();
    auto grad = fab_grad.array();

    ParallelFor(tbxp2,
    [=] AMREX_GPU_DEVICE (int i, int j, int k)
    {
        grad(i,j,k) = 0.0_rt;
        curv(i,j,k) = 0.0_rt;

        FX(i,j,k) = 0.0_rt;
        FE(i,j,k) = 0.0_rt;
    });
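
    // Face-centered boxes for the u- and v-fluxes on the tile box and on the box
    // grown by NGROW-1 cells, plus grown cell-centered boxes (clipped against gbx)
    // used for the vertical integration below.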
    Box ubx = surroundingNodes(tbx,0);
    Box vbx = surroundingNodes(tbx,1);

    Box utbxp1 = surroundingNodes(tbxp1,0);
    Box vtbxp1 = surroundingNodes(tbxp1,1);

    Box gbx3uneven_init(IntVect(AMREX_D_DECL(tbx.smallEnd(0)-3,tbx.smallEnd(1)-3,tbx.smallEnd(2))),
                        IntVect(AMREX_D_DECL(tbx.bigEnd(0)+2,tbx.bigEnd(1)+2,tbx.bigEnd(2))));
    BoxArray ba_gbx3uneven = intersect(BoxArray(gbx3uneven_init), gbx);
    AMREX_ASSERT((ba_gbx3uneven.size() == 1));
    Box gbx3uneven = ba_gbx3uneven[0];

    gbx2.grow(IntVect(NGROW,NGROW,0));
    BoxArray ba_gbx2 = intersect(BoxArray(gbx2), gbx);
    AMREX_ASSERT((ba_gbx2.size() == 1));
    gbx2 = ba_gbx2[0];

    gbx1.grow(IntVect(NGROW-1,NGROW-1,0));
    BoxArray ba_gbx1 = intersect(BoxArray(gbx1), gbx);
    AMREX_ASSERT((ba_gbx1.size() == 1));
    gbx1 = ba_gbx1[0];

    //------------------------------------------------------------------------
    // Vertically integrate horizontal mass flux divergence.
    //------------------------------------------------------------------------
    //
    // Should really use gbx3uneven
    Box gbx3unevenD = gbx3uneven;
    gbx3unevenD.makeSlab(2,0);
    Box gbx1D = gbx1;
    gbx1D.makeSlab(2,0);

    ParallelFor(gbx1D,
    [=] AMREX_GPU_DEVICE (int i, int j, int )
    {
        // Starting with zero vertical velocity at the bottom, integrate from the
        // bottom (k=0) to the free surface (k=N+1). W(i,j,N+1) then holds the
        // vertical velocity at the free surface, d(zeta)/d(t). Notice that the
        // barotropic mass flux divergence is not used directly.
        //
        //W(i,j,-1)=0.0_rt;
        W(i,j,0) = 0.0_rt;
        for (int k=1; k<=N+1; k++) {
            W(i,j,k) = W(i,j,k-1) - (Huon(i+1,j,k-1)-Huon(i,j,k-1)) - (Hvom(i,j+1,k-1)-Hvom(i,j,k-1));
        }
    });
    ParallelFor(gbx1D,
    [=] AMREX_GPU_DEVICE (int i, int j, int )
    {
        // Subtract the free-surface value, weighted by the relative height
        // (z_w+h) of each w-point, so that the vertical velocity vanishes at
        // the moving free surface while remaining zero at the bottom.
        Real wrk_i = W(i,j,N+1)/(z_w(i,j,N+1)+h(i,j,0,0));

        for (int k=1; k<=N; k++) {
            W(i,j,k) = W(i,j,k) - wrk_i*(z_w(i,j,k)+h(i,j,0,0));
        }
        W(i,j,N+1) = 0.0_rt;
    });

    // From ini_fields and .in file
    // fab_Akt.setVal(1e-6);
    FArrayBox fab_stflux(tbxp2,1,amrex::The_Async_Arena());
    auto stflux = fab_stflux.array();
    FArrayBox fab_btflux(tbxp2,1,amrex::The_Async_Arena());
    auto btflux = fab_btflux.array();

    // From ini_fields and .in file
    // fab_stflux.setVal(0.0_rt);
    // also set btflux=0 (as in ana_btflux.H)

    ParallelFor(tbxp2, [=] AMREX_GPU_DEVICE (int i, int j, int k)
    {
        stflux(i,j,k) = 0.0_rt;
        btflux(i,j,k) = 0.0_rt;
    });

    // Use FC and DC as intermediate arrays for FX and FE
    // First pass: do centered 2d terms
    if (solverChoice.flat_bathymetry) {

        ParallelFor(tbxp1, [=] AMREX_GPU_DEVICE (int i, int j, int k)
        {
            FX(i,j,k) = Box(tempold).contains(i-1,j,k) ? Huon(i,j,k)*
                        0.5_rt*(tempold(i-1,j,k)+tempold(i  ,j,k)) : 1e34_rt;
        });
        ParallelFor(tbxp1, [=] AMREX_GPU_DEVICE (int i, int j, int k)
        {
            FE(i,j,k) = Box(tempold).contains(i,j-1,k) ? Hvom(i,j,k)*
                        0.5_rt*(tempold(i,j-1,k)+tempold(i,j,k)) : 1e34_rt;
        });

    } else {
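        // With non-flat bathymetry, FX and FE first hold masked tracer differences;
        // their curvature (upstream3) or gradient (centered4) is used below to
        // correct the centered fluxes built from Huon and Hvom.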

        ParallelFor(utbxp1, [=] AMREX_GPU_DEVICE (int i, int j, int k)
        {
            // should be t index 3
            FX(i,j,k) = (tempold(i,j,k,nrhs)-tempold(i-1,j,k,nrhs)) * msku(i,j,0);
        });

        Box utbxp1_slab_lo = makeSlab(utbxp1,0,dlo.x-1) & utbxp1;
        Box utbxp1_slab_hi = makeSlab(utbxp1,0,dhi.x+1) & utbxp1;
        if (utbxp1_slab_lo.ok() && !is_periodic_in_x) {
            ParallelFor(utbxp1_slab_lo, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                FX(i,j,k) = FX(i+1,j,k);
            });
        }
        if (utbxp1_slab_hi.ok() && !is_periodic_in_x) {
            ParallelFor(utbxp1_slab_hi, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                FX(i+1,j,k) = FX(i,j,k);
            });
        }

        ParallelFor(vtbxp1, [=] AMREX_GPU_DEVICE (int i, int j, int k)
        {
            // should be t index 3
            FE(i,j,k) = (tempold(i,j,k,nrhs)-tempold(i,j-1,k,nrhs)) * mskv(i,j,0);
        });

        Box vtbxp1_slab_lo = makeSlab(vtbxp1,1,dlo.y-1) & vtbxp1;
        Box vtbxp1_slab_hi = makeSlab(vtbxp1,1,dhi.y+1) & vtbxp1;
        if (vtbxp1_slab_lo.ok() && !is_periodic_in_y) {
            ParallelFor(vtbxp1_slab_lo, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                FE(i,j,k) = FE(i,j+1,k);
            });
        }
        if (vtbxp1_slab_hi.ok() && !is_periodic_in_y) {
            ParallelFor(vtbxp1_slab_hi, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                FE(i,j+1,k) = FE(i,j,k);
            });
        }

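        // Weights for the high-order corrections: cffa multiplies the upstream-biased
        // curvature term, cffb the centered gradient term.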
        Real cffa = 1.0_rt/6.0_rt;
        Real cffb = 1.0_rt/3.0_rt;
        if (solverChoice.tracer_Hadv_scheme == AdvectionScheme::upstream3)
        {
            ParallelFor(tbxp1, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                // Upstream3
                curv(i,j,k) = -FX(i,j,k)+FX(i+1,j,k);
            });

            ParallelFor(tbxp1, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                Real max_Huon = std::max(Huon(i,j,k),0.0_rt);
                Real min_Huon = std::min(Huon(i,j,k),0.0_rt);

                FX(i,j,k) = Huon(i,j,k)*0.5_rt*(tempold(i,j,k)+tempold(i-1,j,k))-
                            cffa*(curv(i,j,k)*min_Huon+ curv(i-1,j,k)*max_Huon);
            });

        } else if (solverChoice.tracer_Hadv_scheme == AdvectionScheme::centered4) {

            ParallelFor(tbxp1, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                // Centered4
                grad(i,j,k) = 0.5_rt*(FX(i,j,k)+FX(i+1,j,k));
            });

            ParallelFor(ubx, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                FX(i,j,k) = Huon(i,j,k)*0.5_rt*(tempold(i,j,k)+tempold(i-1,j,k)-
                            cffb*(grad(i,j,k)-grad(i-1,j,k)));
            });
        } else {
            Error("Not a valid horizontal advection scheme");
        }

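        // Build the meridional fluxes FE from Hvom in the same way, using the
        // selected tracer advection scheme.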
        if (solverChoice.tracer_Hadv_scheme == AdvectionScheme::upstream3)
        {
            ParallelFor(tbxp1, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                curv(i,j,k) = -FE(i,j,k)+FE(i,j+1,k);
            });

            ParallelFor(tbxp1, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                Real max_Hvom = std::max(Hvom(i,j,k),0.0_rt);
                Real min_Hvom = std::min(Hvom(i,j,k),0.0_rt);

                FE(i,j,k) = Hvom(i,j,k)*0.5_rt*(tempold(i,j,k)+tempold(i,j-1,k))-
                            cffa*(curv(i,j,k)*min_Hvom+ curv(i,j-1,k)*max_Hvom);
            });

        } else if (solverChoice.tracer_Hadv_scheme == AdvectionScheme::centered4) {

            ParallelFor(tbxp1, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                grad(i,j,k) = 0.5_rt*(FE(i,j,k)+FE(i,j+1,k));
            });

            ParallelFor(vbx, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                FE(i,j,k) = Hvom(i,j,k)*0.5_rt*(tempold(i,j,k)+tempold(i,j-1,k)-
                            cffb*(grad(i,j,k)- grad(i,j-1,k)));
            });
        } else {
            Error("Not a valid horizontal advection scheme");
        }
    } // not flat

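    // At cells flagged as river positions, overwrite the horizontal flux on the
    // corresponding u- or v-face: zero it out, or, when conservative river source
    // data are provided, replace it with Huon/Hvom times the river source value.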
    bool do_rivers_cons = (river_source.size() > 0);
    if (river_pos.size() > 0) { // assumed guard: only apply river adjustments when river data are present
        int* river_direction_d = river_direction.data();
        ParallelFor(tbxp1, [=] AMREX_GPU_DEVICE (int i, int j, int k)
        {
            int iriver = river_pos(i,j,0);
            if (iriver >= 0) {
                if (river_direction_d[iriver] == 0) {
                    FX(i,j,k) = (!do_rivers_cons) ? 0.0_rt : Huon(i,j,k) * river_source(iriver,0,k);
                } else {
                    FE(i,j,k) = (!do_rivers_cons) ? 0.0_rt : Hvom(i,j,k) * river_source(iriver,0,k);
                }
            }
        });
    }

    // Intermediate tracer at 3
    //
    // Time-step horizontal advection (m Tunits).
    //

    Real cff1 = 0.0_rt, cff2 = 0.0_rt, cff;

    Real GammaT = 1.0_rt/6.0_rt;

    if (iic == ntfirst)
    {
        cff  = 0.5_rt*dt_lev;
        cff1 = 1.0_rt;
        cff2 = 0.0_rt;
    } else {
        cff  = (1.0_rt-GammaT)*dt_lev;
        cff1 = 0.5_rt+GammaT;
        cff2 = 0.5_rt-GammaT;
    }

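    // Predictor for horizontal advection: blend the old and cached tracer values
    // (cff1=1, cff2=0 on the very first step, a GammaT=1/6 weighting otherwise),
    // weight by the cell thickness Hz, and subtract cff*pm*pn times the horizontal
    // flux divergence. tempstore is left in Hz-weighted form.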
    ParallelFor(tbx, [=] AMREX_GPU_DEVICE (int i, int j, int k)
    {
        tempstore(i,j,k) = Hz(i,j,k)*( cff1 * tempold(i,j,k)+
                                       cff2 * tempcache(i,j,k) )-
                           cff * pm(i,j,0)*pn(i,j,0) * (FX(i+1,j,k)-FX(i,j,k)+
                                                        FE(i,j+1,k)-FE(i,j,k));
    });

    //
    // Time-step vertical advection of tracers (Tunits). Impose artificial
    // continuity equation.
    //
    ParallelFor(convert(tbx,IntVect(0,0,1)), [=] AMREX_GPU_DEVICE (int i, int j, int k)
    {
        //-----------------------------------------------------------------------
        // Add in vertical advection.
        //-----------------------------------------------------------------------

        Real c1 = 0.5_rt;
        Real c2 = 7.0_rt/12.0_rt;
        Real c3 = 1.0_rt/12.0_rt;

        if (k>=2 && k<=N-1)
        {
            FC(i,j,k) = ( c2*(tempold(i,j,k-1,nrhs)+ tempold(i,j,k  ,nrhs))
                         -c3*(tempold(i,j,k-2,nrhs)+ tempold(i,j,k+1,nrhs)) ) *
                        ( W(i,j,k) );
        }
        else if (k==N+1) // this needs to be split up so that the following can be concurrent
        {
            FC(i,j,N+1) = 0.0_rt;
        } else if (k==N) {
            FC(i,j,N) = ( c2*tempold(i,j,N-1,nrhs)+ c1*tempold(i,j,N,nrhs)-c3*tempold(i,j,N-2,nrhs) )
                        * W(i,j,N);
        } else if (k==1) {
            FC(i,j,1) = ( c2*tempold(i,j,1,nrhs)+ c1*tempold(i,j,0,nrhs)-c3*tempold(i,j,2,nrhs) )
                        * W(i,j,1);
        } else if (k==0) {
            FC(i,j,0) = 0.0_rt;
        }
    });
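
    // DC is the reciprocal of the cell thickness predicted by the artificial
    // continuity equation: Hz minus cff*pm*pn times the horizontal and vertical
    // volume-flux divergence over this cell.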
    ParallelFor(tbxp1, [=] AMREX_GPU_DEVICE (int i, int j, int k)
    {
        //if (k-1>=0) {
        DC(i,j,k) = 1.0_rt/(Hz(i,j,k)-
                            cff*pm(i,j,0)*pn(i,j,0)*
                            (Huon(i+1,j,k)-Huon(i,j,k)+
                             Hvom(i,j+1,k)-Hvom(i,j,k)+
                             (W(i,j,k+1)-W(i,j,k))));
        //} else {
        //    DC(i,j,k) = 1.0_rt/(Hz(i,j,k)-
        //                        cff*pm(i,j,0)*pn(i,j,0)*
        //                        (Huon(i+1,j,k)-Huon(i,j,k)+
        //                         Hvom(i,j+1,k)-Hvom(i,j,k)+
        //                         (W(i,j,k))));
        //}
    });
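
    // Complete the predictor: subtract cff*pm*pn times the vertical flux difference
    // and multiply by DC to recover a tracer concentration in tempstore.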
    ParallelFor(tbx,
    [=] AMREX_GPU_DEVICE (int i, int j, int k)
    {
        Real c1 = cff*pm(i,j,0)*pn(i,j,0);

        Real c4 = FC(i,j,k+1)-FC(i,j,k);

        tempstore(i,j,k) = DC(i,j,k)*(tempstore(i,j,k)-c1*c4);
    });
}