// Setup routine. (The leading part of the signature is elided in the original
// listing; the name Define and the fba/fdm parameters are inferred from their
// uses in the body below.)
void Define (BoxArray const& fba, DistributionMapping const& fdm,
             Geometry const& fgeom,
             BoxArray const& cba, DistributionMapping const& cdm,
             Geometry const& cgeom,
             int nghost, int nghost_set,
             int ncomp, InterpBase* interp)
{
    AMREX_ALWAYS_ASSERT(nghost <= 0);
    AMREX_ALWAYS_ASSERT(nghost_set <= 0);
    AMREX_ALWAYS_ASSERT(nghost <= nghost_set);
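    // Both ghost counts are non-positive by this routine's convention:
    // together the asserts give nghost <= nghost_set <= 0, i.e.
    // |nghost| >= |nghost_set|, so the relaxation region is at least as wide
    // as the directly-set region (terminology inferred from the parameter names).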
    // ... (elided)

    IndexType m_ixt = fba.ixType();
    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
        // ... (body elided; presumably computes the refinement ratio
        //      m_ratio used below)
    }
    // Build the list of coarse boxes needed to interpolate to each fine box
    BoxList cbl;   // declaration implied by the uses below; elided in the listing
    cbl.reserve(fba.size());
    for (int i(0); i < fba.size(); ++i) {
        Box coarse_box(interp->CoarseBox(fba[i], m_ratio));
        cbl.push_back(coarse_box);
    }

    BoxArray cf_cba(std::move(cbl));
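    // cf_cba pairs each fine box with the coarse region the interpolator's
    // stencil needs (via interp->CoarseBox), so coarse data copied onto a
    // MultiFab built from it covers every stencil, presumably paired with
    // the fine DistributionMapping so the interpolation itself is local.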
    // Integer mask on the fine BoxArray: one component, no ghost cells
    m_cf_mask = std::make_unique<iMultiFab>(fba, fdm, 1, 0);
    // ... (elided)

    if (nghost_set <= 0) {
        // ... (body elided)
    }
    // Bounding box of the fine BoxArray, grown by one cell
    Box fba_bnd = amrex::grow(fba.minimalBox(), IntVect(1,1,1));

    BoxList com_bl; BoxArray com_ba;

    // Complement of the fine BoxArray inside the grown bounding box
    fba.complementIn(com_bl, fba_bnd);

    // The complement must be non-empty
    AMREX_ALWAYS_ASSERT(com_bl.size() > 0);
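    // If the complement were empty, the fine grids would fill the entire
    // grown bounding box and there would be no coarse-fine boundary to mask.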
    // Grow the complement boxes by |nghost| laterally (nghost <= 0, so
    // -nghost >= 0) so they reach that many cells into the fine grids
    IntVect box_grow_vect(-nghost, -nghost, 0);
    // For face-centered data, grow instead by one in the nodal direction
    if (fba.ixType()[0] == IndexType::NODE) {
        box_grow_vect = IntVect(1,0,0);
    } else if (fba.ixType()[1] == IndexType::NODE) {
        box_grow_vect = IntVect(0,1,0);
    } else if (fba.ixType()[2] == IndexType::NODE) {
        box_grow_vect = IntVect(0,0,1);
    }
    // Grow each box in the complement
    Vector<Box>& com_bl_v = com_bl.data();
    for (int i(0); i < com_bl.size(); ++i) {
        Box& bx = com_bl_v[i];
        bx.grow(box_grow_vect);
    }
    // Complement of the grown complement
    com_ba.define(std::move(com_bl));
    com_ba.complementIn(com_bl, fba_bnd);
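    // Net effect of the double complement: fba_bnd minus the grown exterior
    // is the fine region with its grown boundary strip removed, so the cells
    // marked below are exactly those beyond that distance from the
    // coarse-fine boundary.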
    // Fill the mask based on the com_bl BoxList
    for (MFIter mfi(*m_cf_mask); mfi.isValid(); ++mfi) {
        const Box& vbx = mfi.validbox();
        const Array4<int>& mask_arr = m_cf_mask->array(mfi);

        for (auto const& b : com_bl) {
            Box com_bx = vbx & b;   // intersect with this box's valid region
            ParallelFor(com_bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
            {
                mask_arr(i,j,k) = mask_val;   // mask_val is set in elided code
            });
        }
    }
}
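// Example (hedged): with cell-centered data and nghost = -2, the exterior
// boxes grow two cells into the fine grids in x and y, so the final com_bl
// excludes a two-cell-wide lateral strip along the coarse-fine boundary and
// the loop above marks every fine cell outside that strip with mask_val.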
// Face-based interpolation from crse to fine in the masked region.
// (The leading part of the signature is elided; the name InterpFace and the
// fine/mask_val parameters are inferred from the body below.)
void InterpFace (MultiFab& fine, MultiFab const& crse, int mask_val)
{
    // ... (ncomp and ratio are set in elided code)
    // Domain grown by one cell in each periodic direction
    Box per_grown_domain = m_cgeom.Domain();
    for (int dim = 0; dim < AMREX_SPACEDIM; dim++) {
        if (m_cgeom.isPeriodic(dim)) {   // condition elided in the listing;
                                         // inferred from the variable name
            per_grown_domain.grow(dim,1);
        }
    }
    FArrayBox slope;   // scratch for slopes; declaration implied by the resize below

    for (MFIter mfi(fine); mfi.isValid(); ++mfi)
    {
        Box const& fbx = mfi.validbox();

        slope.resize(fbx, ncomp, The_Async_Arena());

        Array4<Real>       const& fine_arr  = fine.array(mfi);
        Array4<Real>       const& slope_arr = slope.array();
        Array4<Real const> const& crse_arr  = crse.const_array(mfi);
        Array4<int  const> const& mask_arr  = m_cf_mask->const_array(mfi);
        if (fbx.type(0) == IndexType::NODE)   // x-faces
        {
            // Pass 1: fine faces that coincide with coarse faces
            AMREX_HOST_DEVICE_PARALLEL_FOR_3D_FLAG(RunOn::Gpu, fbx, i, j, k,
            {
                if (mask_arr(i,j,k) == mask_val) {
                    const int ii = amrex::coarsen(i, ratio[0]);
                    if (i - ii*ratio[0] == 0) {   // i is a multiple of ratio[0]
                        interp_face_reg(i,j,k,ratio,fine_arr,0,crse_arr,slope_arr,
                                        ncomp,per_grown_domain,0);
                    }
                }
            });
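            // Pass 2 below reads the fine values written by pass 1 (the two
            // coarse-coincident faces bracketing each in-between face), so
            // the two loops cannot be fused into a single kernel.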
            // Pass 2: fine faces strictly between coarse faces
            AMREX_HOST_DEVICE_PARALLEL_FOR_3D_FLAG(RunOn::Gpu, fbx, i, j, k,
            {
                if (mask_arr(i,j,k) == mask_val) {
                    const int ii = amrex::coarsen(i, ratio[0]);
                    if (i - ii*ratio[0] != 0) {
                        Real const w = static_cast<Real>(i - ii*ratio[0])
                                       * (Real(1.)/Real(ratio[0]));
                        fine_arr(i,j,k,0) = (Real(1.)-w) * fine_arr(ii*ratio[0],j,k,0)
                                          +           w  * fine_arr((ii+1)*ratio[0],j,k,0);
                    }
                }
            });
        }
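        // Weight check: for ratio[0] = 2 the single in-between face gets
        // w = 1/2, the average of its two neighbors; for ratio[0] = 3 the two
        // in-between faces get w = 1/3 and w = 2/3.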
        else if (fbx.type(1) == IndexType::NODE)   // y-faces
        {
            // Pass 1: fine faces that coincide with coarse faces
            AMREX_HOST_DEVICE_PARALLEL_FOR_3D_FLAG(RunOn::Gpu, fbx, i, j, k,
            {
                if (mask_arr(i,j,k) == mask_val) {
                    const int jj = amrex::coarsen(j, ratio[1]);
                    if (j - jj*ratio[1] == 0) {
                        interp_face_reg(i,j,k,ratio,fine_arr,0,crse_arr,slope_arr,
                                        ncomp,per_grown_domain,1);
                    }
                }
            });

            // Pass 2: fine faces strictly between coarse faces
            AMREX_HOST_DEVICE_PARALLEL_FOR_3D_FLAG(RunOn::Gpu, fbx, i, j, k,
            {
                if (mask_arr(i,j,k) == mask_val) {
                    const int jj = amrex::coarsen(j, ratio[1]);
                    if (j - jj*ratio[1] != 0) {
                        Real const w = static_cast<Real>(j - jj*ratio[1])
                                       * (Real(1.)/Real(ratio[1]));
                        fine_arr(i,j,k,0) = (Real(1.)-w) * fine_arr(i,jj*ratio[1],k,0)
                                          +           w  * fine_arr(i,(jj+1)*ratio[1],k,0);
                    }
                }
            });
        }
        else   // z-faces
        {
            // Pass 1: fine faces that coincide with coarse faces
            AMREX_HOST_DEVICE_PARALLEL_FOR_3D_FLAG(RunOn::Gpu, fbx, i, j, k,
            {
                if (mask_arr(i,j,k) == mask_val) {
                    const int kk = amrex::coarsen(k, ratio[2]);
                    if (k - kk*ratio[2] == 0) {
                        interp_face_reg(i,j,k,ratio,fine_arr,0,crse_arr,slope_arr,
                                        ncomp,per_grown_domain,2);
                    }
                }
            });

            // Pass 2: fine faces strictly between coarse faces
            AMREX_HOST_DEVICE_PARALLEL_FOR_3D_FLAG(RunOn::Gpu, fbx, i, j, k,
            {
                if (mask_arr(i,j,k) == mask_val) {
                    const int kk = amrex::coarsen(k, ratio[2]);
                    if (k - kk*ratio[2] != 0) {
                        Real const w = static_cast<Real>(k - kk*ratio[2])
                                       * (Real(1.)/Real(ratio[2]));
                        fine_arr(i,j,k,0) = (Real(1.)-w) * fine_arr(i,j,kk*ratio[2],0)
                                          +           w  * fine_arr(i,j,(kk+1)*ratio[2],0);
                    }
                }
            });
        }
    } // MFIter
}
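// Ordering note (hedged): within one MFIter iteration both passes launch on
// the same AMReX GPU stream, and same-stream kernels execute in issue order,
// so pass 2 reliably sees the values pass 1 wrote.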
// Cell-centered conservative linear interpolation in the masked region.
// (The leading part of the signature is elided; the name InterpCell and the
// fine/mask_val parameters are inferred from the body below.)
void InterpCell (MultiFab& fine, MultiFab const& crse,
                 Vector<BCRec> const& bcr, int mask_val)
{
    // ... (ncomp and ratio are set in elided code)

    IndexType m_ixt = fine.boxArray().ixType();
    Box const& cdomain = amrex::convert(m_cgeom.Domain(), m_ixt);
    for (MFIter mfi(fine); mfi.isValid(); ++mfi) {
        Box const& fbx = mfi.validbox();

        Array4<Real>       const& fine_arr = fine.array(mfi);
        Array4<Real const> const& crse_arr = crse.const_array(mfi);
        Array4<int  const> const& mask_arr = m_cf_mask->const_array(mfi);

        bool run_on_gpu = Gpu::inLaunchRegion();
        amrex::ignore_unused(run_on_gpu);   // unused in CPU-only builds
        // Coarse region covering this fine box, shrunk by one in each refined
        // direction so the slope stencil (one coarse cell on either side)
        // stays within valid coarse data
        const Box& crse_region = m_interp->CoarseBox(fbx, ratio);
        Box cslope_bx(crse_region);
        for (int dim = 0; dim < AMREX_SPACEDIM; dim++) {
            if (ratio[dim] > 1) {
                cslope_bx.grow(dim, -1);
            }
        }
        // Scratch space: AMREX_SPACEDIM slope components per field component
        FArrayBox ccfab(cslope_bx, ncomp*AMREX_SPACEDIM, The_Async_Arena());
        Array4<Real>       const& tmp  = ccfab.array();
        Array4<Real const> const& ctmp = ccfab.const_array();
#ifdef AMREX_USE_GPU
        // Stage the BC records in device memory when running on GPU
        AsyncArray<BCRec> async_bcr(bcr.data(), (run_on_gpu) ? ncomp : 0);
        BCRec const* bcrp = (run_on_gpu) ? async_bcr.data() : bcr.data();
#else
        BCRec const* bcrp = bcr.data();
#endif
        // Compute limited slopes on the coarse box
        AMREX_HOST_DEVICE_PARALLEL_FOR_4D_FLAG(RunOn::Gpu, cslope_bx, ncomp, i, j, k, n,
        {
            mf_cell_cons_lin_interp_mcslope(i,j,k,n, tmp, crse_arr, 0, ncomp,
                                            cdomain, ratio, bcrp);
        });
        // Interpolate to the fine cells selected by the mask
        AMREX_HOST_DEVICE_PARALLEL_FOR_4D_FLAG(RunOn::Gpu, fbx, ncomp, i, j, k, n,
        {
            if (mask_arr(i,j,k) == mask_val) {
                mf_cell_cons_lin_interp(i,j,k,n, fine_arr, 0, ctmp,
                                        crse_arr, 0, ncomp, ratio);
            }
        });
    } // MFIter
}
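// Usage sketch (hypothetical object and level data; the enclosing class and
// call sites are not shown in this listing):
//   fill_patcher.InterpFace(fine_xvel,  crse_xvel,  mask_val);            // face data
//   fill_patcher.InterpCell(fine_state, crse_state, domain_bcs, mask_val); // cell data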