ADD: first commit

2024-05-16 11:01:56 +02:00
parent 934b57b1b4
commit 1295f4db59
31 changed files with 815 additions and 66838 deletions


@@ -0,0 +1,113 @@
diff -bur amrex-18.07/Src/Base/AMReX_ParallelDescriptor.cpp amrex-18.07.1/Src/Base/AMReX_ParallelDescriptor.cpp
--- amrex-18.07/Src/Base/AMReX_ParallelDescriptor.cpp 2018-07-02 19:40:21
+++ amrex-18.07.1/Src/Base/AMReX_ParallelDescriptor.cpp 2023-07-19 16:50:02
@@ -1950,22 +1950,17 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- IntVect iv[2]; // Used to construct the data types
- MPI_Datatype types[] = {
- MPI_LB,
- MPI_INT,
- MPI_UB};
- int blocklens[] = { 1, AMREX_SPACEDIM, 1};
- MPI_Aint disp[3];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].vect, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
+ MPI_Datatype types[] = { MPI_INT };
+ int blocklens[] = { AMREX_SPACEDIM };
+ MPI_Aint disp[] = { 0 };
+ BL_MPI_REQUIRE( MPI_Type_create_struct(1, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(IntVect)) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(IntVect), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
}
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;
@@ -1976,22 +1971,17 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- IndexType iv[2]; // Used to construct the data types
- MPI_Datatype types[] = {
- MPI_LB,
- MPI_UNSIGNED,
- MPI_UB};
- int blocklens[] = { 1, 1, 1};
- MPI_Aint disp[3];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].itype, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
+ MPI_Datatype types[] = { MPI_UNSIGNED };
+ int blocklens[] = { 1 };
+ MPI_Aint disp[] = { 0 };
+ BL_MPI_REQUIRE( MPI_Type_create_struct(1, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(IndexType)) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(IndexType), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
}
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;
@@ -2002,26 +1992,28 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- Box iv[2]; // Used to construct the data types
+ Box bx[2];
MPI_Datatype types[] = {
- MPI_LB,
Mpi_typemap<IntVect>::type(),
Mpi_typemap<IntVect>::type(),
Mpi_typemap<IndexType>::type(),
- MPI_UB};
- int blocklens[] = { 1, 1, 1, 1, 1};
- MPI_Aint disp[5];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].smallend, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].bigend, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].btype, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
+ };
+ int blocklens[] = { 1, 1, 1 };
+ MPI_Aint disp[3];
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].smallend, &disp[0]) );
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].bigend, &disp[1]) );
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].btype, &disp[2]) );
+ disp[2] -= disp[0];
+ disp[1] -= disp[0];
+ disp[0] = 0;
+ BL_MPI_REQUIRE( MPI_Type_create_struct(3, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(bx[0])) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(bx[0]), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
}
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;

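Both patch files in this commit make the same modernization to the Mpi_typemap specializations: the deprecated MPI_LB/MPI_UB marker types, MPI_Address, and MPI_Type_struct (removed in MPI 3.0) are replaced by MPI_Type_create_struct plus, where the default extent differs from the C++ object size, MPI_Type_create_resized. The following is a minimal self-contained sketch of that pattern, not part of the patch; Vec3 and make_vec3_type are illustrative names standing in for AMReX's IntVect and its typemap.

// Standalone sketch of the MPI_Type_create_struct / MPI_Type_create_resized
// pattern adopted by the patch. Vec3 is a stand-in for amrex::IntVect.
#include <mpi.h>
#include <cstdio>

struct Vec3 { int v[3]; };

static MPI_Datatype make_vec3_type ()
{
    MPI_Datatype types[]     = { MPI_INT };
    int          blocklens[] = { 3 };
    MPI_Aint     disp[]      = { 0 };
    MPI_Datatype dtype;
    MPI_Type_create_struct(1, blocklens, disp, types, &dtype);

    // Force the extent to match sizeof(Vec3) so arrays of Vec3 stride
    // correctly; this replaces the old MPI_LB/MPI_UB marker entries.
    MPI_Aint lb, extent;
    MPI_Type_get_extent(dtype, &lb, &extent);
    if (extent != sizeof(Vec3)) {
        MPI_Datatype tmp = dtype;
        MPI_Type_create_resized(tmp, 0, sizeof(Vec3), &dtype);
        MPI_Type_free(&tmp);
    }
    MPI_Type_commit(&dtype);
    return dtype;
}

int main (int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    MPI_Datatype t = make_vec3_type();
    MPI_Aint lb, extent;
    MPI_Type_get_extent(t, &lb, &extent);
    std::printf("extent = %ld, sizeof(Vec3) = %zu\n", (long)extent, sizeof(Vec3));
    MPI_Type_free(&t);
    MPI_Finalize();
    return 0;
}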

@@ -0,0 +1,270 @@
--- amrex-18.07/Src/Base/AMReX_ParallelDescriptor.cpp 2018-07-02 19:40:21.000000000 +0200
+++ AMReX_ParallelDescriptor.cpp 2021-06-21 15:25:26.752813320 +0200
@@ -54,13 +54,13 @@
#ifdef BL_USE_UPCXX
UPCXX_MPI_Mode Mode;
-#endif
+#endif
#ifdef BL_USE_MPI3
MPI_Win cp_win;
MPI_Win fb_win;
#endif
-
+
namespace util
{
//
@@ -951,7 +951,7 @@
BL_MPI_REQUIRE( MPI_Allreduce(&recv_team, &recv, 1, Mpi_typemap<Real>::type(), op,
MyTeam().get_lead_comm()) );
}
- BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, Mpi_typemap<Real>::type(),
+ BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, Mpi_typemap<Real>::type(),
0, MyTeam().get_team_comm()) );
}
else
@@ -994,11 +994,11 @@
BL_MPI_REQUIRE( MPI_Reduce(r, recv_team.dataPtr(), cnt, Mpi_typemap<Real>::type(), op,
0, MyTeam().get_team_comm()) );
if (isTeamLead()) {
- BL_MPI_REQUIRE( MPI_Allreduce(recv_team.dataPtr(), recv.dataPtr(), cnt,
+ BL_MPI_REQUIRE( MPI_Allreduce(recv_team.dataPtr(), recv.dataPtr(), cnt,
Mpi_typemap<Real>::type(), op,
MyTeam().get_lead_comm()) );
}
- BL_MPI_REQUIRE( MPI_Bcast(recv.dataPtr(), cnt, Mpi_typemap<Real>::type(),
+ BL_MPI_REQUIRE( MPI_Bcast(recv.dataPtr(), cnt, Mpi_typemap<Real>::type(),
0, MyTeam().get_team_comm()) );
}
else
@@ -1045,7 +1045,7 @@
RankInLeadComm(cpu), MyTeam().get_lead_comm()) );
}
if (sameTeam(cpu)) {
- BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, Mpi_typemap<Real>::type(),
+ BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, Mpi_typemap<Real>::type(),
0, MyTeam().get_team_comm()) );
}
}
@@ -1098,7 +1098,7 @@
RankInLeadComm(cpu), MyTeam().get_lead_comm()) );
}
if (sameTeam(cpu)) {
- BL_MPI_REQUIRE( MPI_Bcast(&recv[0], cnt, Mpi_typemap<Real>::type(),
+ BL_MPI_REQUIRE( MPI_Bcast(&recv[0], cnt, Mpi_typemap<Real>::type(),
0, MyTeam().get_team_comm()) );
}
}
@@ -1148,7 +1148,7 @@
BL_MPI_REQUIRE( MPI_Allreduce(&recv_team, &recv, 1, MPI_LONG, op,
MyTeam().get_lead_comm()) );
}
- BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, MPI_LONG,
+ BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, MPI_LONG,
0, MyTeam().get_team_comm()) );
}
else
@@ -1191,7 +1191,7 @@
BL_MPI_REQUIRE( MPI_Reduce(r, recv_team.dataPtr(), cnt, MPI_LONG, op,
0, MyTeam().get_team_comm()) );
if (isTeamLead()) {
- BL_MPI_REQUIRE( MPI_Allreduce(recv_team.dataPtr(), recv.dataPtr(), cnt,
+ BL_MPI_REQUIRE( MPI_Allreduce(recv_team.dataPtr(), recv.dataPtr(), cnt,
MPI_LONG, op,
MyTeam().get_lead_comm()) );
}
@@ -1295,7 +1295,7 @@
RankInLeadComm(cpu), MyTeam().get_lead_comm()) );
}
if (sameTeam(cpu)) {
- BL_MPI_REQUIRE( MPI_Bcast(&recv[0], cnt, MPI_LONG,
+ BL_MPI_REQUIRE( MPI_Bcast(&recv[0], cnt, MPI_LONG,
0, MyTeam().get_team_comm()) );
}
}
@@ -1345,7 +1345,7 @@
BL_MPI_REQUIRE( MPI_Allreduce(&recv_team, &recv, 1, MPI_INT, op,
MyTeam().get_lead_comm()) );
}
- BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, MPI_INT,
+ BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, MPI_INT,
0, MyTeam().get_team_comm()) );
}
else
@@ -1388,7 +1388,7 @@
BL_MPI_REQUIRE( MPI_Reduce(r, recv_team.dataPtr(), cnt, MPI_INT, op,
0, MyTeam().get_team_comm()) );
if (isTeamLead()) {
- BL_MPI_REQUIRE( MPI_Allreduce(recv_team.dataPtr(), recv.dataPtr(), cnt,
+ BL_MPI_REQUIRE( MPI_Allreduce(recv_team.dataPtr(), recv.dataPtr(), cnt,
MPI_INT, op,
MyTeam().get_lead_comm()) );
}
@@ -1492,7 +1492,7 @@
RankInLeadComm(cpu), MyTeam().get_lead_comm()) );
}
if (sameTeam(cpu)) {
- BL_MPI_REQUIRE( MPI_Bcast(&recv[0], cnt, MPI_LONG,
+ BL_MPI_REQUIRE( MPI_Bcast(&recv[0], cnt, MPI_LONG,
0, MyTeam().get_team_comm()) );
}
}
@@ -1735,13 +1735,13 @@
return m_finished;
}
-void ParallelDescriptor::EndParallel ()
+void ParallelDescriptor::EndParallel ()
{
ParallelContext::pop();
}
void ParallelDescriptor::Abort (int s, bool backtrace)
-{
+{
if (backtrace && amrex::system::signal_handling) {
BLBackTrace::handler(s);
} else {
@@ -1950,23 +1950,18 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- IntVect iv[2]; // Used to construct the data types
- MPI_Datatype types[] = {
- MPI_LB,
- MPI_INT,
- MPI_UB};
- int blocklens[] = { 1, AMREX_SPACEDIM, 1};
- MPI_Aint disp[3];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].vect, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
- }
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
- BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
+ MPI_Datatype types[] = { MPI_INT };
+ int blocklens[] = { AMREX_SPACEDIM };
+ MPI_Aint disp[] = { 0 };
+ BL_MPI_REQUIRE( MPI_Type_create_struct(1, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(IntVect)) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(IntVect), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
+ }
+ BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;
}
@@ -1976,23 +1971,18 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- IndexType iv[2]; // Used to construct the data types
- MPI_Datatype types[] = {
- MPI_LB,
- MPI_UNSIGNED,
- MPI_UB};
- int blocklens[] = { 1, 1, 1};
- MPI_Aint disp[3];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].itype, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
- }
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
- BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
+ MPI_Datatype types[] = { MPI_UNSIGNED };
+ int blocklens[] = { 1 };
+ MPI_Aint disp[] = { 0 };
+ BL_MPI_REQUIRE( MPI_Type_create_struct(1, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(IndexType)) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(IndexType), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
+ }
+ BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;
}
@@ -2002,27 +1992,29 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- Box iv[2]; // Used to construct the data types
- MPI_Datatype types[] = {
- MPI_LB,
- Mpi_typemap<IntVect>::type(),
- Mpi_typemap<IntVect>::type(),
- Mpi_typemap<IndexType>::type(),
- MPI_UB};
- int blocklens[] = { 1, 1, 1, 1, 1};
- MPI_Aint disp[5];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].smallend, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].bigend, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].btype, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
- }
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
- BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
+ Box bx[2];
+ MPI_Datatype types[] = {
+ Mpi_typemap<IntVect>::type(),
+ Mpi_typemap<IntVect>::type(),
+ Mpi_typemap<IndexType>::type(),
+ };
+ int blocklens[] = { 1, 1, 1 };
+ MPI_Aint disp[3];
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].smallend, &disp[0]) );
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].bigend, &disp[1]) );
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].btype, &disp[2]) );
+ disp[2] -= disp[0];
+ disp[1] -= disp[0];
+ disp[0] = 0;
+ BL_MPI_REQUIRE( MPI_Type_create_struct(3, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(bx[0])) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(bx[0]), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
+ }
+ BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;
}
@@ -2121,7 +2113,7 @@
team_ranks[i] = MyTeamLead() + i;
}
BL_MPI_REQUIRE( MPI_Group_incl(grp, team_size, team_ranks, &team_grp) );
- BL_MPI_REQUIRE( MPI_Comm_create(ParallelDescriptor::Communicator(),
+ BL_MPI_REQUIRE( MPI_Comm_create(ParallelDescriptor::Communicator(),
team_grp, &m_Team.m_team_comm) );
std::vector<int>lead_ranks(m_Team.m_numTeams);
@@ -2129,7 +2121,7 @@
lead_ranks[i] = i * team_size;
}
BL_MPI_REQUIRE( MPI_Group_incl(grp, lead_ranks.size(), &lead_ranks[0], &lead_grp) );
- BL_MPI_REQUIRE( MPI_Comm_create(ParallelDescriptor::Communicator(),
+ BL_MPI_REQUIRE( MPI_Comm_create(ParallelDescriptor::Communicator(),
lead_grp, &m_Team.m_lead_comm) );
BL_MPI_REQUIRE( MPI_Group_free(&grp) );

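The Box hunk in the patches above also shows how member displacements are obtained for a struct whose members are themselves described by MPI datatypes: MPI_Get_address is called on each member of a probe object and the offsets are made relative to the first member. Below is a hedged, standalone sketch of that technique; SmallBox and make_smallbox_type are illustrative names, not the real amrex::Box code.

// Standalone sketch of relative member displacements via MPI_Get_address,
// mirroring the Box hunk. SmallBox is a stand-in for amrex::Box.
#include <mpi.h>

struct SmallBox { int lo[3]; int hi[3]; unsigned type; };

static MPI_Datatype make_smallbox_type ()
{
    SmallBox probe;
    MPI_Datatype types[]     = { MPI_INT, MPI_INT, MPI_UNSIGNED };
    int          blocklens[] = { 3, 3, 1 };
    MPI_Aint     disp[3];
    MPI_Get_address(&probe.lo,   &disp[0]);
    MPI_Get_address(&probe.hi,   &disp[1]);
    MPI_Get_address(&probe.type, &disp[2]);
    disp[2] -= disp[0];   // make displacements relative to the first member
    disp[1] -= disp[0];
    disp[0] = 0;

    MPI_Datatype dtype;
    MPI_Type_create_struct(3, blocklens, disp, types, &dtype);

    // Account for trailing padding so consecutive SmallBox elements line up.
    MPI_Aint lb, extent;
    MPI_Type_get_extent(dtype, &lb, &extent);
    if (extent != sizeof(SmallBox)) {
        MPI_Datatype tmp = dtype;
        MPI_Type_create_resized(tmp, 0, sizeof(SmallBox), &dtype);
        MPI_Type_free(&tmp);
    }
    MPI_Type_commit(&dtype);
    return dtype;
}

int main (int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    MPI_Datatype t = make_smallbox_type();
    MPI_Type_free(&t);
    MPI_Finalize();
    return 0;
}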
packages/amrex/package.py

@@ -0,0 +1,65 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import os

from spack.package import *
from spack.pkg.builtin.amrex import Amrex as SpackAmrex


class Amrex(SpackAmrex):
    version("18.07", sha256="2e8d3a91c5d972e3cebb88d10c4c92112459c88c2342c5a63337f3110bdbff13")

    # Config options
    variant(
        "dimensions",
        default="3",
        values=("1", "2", "3"),
        multi=False,
        description="Dimensionality",
        when="@18.07",
    )
    variant("fbaselib", default=False, description="Fbaselib", when="@18.07")
    variant("fpe", default=False, description="FPE", when="@18.07")

    patch("AMReX_ParallelDescriptor.patch", when="@18.07+mpi")

    @when("@18.07")
    def cmake_args(self):
        args = [
            self.define_from_variant("DIM", "dimensions"),
            self.define_from_variant("BUILD_SHARED_LIBS", "shared"),
            self.define_from_variant("ENABLE_MPI", "mpi"),
            self.define_from_variant("ENABLE_OMP", "openmp"),
            self.define_from_variant("ENABLE_FORTRAN_INTERFACES", "fortran"),
            self.define_from_variant("ENABLE_EB", "eb"),
            self.define_from_variant("ENABLE_LINEAR_SOLVERS", "linear_solvers"),
            self.define_from_variant("ENABLE_AMRDATA", "amrdata"),
            self.define_from_variant("ENABLE_PARTICLES", "particles"),
            self.define_from_variant("ENABLE_SUNDIALS", "sundials"),
            self.define_from_variant("ENABLE_HDF5", "hdf5"),
            self.define_from_variant("ENABLE_HYPRE", "hypre"),
            self.define_from_variant("ENABLE_PETSC", "petsc"),
            self.define_from_variant("ENABLE_CUDA", "cuda"),
            self.define_from_variant("ENABLE_PIC", "pic"),
            self.define_from_variant("ENABLE_FBASELIB", "fbaselib"),
            self.define_from_variant("ENABLE_FPE", "fpe"),
            "-DENABLE_DP=%s" % self.spec.variants["precision"].value.upper(),
            "-DENABLE_DP_PARTICLES=%s" % self.spec.variants["precision"].value.upper(),
        ]

        if "+linear_solvers" in self.spec:
            args.append("-DENABLE_LINEAR_SOLVERS_LEGACY=1")

        if self.spec.satisfies("%fj"):
            args.append("-DCMAKE_Fortran_MODDIR_FLAG=-M")

        if "+cuda" in self.spec:
            cuda_arch = self.spec.variants["cuda_arch"].value
            args.append("-DCUDA_ARCH=" + self.get_cuda_arch_string(cuda_arch))

        return args