ADD: first commit

commit 1295f4db59
parent 934b57b1b4
2024-05-16 11:01:56 +02:00
31 changed files with 815 additions and 66838 deletions

@@ -0,0 +1,113 @@
diff -bur amrex-18.07/Src/Base/AMReX_ParallelDescriptor.cpp amrex-18.07.1/Src/Base/AMReX_ParallelDescriptor.cpp
--- amrex-18.07/Src/Base/AMReX_ParallelDescriptor.cpp 2018-07-02 19:40:21
+++ amrex-18.07.1/Src/Base/AMReX_ParallelDescriptor.cpp 2023-07-19 16:50:02
@@ -1950,22 +1950,17 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- IntVect iv[2]; // Used to construct the data types
- MPI_Datatype types[] = {
- MPI_LB,
- MPI_INT,
- MPI_UB};
- int blocklens[] = { 1, AMREX_SPACEDIM, 1};
- MPI_Aint disp[3];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].vect, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
+ MPI_Datatype types[] = { MPI_INT };
+ int blocklens[] = { AMREX_SPACEDIM };
+ MPI_Aint disp[] = { 0 };
+ BL_MPI_REQUIRE( MPI_Type_create_struct(1, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(IntVect)) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(IntVect), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
}
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;
@@ -1976,22 +1971,17 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- IndexType iv[2]; // Used to construct the data types
- MPI_Datatype types[] = {
- MPI_LB,
- MPI_UNSIGNED,
- MPI_UB};
- int blocklens[] = { 1, 1, 1};
- MPI_Aint disp[3];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].itype, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
+ MPI_Datatype types[] = { MPI_UNSIGNED };
+ int blocklens[] = { 1 };
+ MPI_Aint disp[] = { 0 };
+ BL_MPI_REQUIRE( MPI_Type_create_struct(1, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(IndexType)) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(IndexType), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
}
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;
@@ -2002,26 +1992,28 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- Box iv[2]; // Used to construct the data types
+ Box bx[2];
MPI_Datatype types[] = {
- MPI_LB,
Mpi_typemap<IntVect>::type(),
Mpi_typemap<IntVect>::type(),
Mpi_typemap<IndexType>::type(),
- MPI_UB};
- int blocklens[] = { 1, 1, 1, 1, 1};
- MPI_Aint disp[5];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].smallend, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].bigend, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].btype, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
+ };
+ int blocklens[] = { 1, 1, 1 };
+ MPI_Aint disp[3];
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].smallend, &disp[0]) );
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].bigend, &disp[1]) );
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].btype, &disp[2]) );
+ disp[2] -= disp[0];
+ disp[1] -= disp[0];
+ disp[0] = 0;
+ BL_MPI_REQUIRE( MPI_Type_create_struct(3, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(bx[0])) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(bx[0]), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
}
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;
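
The change above is portability work, not a behavior change: MPI_LB, MPI_UB, MPI_Address, and MPI_Type_struct were deprecated in MPI-2.0 and removed in MPI-3.0, so amrex-18.07 no longer compiles against a current MPI library. The modern idiom builds the struct type with MPI_Type_create_struct and then pins its extent with MPI_Type_create_resized, replacing the old LB/UB marker trick. A minimal self-contained sketch of the same pattern, using a hypothetical Pair struct rather than AMReX's IntVect:

#include <mpi.h>
#include <cstdio>

struct Pair { int v[2]; };

static MPI_Datatype make_pair_type ()
{
    // One block of two MPI_INTs at displacement 0 (replaces MPI_Type_struct
    // plus the MPI_LB/MPI_UB bracketing elements).
    MPI_Datatype types[]     = { MPI_INT };
    int          blocklens[] = { 2 };
    MPI_Aint     disp[]      = { 0 };
    MPI_Datatype mine;
    MPI_Type_create_struct(1, blocklens, disp, types, &mine);

    // If the C++ struct carries padding, widen the datatype's extent so that
    // arrays of Pair are strided correctly (replaces the MPI_UB marker).
    MPI_Aint lb, extent;
    MPI_Type_get_extent(mine, &lb, &extent);
    if (extent != sizeof(Pair)) {
        MPI_Datatype tmp = mine;
        MPI_Type_create_resized(tmp, 0, sizeof(Pair), &mine);
        MPI_Type_free(&tmp);
    }
    MPI_Type_commit(&mine);
    return mine;
}

int main (int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    MPI_Datatype t = make_pair_type();
    int size = 0;
    MPI_Type_size(t, &size);
    std::printf("Pair datatype: %d bytes\n", size);
    MPI_Type_free(&t);
    MPI_Finalize();
    return 0;
}

The resize matters because MPI uses a datatype's extent as the stride when transferring arrays of elements; for Pair the extent already equals sizeof(Pair), so the branch is a no-op here, exactly like the defensive check the patch adds for IntVect and IndexType.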

@@ -0,0 +1,270 @@
--- amrex-18.07/Src/Base/AMReX_ParallelDescriptor.cpp 2018-07-02 19:40:21.000000000 +0200
+++ AMReX_ParallelDescriptor.cpp 2021-06-21 15:25:26.752813320 +0200
@@ -54,13 +54,13 @@
#ifdef BL_USE_UPCXX
UPCXX_MPI_Mode Mode;
-#endif
+#endif
#ifdef BL_USE_MPI3
MPI_Win cp_win;
MPI_Win fb_win;
#endif
-
+
namespace util
{
//
@@ -951,7 +951,7 @@
BL_MPI_REQUIRE( MPI_Allreduce(&recv_team, &recv, 1, Mpi_typemap<Real>::type(), op,
MyTeam().get_lead_comm()) );
}
- BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, Mpi_typemap<Real>::type(),
+ BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, Mpi_typemap<Real>::type(),
0, MyTeam().get_team_comm()) );
}
else
@@ -994,11 +994,11 @@
BL_MPI_REQUIRE( MPI_Reduce(r, recv_team.dataPtr(), cnt, Mpi_typemap<Real>::type(), op,
0, MyTeam().get_team_comm()) );
if (isTeamLead()) {
- BL_MPI_REQUIRE( MPI_Allreduce(recv_team.dataPtr(), recv.dataPtr(), cnt,
+ BL_MPI_REQUIRE( MPI_Allreduce(recv_team.dataPtr(), recv.dataPtr(), cnt,
Mpi_typemap<Real>::type(), op,
MyTeam().get_lead_comm()) );
}
- BL_MPI_REQUIRE( MPI_Bcast(recv.dataPtr(), cnt, Mpi_typemap<Real>::type(),
+ BL_MPI_REQUIRE( MPI_Bcast(recv.dataPtr(), cnt, Mpi_typemap<Real>::type(),
0, MyTeam().get_team_comm()) );
}
else
@@ -1045,7 +1045,7 @@
RankInLeadComm(cpu), MyTeam().get_lead_comm()) );
}
if (sameTeam(cpu)) {
- BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, Mpi_typemap<Real>::type(),
+ BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, Mpi_typemap<Real>::type(),
0, MyTeam().get_team_comm()) );
}
}
@@ -1098,7 +1098,7 @@
RankInLeadComm(cpu), MyTeam().get_lead_comm()) );
}
if (sameTeam(cpu)) {
- BL_MPI_REQUIRE( MPI_Bcast(&recv[0], cnt, Mpi_typemap<Real>::type(),
+ BL_MPI_REQUIRE( MPI_Bcast(&recv[0], cnt, Mpi_typemap<Real>::type(),
0, MyTeam().get_team_comm()) );
}
}
@@ -1148,7 +1148,7 @@
BL_MPI_REQUIRE( MPI_Allreduce(&recv_team, &recv, 1, MPI_LONG, op,
MyTeam().get_lead_comm()) );
}
- BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, MPI_LONG,
+ BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, MPI_LONG,
0, MyTeam().get_team_comm()) );
}
else
@@ -1191,7 +1191,7 @@
BL_MPI_REQUIRE( MPI_Reduce(r, recv_team.dataPtr(), cnt, MPI_LONG, op,
0, MyTeam().get_team_comm()) );
if (isTeamLead()) {
- BL_MPI_REQUIRE( MPI_Allreduce(recv_team.dataPtr(), recv.dataPtr(), cnt,
+ BL_MPI_REQUIRE( MPI_Allreduce(recv_team.dataPtr(), recv.dataPtr(), cnt,
MPI_LONG, op,
MyTeam().get_lead_comm()) );
}
@@ -1295,7 +1295,7 @@
RankInLeadComm(cpu), MyTeam().get_lead_comm()) );
}
if (sameTeam(cpu)) {
- BL_MPI_REQUIRE( MPI_Bcast(&recv[0], cnt, MPI_LONG,
+ BL_MPI_REQUIRE( MPI_Bcast(&recv[0], cnt, MPI_LONG,
0, MyTeam().get_team_comm()) );
}
}
@@ -1345,7 +1345,7 @@
BL_MPI_REQUIRE( MPI_Allreduce(&recv_team, &recv, 1, MPI_INT, op,
MyTeam().get_lead_comm()) );
}
- BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, MPI_INT,
+ BL_MPI_REQUIRE( MPI_Bcast(&recv, 1, MPI_INT,
0, MyTeam().get_team_comm()) );
}
else
@@ -1388,7 +1388,7 @@
BL_MPI_REQUIRE( MPI_Reduce(r, recv_team.dataPtr(), cnt, MPI_INT, op,
0, MyTeam().get_team_comm()) );
if (isTeamLead()) {
- BL_MPI_REQUIRE( MPI_Allreduce(recv_team.dataPtr(), recv.dataPtr(), cnt,
+ BL_MPI_REQUIRE( MPI_Allreduce(recv_team.dataPtr(), recv.dataPtr(), cnt,
MPI_INT, op,
MyTeam().get_lead_comm()) );
}
@@ -1492,7 +1492,7 @@
RankInLeadComm(cpu), MyTeam().get_lead_comm()) );
}
if (sameTeam(cpu)) {
- BL_MPI_REQUIRE( MPI_Bcast(&recv[0], cnt, MPI_LONG,
+ BL_MPI_REQUIRE( MPI_Bcast(&recv[0], cnt, MPI_LONG,
0, MyTeam().get_team_comm()) );
}
}
@@ -1735,13 +1735,13 @@
return m_finished;
}
-void ParallelDescriptor::EndParallel ()
+void ParallelDescriptor::EndParallel ()
{
ParallelContext::pop();
}
void ParallelDescriptor::Abort (int s, bool backtrace)
-{
+{
if (backtrace && amrex::system::signal_handling) {
BLBackTrace::handler(s);
} else {
@@ -1950,23 +1950,18 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- IntVect iv[2]; // Used to construct the data types
- MPI_Datatype types[] = {
- MPI_LB,
- MPI_INT,
- MPI_UB};
- int blocklens[] = { 1, AMREX_SPACEDIM, 1};
- MPI_Aint disp[3];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].vect, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
- }
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
- BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
+ MPI_Datatype types[] = { MPI_INT };
+ int blocklens[] = { AMREX_SPACEDIM };
+ MPI_Aint disp[] = { 0 };
+ BL_MPI_REQUIRE( MPI_Type_create_struct(1, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(IntVect)) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(IntVect), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
+ }
+ BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;
}
@@ -1976,23 +1971,18 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- IndexType iv[2]; // Used to construct the data types
- MPI_Datatype types[] = {
- MPI_LB,
- MPI_UNSIGNED,
- MPI_UB};
- int blocklens[] = { 1, 1, 1};
- MPI_Aint disp[3];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].itype, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
- }
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
- BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
+ MPI_Datatype types[] = { MPI_UNSIGNED };
+ int blocklens[] = { 1 };
+ MPI_Aint disp[] = { 0 };
+ BL_MPI_REQUIRE( MPI_Type_create_struct(1, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(IndexType)) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(IndexType), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
+ }
+ BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;
}
@@ -2002,27 +1992,29 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- Box iv[2]; // Used to construct the data types
- MPI_Datatype types[] = {
- MPI_LB,
- Mpi_typemap<IntVect>::type(),
- Mpi_typemap<IntVect>::type(),
- Mpi_typemap<IndexType>::type(),
- MPI_UB};
- int blocklens[] = { 1, 1, 1, 1, 1};
- MPI_Aint disp[5];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].smallend, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].bigend, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].btype, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
- }
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
- BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
+ Box bx[2];
+ MPI_Datatype types[] = {
+ Mpi_typemap<IntVect>::type(),
+ Mpi_typemap<IntVect>::type(),
+ Mpi_typemap<IndexType>::type(),
+ };
+ int blocklens[] = { 1, 1, 1 };
+ MPI_Aint disp[3];
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].smallend, &disp[0]) );
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].bigend, &disp[1]) );
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].btype, &disp[2]) );
+ disp[2] -= disp[0];
+ disp[1] -= disp[0];
+ disp[0] = 0;
+ BL_MPI_REQUIRE( MPI_Type_create_struct(3, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(bx[0])) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(bx[0]), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
+ }
+ BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;
}
@@ -2121,7 +2113,7 @@
team_ranks[i] = MyTeamLead() + i;
}
BL_MPI_REQUIRE( MPI_Group_incl(grp, team_size, team_ranks, &team_grp) );
- BL_MPI_REQUIRE( MPI_Comm_create(ParallelDescriptor::Communicator(),
+ BL_MPI_REQUIRE( MPI_Comm_create(ParallelDescriptor::Communicator(),
team_grp, &m_Team.m_team_comm) );
std::vector<int>lead_ranks(m_Team.m_numTeams);
@@ -2129,7 +2121,7 @@
lead_ranks[i] = i * team_size;
}
BL_MPI_REQUIRE( MPI_Group_incl(grp, lead_ranks.size(), &lead_ranks[0], &lead_grp) );
- BL_MPI_REQUIRE( MPI_Comm_create(ParallelDescriptor::Communicator(),
+ BL_MPI_REQUIRE( MPI_Comm_create(ParallelDescriptor::Communicator(),
lead_grp, &m_Team.m_lead_comm) );
BL_MPI_REQUIRE( MPI_Group_free(&grp) );

packages/amrex/package.py (new file)

@@ -0,0 +1,65 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack.package import *
from spack.pkg.builtin.amrex import Amrex as SpackAmrex


class Amrex(SpackAmrex):
    version("18.07", sha256="2e8d3a91c5d972e3cebb88d10c4c92112459c88c2342c5a63337f3110bdbff13")

    # Config options
    variant(
        "dimensions",
        default="3",
        values=("1", "2", "3"),
        multi=False,
        description="Dimensionality",
        when="@18.07",
    )
    variant("fbaselib", default=False, description="Fbaselib", when="@18.07")
    variant("fpe", default=False, description="FPE", when="@18.07")

    patch("AMReX_ParallelDescriptor.patch", when="@18.07+mpi")

    @when("@18.07")
    def cmake_args(self):
        args = [
            self.define_from_variant("DIM", "dimensions"),
            self.define_from_variant("BUILD_SHARED_LIBS", "shared"),
            self.define_from_variant("ENABLE_MPI", "mpi"),
            self.define_from_variant("ENABLE_OMP", "openmp"),
            self.define_from_variant("ENABLE_FORTRAN_INTERFACES", "fortran"),
            self.define_from_variant("ENABLE_EB", "eb"),
            self.define_from_variant("ENABLE_LINEAR_SOLVERS", "linear_solvers"),
            self.define_from_variant("ENABLE_AMRDATA", "amrdata"),
            self.define_from_variant("ENABLE_PARTICLES", "particles"),
            self.define_from_variant("ENABLE_SUNDIALS", "sundials"),
            self.define_from_variant("ENABLE_HDF5", "hdf5"),
            self.define_from_variant("ENABLE_HYPRE", "hypre"),
            self.define_from_variant("ENABLE_PETSC", "petsc"),
            self.define_from_variant("ENABLE_CUDA", "cuda"),
            self.define_from_variant("ENABLE_PIC", "pic"),
            self.define_from_variant("ENABLE_FBASELIB", "fbaselib"),
            self.define_from_variant("ENABLE_FPE", "fpe"),
            "-DENABLE_DP=%s" % self.spec.variants["precision"].value.upper(),
            "-DENABLE_DP_PARTICLES=%s" % self.spec.variants["precision"].value.upper(),
        ]
        if "+linear_solvers" in self.spec:
            args.append("-DENABLE_LINEAR_SOLVERS_LEGACY=1")
        if self.spec.satisfies("%fj"):
            args.append("-DCMAKE_Fortran_MODDIR_FLAG=-M")
        if "+cuda" in self.spec:
            cuda_arch = self.spec.variants["cuda_arch"].value
            args.append("-DCUDA_ARCH=" + self.get_cuda_arch_string(cuda_arch))
        return args

packages/h5hut/package.py (new file)

@@ -0,0 +1,13 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack.package import *
from spack.pkg.builtin.h5hut import H5hut as SpackH5hut


class H5hut(SpackH5hut):
    version("2.0.0rc6", sha256="980a5c634877265bd3a862fbd439d973d44be9c1d2f840c3635e8c6375d62d7e")

@@ -0,0 +1,15 @@
Common subdirectories: mithra-2.0.orig/doc and mithra-2.0/doc
diff -u mithra-2.0.orig/makefile mithra-2.0/makefile
--- mithra-2.0.orig/makefile 2020-09-29 14:50:06.000000000 +0200
+++ mithra-2.0/makefile 2022-09-29 11:56:42.000000000 +0200
@@ -1,6 +1,7 @@
SHELL = /bin/sh
COMP = mpic++
+CFLAGS+=-fPIC
CFLAGS+=-std=c++11
CFLAGS+=-O3
Only in mithra-2.0: obj
Common subdirectories: mithra-2.0.orig/prj and mithra-2.0/prj
Common subdirectories: mithra-2.0.orig/src and mithra-2.0/src

@@ -0,0 +1,31 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack.package import *


class Mithra(MakefilePackage):
    """MITHRA is a full-wave numerical solver for free-electron lasers."""

    homepage = "https://github.com/aryafallahi/mithra/tree/master"
    url = "https://github.com/aryafallahi/mithra/archive/refs/tags/2.0.tar.gz"

    maintainers("germanne", "gsell")

    license("UNKNOWN")

    version("2.0", sha256="66f27ba6ca2c1ab05abb76338d00e661a6fe96b95283b85409c37625b4758deb")

    variant("mpi", default=True, description="Enable MPI support")

    depends_on("mpi", when="+mpi")

    conflicts("~mpi")

    def setup_build_environment(self, env):
        env.set("PREFIX", self.prefix)

    def edit(self, spec, prefix):
        pass

@@ -0,0 +1,113 @@
diff -bur amrex-18.07/Src/Base/AMReX_ParallelDescriptor.cpp amrex-18.07.1/Src/Base/AMReX_ParallelDescriptor.cpp
--- amrex-18.07/Src/Base/AMReX_ParallelDescriptor.cpp 2018-07-02 19:40:21
+++ amrex-18.07.1/Src/Base/AMReX_ParallelDescriptor.cpp 2023-07-19 16:50:02
@@ -1950,22 +1950,17 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- IntVect iv[2]; // Used to construct the data types
- MPI_Datatype types[] = {
- MPI_LB,
- MPI_INT,
- MPI_UB};
- int blocklens[] = { 1, AMREX_SPACEDIM, 1};
- MPI_Aint disp[3];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].vect, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
+ MPI_Datatype types[] = { MPI_INT };
+ int blocklens[] = { AMREX_SPACEDIM };
+ MPI_Aint disp[] = { 0 };
+ BL_MPI_REQUIRE( MPI_Type_create_struct(1, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(IntVect)) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(IntVect), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
}
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;
@@ -1976,22 +1971,17 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- IndexType iv[2]; // Used to construct the data types
- MPI_Datatype types[] = {
- MPI_LB,
- MPI_UNSIGNED,
- MPI_UB};
- int blocklens[] = { 1, 1, 1};
- MPI_Aint disp[3];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].itype, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
+ MPI_Datatype types[] = { MPI_UNSIGNED };
+ int blocklens[] = { 1 };
+ MPI_Aint disp[] = { 0 };
+ BL_MPI_REQUIRE( MPI_Type_create_struct(1, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(IndexType)) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(IndexType), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
}
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;
@@ -2002,26 +1992,28 @@
static MPI_Datatype mine(MPI_DATATYPE_NULL);
if ( mine == MPI_DATATYPE_NULL )
{
- Box iv[2]; // Used to construct the data types
+ Box bx[2];
MPI_Datatype types[] = {
- MPI_LB,
Mpi_typemap<IntVect>::type(),
Mpi_typemap<IntVect>::type(),
Mpi_typemap<IndexType>::type(),
- MPI_UB};
- int blocklens[] = { 1, 1, 1, 1, 1};
- MPI_Aint disp[5];
- int n = 0;
- BL_MPI_REQUIRE( MPI_Address(&iv[0], &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].smallend, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].bigend, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[0].btype, &disp[n++]) );
- BL_MPI_REQUIRE( MPI_Address(&iv[1], &disp[n++]) );
- for ( int i = n-1; i >= 0; i-- )
- {
- disp[i] -= disp[0];
+ };
+ int blocklens[] = { 1, 1, 1 };
+ MPI_Aint disp[3];
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].smallend, &disp[0]) );
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].bigend, &disp[1]) );
+ BL_MPI_REQUIRE( MPI_Get_address(&bx[0].btype, &disp[2]) );
+ disp[2] -= disp[0];
+ disp[1] -= disp[0];
+ disp[0] = 0;
+ BL_MPI_REQUIRE( MPI_Type_create_struct(3, blocklens, disp, types, &mine) );
+ MPI_Aint lb, extent;
+ BL_MPI_REQUIRE( MPI_Type_get_extent(mine, &lb, &extent) );
+ if (extent != sizeof(bx[0])) {
+ MPI_Datatype tmp = mine;
+ BL_MPI_REQUIRE( MPI_Type_create_resized(tmp, 0, sizeof(bx[0]), &mine) );
+ BL_MPI_REQUIRE( MPI_Type_free(&tmp) );
}
- BL_MPI_REQUIRE( MPI_Type_struct(n, blocklens, disp, types, &mine) );
BL_MPI_REQUIRE( MPI_Type_commit( &mine ) );
}
return mine;

packages/opal/package.py (new file)

@@ -0,0 +1,121 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack.package import *


class Opal(CMakePackage):
    """OPAL (Object Oriented Parallel Accelerator Library) is a parallel
    open-source tool for charged-particle optics in linear accelerators and
    rings, including 3D space charge. Using the MAD language with extensions,
    OPAL can run on a laptop as well as on the largest high-performance
    computing systems. OPAL is built from the ground up as a parallel
    application, exemplifying the fact that high-performance computing is
    the third leg of science, complementing theory and experiment."""

    homepage = "https://amas.web.psi.ch/opal/Documentation/master/OPAL_Manual.html"
    url = "https://gitlab.psi.ch/OPAL/src/-/archive/OPAL-2022.1.0/src-OPAL-2022.1.0.tar.gz"
    git = "https://gitlab.psi.ch/OPAL/src.git"

    maintainers("germanne", "gsell")

    license("GPLv3")

    version("master", branch="master")
    version("2022.1.0", sha256="31f6811032e9d5400169d28a8ae6c7dee2e4a803ee82e90584d31ff7a0a18d80")

    variant(
        "build_type",
        default="Release",
        description="CMake build type",
        values=("Debug", "Release", "RelWithDebInfo", "MinSizeRel"),
    )
    variant("mpi", default=True, description="Enable MPI support")
    variant("tests", default=True, description="Enable OPAL unit-tests")
    variant("amr", default=True, description="Enable AMR solver")
    variant("amr_mg_solver", default=True, description="Enable AMR multigrid solver")
    variant("static", default=True, description="Use static libraries")
    variant("bandrf", default=True, description="Compile BANDRF field conversion scripts")
    variant("opal_fel", default=True, description="Enable full-wave solver")
    variant("saamg_solver", default=True, description="Enable SAAMG solver")
    variant("python", default=True, description="Enable PyOPAL")
    variant("mslang", default=True, description="Enable mslang")
    variant("ippltests", default=True, description="Enable IPPL tests")

    depends_on("openblas")
    depends_on("boost+chrono+filesystem+iostreams+regex+serialization+system+timer")
    depends_on("boost+mpi", when="+mpi")
    depends_on("boost+python", when="+python")
    depends_on("python@3.0", when="+python")
    depends_on(
        "boost+mpi+chrono+filesystem+iostreams+regex+serialization+system+timer", when="+mpi"
    )
    depends_on("gsl~shared")
    depends_on("h5hut+mpi", when="+mpi")
    depends_on("h5hut~mpi", when="~mpi")
    depends_on(
        "amrex@18.07 precision=double dimensions=3 +mpi~openmp+particles+pic~eb",
        when="+amr+amr_mg_solver+mpi",
    )
    depends_on("googletest@1.10.0:", when="+tests")
    depends_on("mithra@2.0", when="+opal_fel")
    depends_on(
        "trilinos@12.0.1: cxxstd=17 +mpi+muelu+nox+zoltan+zoltan2+isorropia gotype=long",
        when="+saamg_solver",
    )
    depends_on(
        "trilinos@12.0.1: cxxstd=17 +mpi+muelu+nox+zoltan+zoltan2+isorropia gotype=long",
        when="+amr_mg_solver",
    )
    depends_on("parmetis@4.0.3:", when="+saamg_solver")
    depends_on("parmetis@4.0.3:", when="+amr_mg_solver")

    # @master
    depends_on("h5hut@2.0.0rc6", when="@master")
    depends_on("hdf5@1.10.11", when="@master")
    depends_on("openmpi@4.1.6", when="@master+mpi")
    depends_on("boost@1.84.0", when="@master+saamg_solver")
    depends_on("trilinos@13.4.0:", when="@master+saamg_solver")
    depends_on("trilinos@13.4.0:", when="@master+amr_mg_solver")

    # @2022.1%gcc@10.4.0
    depends_on("h5hut@2.0.0rc6", when="@2022.1")
    depends_on("hdf5@1.10.8", when="@2022.1")
    depends_on("openmpi@4.1.5", when="@2022.1+mpi")
    depends_on("boost@1.82.0", when="@2022.1+saamg_solver")
    depends_on("trilinos@13.4.0", when="@2022.1+saamg_solver")
    depends_on("trilinos@13.4.0", when="@2022.1+amr_mg_solver")

    conflicts("~amr", when="+amr_mg_solver")

    # The AMReX version is too old for any newer Trilinos version.
    requires(
        "^trilinos@13.4.0",
        when="^amrex@18.07",
        msg="AMReX version is too old for any newer Trilinos or GCC versions.",
    )

    def cmake_args(self):
        if "+mpi" in self.spec:
            env["CC"] = self.spec["mpi"].mpicc
            env["CXX"] = self.spec["mpi"].mpicxx
            env["FC"] = self.spec["mpi"].mpifc
        if "+opal_fel" in self.spec:
            env["MITHRA_PREFIX"] = self.spec["mithra"].prefix

        args = [
            self.define_from_variant("BUILD_OPAL_UNIT_TESTS", "tests"),
            self.define_from_variant("ENABLE_AMR", "amr"),
            self.define_from_variant("ENABLE_AMR_MG_SOLVER", "amr_mg_solver"),
            self.define_from_variant("ENABLE_BANDRF", "bandrf"),
            self.define_from_variant("USE_STATIC_LIBRARIES", "static"),
            self.define_from_variant("ENABLE_OPAL_FEL", "opal_fel"),
            self.define_from_variant("ENABLE_SAAMG_SOLVER", "saamg_solver"),
            self.define_from_variant("BUILD_OPAL_PYTHON", "python"),
            self.define_from_variant("ENABLE_IPPLTESTS", "ippltests"),
            self.define_from_variant("ENABLE_MSLANG", "mslang"),
        ]
        return args