first part of LBL merge: h5core and H5Part

This commit is contained in:
Marc Howison
2010-06-26 17:29:37 +00:00
parent 6ea921eba4
commit 8783ac2cf2
44 changed files with 2192 additions and 2002 deletions
+3
View File
@@ -374,6 +374,7 @@ src/C/H5Fed_retrieve.c -text
src/C/H5Fed_store.c -text
src/C/H5Fed_tags.c -text
src/C/H5Part.c -text
src/C/H5_attrib.c -text
src/C/H5_inquiry.c -text
src/C/Makefile.am -text
src/Fortran/H5BlockF.c -text
@@ -444,6 +445,7 @@ src/h5core/h5t_tags.c -text
src/h5core/h5t_tags_private.h -text
src/h5core/h5t_types_private.h -text
src/h5core/h5u_errorhandling_private.h -text
src/h5core/h5u_model.c -text
src/h5core/h5u_readwrite.c -text
src/h5core/h5u_types_private.h -text
src/include/H5.h -text
@@ -475,6 +477,7 @@ src/include/h5core/h5t_ref_elements.h -text
src/include/h5core/h5t_retrieve.h -text
src/include/h5core/h5t_storemesh.h -text
src/include/h5core/h5t_tags.h -text
src/include/h5core/h5u_model.h -text
src/include/h5core/h5u_readwrite.h -text
test/H5Block/BlockTestSpecs.txt -text
test/H5Block/H5BlockDissolveGhosts.c -text
+2 -7
View File
@@ -1,9 +1,5 @@
SUBDIRS = \
doc \
src/C \
src/h5core \
test \
tools
SUBDIRS = \
src
MAINTAINERCLEANFILES = \
config.h \
@@ -11,4 +7,3 @@ MAINTAINERCLEANFILES = \
config.status \
stamp-h.in \
stamp-h1
-147
View File
@@ -1,147 +0,0 @@
Error Handling
--------------
Now all functions, in which an error could occur, are returning a
value. This value is either a 64bit integer or a pointer.
A negative 64bit integer as result indicates an error. Values >= 0
indicate successful execution. This is similar to the convention
used in UNIX system calls, but we do not return -1 but the error
number (which is always negative).
For functions returning a pointer the NULL-pointer is used to indicate
an error. You can call the function H5PartGetErrno() to get the error
number. For the time being there is no strerror(3) or perror(3)
equivalent.
API Changes
-----------
There are several changes in the API of H5Part. The biggest change is
a side-effect of the implementation of strict error handling. The
impact to the existing C/C++ code is very small. But now you *can*
and *should* implement some kind of error handling. The simplest
error handling is to set an error handler which aborts the program as
soon as an error occurs.
The next "biggest" change is the use of H5Part types instead of "long
long" and "double". The replacement of "long long" is "h5part_int64_t"
and for "double" "h5part_float64_t". This change has no effect in
Fortran.
In some functions the argument type changed from "int" to
"h5part_int64_t". Since on most systems "int" is a 32bit integer,
calls to these functions must be adapted to the new API. In C/C++ the
compiler will complain about it. But in Fortran you will *not* get an
error message. So, you must check your Fortran code carefully.
Changes in detail (not listed are functions where only the return
type changed from any to "h5part_int64_t"):
New API Changes to old API
h5part_int64_t void
H5PartSetNumParticles (
H5PartFile *f,
h5part_int64_t nparticles long long
);
h5part_int64_t int
H5PartWriteDataFloat64 (
H5PartFile *f,
char *name,
h5part_float64_t *dta double
);
h5part_int64_t int
H5PartWriteDataInt64 (
H5PartFile *f,
char *name,
h5part_int64_t *dta long long
);
h5part_int64_t void
H5PartSetStep (
H5PartFile *f,
h5part_int64_t step int
);
h5part_int64_t void
H5PartSetView (
H5PartFile *f,
h5part_int64_t start, long long
h5part_int64_t end long long
);
h5part_int64_t int
H5PartGetView (
H5PartFile *f,
h5part_int64_t *start, long long
h5part_int64_t *end long long
);
h5part_int64_t int
H5PartReadDataFloat64 (
H5PartFile *f,
char *name,
h5part_float64_t *dta double
);
h5part_int64_t int
H5PartReadDataInt64 (
H5PartFile *f,
char *name,
h5part_int64_t *dta long long
);
h5part_int64_t void
H5PartReadParticleStep (
H5PartFile *f,
h5part_int64_t step, int
h5part_float64_t *x, double
h5part_float64_t *y, double
h5part_float64_t *z, double
h5part_float64_t *px, double
h5part_float64_t *py, double
h5part_float64_t *pz, double
h5part_int64_t *id long long
);
New functions:
h5part_int64_t
H5PartSetVerbosityLevel (
unsigned int
);
h5part_int64_t
H5PartSetErrorHandler (
h5part_error_handler handler
);
h5part_int64_t
H5PartGetErrno (
void
);
h5part_int64_t
H5PartDefaultErrorHandler (
const char *funcname,
const h5part_int64_t eno,
const char *fmt,
...
);
h5part_int64_t
H5PartAbortErrorHandler (
const char *funcname,
const h5part_int64_t eno,
const char *fmt,
...
);
Removed functions:
int
H5PartFileIsValid (
H5PartFile *f
);
-418
View File
@@ -1,418 +0,0 @@
==============================================================================
README file for H5Part configure
==============================================================================
+ 0. HDF5 library
-----------------
Make sure you have a working version of the HDF5 library on your computer. If you plan
to use parallel I/O you need to use a parallel HDF5 version. If you don't have the
library download the sources from http://hdf.ncsa.uiuc.edu/HDF5/
+ 1. Quick start (If you feel lucky...)
----------------------------------------
For many platforms, where compilers are installed in the "default" location
and all the environment variables are set correctly, it should be sufficient
to type:
./configure [OPTIONS] && make [install]
to have the libraries and test program compiled.
==============================================================================
+ 2. configure line options
----------------------------
(1) Enable options
--enable-fortran
--enable-parallel
--enable-tools
--enable-python
--enable-64 (only for AIX and Irix)
(2) Setting compilers related variables manually
To assign environment variables (e.g., CC, CFLAGS...), specify them as
VAR=VALUE. See below for descriptions of some of the useful variables.
Compiler environment variables:
CC C compiler command
CXX C++ compiler command
FC Fortran compiler command
(3) Setting prefix for installation manually
--prefix=PREFIX install files in PREFIX
For more information, type:
./configure --help
==============================================================================
+ 3. Platform specific settings for compiling libraries & test programs
------------------------------------------------------------------------
Building
make : builds the libraries and test programs
make install: builds and installs
make clean : removes extraneous object files
make distclean : returns configuration to unconfigured state
As tested on hosts:
(1) Davinci <davinci.nersc.gov> (Linux"SUSE"-ia64)
---------------------------------------------------
FIRST THING FIRST!! Type:
module load intel
to have the proper compilers set up for use.
module load hdf5_par
[NOTE: module load hdf5 does not work at this moment.
Possible installation error of HDF5 on Davinci.]
Brief profile:
CC = icc
CXX = icc
FC = ifort
MPICC = icc
MPICXX = icc
MPIFC = ifort
If default doesn't work, try:
For C shell:
env CC=icc CXX=icc FC=ifort MPICC=icc MPICXX=icc MPIFC=ifort ./configure && make install
For Korn or Bourne shell:
CC=icc CXX=icc FC=ifort MPICC=icc MPICXX=icc MPIFC=ifort ./configure && make install
And, of course, you should add configure options, such as --enable-fortran --enable-parallel.
On this host, executing:
For C shell:
env CC=icc CXX=icc FC=ifort MPICC=icc MPICXX=icc MPIFC=ifort ./configure --enable-fortran --enable-parallel && make install
For Korn or Bourne shell:
CC=icc CXX=icc FC=ifort MPICC=icc MPICXX=icc MPIFC=ifort ./configure --enable-fortran --enable-parallel && make install
has functioned correctly.
To run test program, go to test directory:
(i) Serial test
run:
./H5testF
./H5PartTest
(ii) Parallel test
run:
mpirun 2 H5testFpar
mpirun 2 H5PartTestP
NOTE: Number 2 above is an arbitrary number that indicates the number of processes.
(2) Jacquard <jacquard.nersc.gov> (Linux"SUSE"-x86_64)
-------------------------------------------------------
Brief profile:
CC = pathcc
CXX = pathCC
FC = pathf90
MPICC = mpicc
MPICXX = mpicxx
MPIFC = mpif90
If default doesn't work, try:
For C shell:
env CC=pathcc CXX=pathCC FC=pathf90 MPICC=mpicc MPICXX=mpicxx MPIFC=mpif90 ./configure && make install
For Korn or Bourne shell:
CC=pathcc CXX=pathCC FC=pathf90 MPICC=mpicc MPICXX=mpicxx MPIFC=mpif90 ./configure && make install
And, of course, you should add configure options, such as --enable-fortran --enable-parallel.
On this host, executing:
For C shell:
env CC=pathcc CXX=pathCC FC=pathf90 MPICC=mpicc MPICXX=mpicxx MPIFC=mpif90 ./configure --enable-fortran --enable-parallel && make install
For Korn or Bourne shell:
CC=pathcc CXX=pathCC FC=pathf90 MPICC=mpicc MPICXX=mpicxx MPIFC=mpif90 ./configure --enable-fortran --enable-parallel && make install
has functioned correctly.
To run test program, go to test directory:
(i) Serial test
run:
./H5testF
./H5PartTest
(ii) Parallel test
In Jacquard, we need a script to run parallel programs, since Jacquard does not support interactive parallel processing. Instead, we need to submit the task to a queue. A sample script (run_H5testFpar.scr) may look like below:
+++++++++++++++++++++++++++++++++++++++
#!/bin/csh
#PBS -l nodes=1:ppn=2,walltime=00:05:00
#PBS -N H5testFpar
#PBS -o H5testFpar.out
#PBS -e H5testFpar.err
#PBS -q debug
#PBS -A mpccc
#PBS -V
setenv PBS_OWORKDIR /home/H5Part/test
cd $PBS_O_WORKDIR
mpirun -np 2 ./H5testFpar
+++++++++++++++++++++++++++++++++++++++
Then, we need to submit the task to a queue by running:
qsub run_H5testFpar.scr
After processed, the result will appear in H5testFpar.out, and error messages will appear in H5testFpar.err.
(3) Bassi <bassi.nersc.gov> (AIX5.3.0.0-Power 3)
-------------------------------------------------
FIRST THING FIRST!! Type:
module load gcc
to have the proper compilers set up for use.
Brief profile:
CC = cc_r
CXX = cc_r
FC = xlf_r
MPICC = mpcc_r
MPICXX = mpcc_r
MPIFC = mpxlf_r
If default doesn't work, try:
For C shell:
env CC=cc_r CXX=cc_r FC=xlf_r MPICC=mpcc_r MPICXX=mpcc_r MPIFC=mpxlf_r ./configure && make install
For Korn or Bourne shell:
CC=cc_r CXX=cc_r FC=xlf_r MPICC=mpcc_r MPICXX=mpcc_r MPIFC=mpxlf_r ./configure && make install
And, of course, you should add configure options, such as --enable-fortran --enable-parallel.
On this host, executing:
For C shell:
env CC=cc_r CXX=cc_r FC=xlf_r MPICC=mpcc_r MPICXX=mpcc_r MPIFC=mpxlf_r ./configure --enable-fortran --enable-parallel && make install
For Korn or Bourne shell:
CC=cc_r CXX=cc_r FC=xlf_r MPICC=mpcc_r MPICXX=mpcc_r MPIFC=mpxlf_r ./configure --enable-fortran --enable-parallel && make install
has functioned correctly.
To run test program, go to test directory:
(i) Serial test
run:
./H5testF
./H5PartTest
(ii) Parallel test
run:
unsetenv MP_SINGLE_THREAD
poe ./H5PartTestP -procs 2 -nodes 1
poe ./H5testFpar -procs 2 -nodes 1
For more details on how to run parallel programs on Bassi, go to:
http://www.nersc.gov/nusers/resources/bassi/running_jobs/
(4) Starsky <starsky.lbl.gov> (Darwin8.5.0-PowerPC)
----------------------------------------------------
FIRST THING FIRST!! Set environment variables: (in tcsh)
setenv LD_LIBRARY_PATH /usr/local/g95/lib/gcc-lib/powerpc-apple-darwin8.5.0/4.0.3/
setenv PATH ${PATH}:/usr/local/g95/bin
NOTE: This environment variable setting is specific to Starsky. It is to make configure find the fortran compiler.
Brief profile:
CC = gcc
CXX = g++
FC = g95
If default doesn't work, try:
For C shell:
env CC=gcc CXX=g++ FC=g95 ./configure && make install
For Korn or Bourne shell:
CC=gcc CXX=g++ FC=g95 ./configure && make install
And, of course, you should add configure options, such as --enable-fortran --enable-parallel.
On this host, executing:
For C shell:
env CC=gcc CXX=g++ FC=g95 ./configure --enable-fortran && make install
For Korn or Bourne shell:
CC=gcc CXX=g++ FC=g95 ./configure --enable-fortran && make install
has functioned correctly.
To run test program, go to test directory:
(i) Serial test
run:
./H5testF
./H5PartTest
(5) Linux AMD64 Visualization group workstations (Linux"SUSE"-x86_64)
---------------------------------------------------------
Brief profile:
CC = gcc
CXX = g++
FC = g95
If default doesn't work, try:
For C shell:
env CC=gcc CXX=g++ FC=g95 ./configure && make install
For Korn or Bourne shell:
CC=gcc CXX=g++ FC=g95 ./configure && make install
And, of course, you should add configure options, such as --enable-fortran --enable-parallel.
On this host, executing:
For C shell:
env CC=gcc CXX=g++ FC=g95 ./configure --enable-fortran && make install
For Korn or Bourne shell:
CC=gcc CXX=g++ FC=g95 ./configure --enable-fortran && make install
has functioned correctly.
To run test program, go to test directory:
(i) Serial test
run:
./H5testF
./H5PartTest
(6) Cray XT3 @ CSCS gele
1) modules/3.1.6 9) PrgEnv-pgi/1.4.26 17) xt-lustre-ss/1.4.26
2) MySQL/4.0.26 10) xt-pbs/5.3.4 18) Base-opts/1.4.26
3) acml/3.0 11) xt-service/1.4.26 19) subversion/1.3.2
4) pgi/6.1.4 12) xt-libc/1.4.26 20) zlib/1.2.3
5) totalview/7.2.0 13) xt-os/1.4.26 21) szip/2.0
6) xt-libsci/1.4.26 14) xt-catamount/1.4.26 22) hdf5/1.6.5
7) xt-mpt/1.4.26 15) xt-boot/1.4.26
8) xt-pe/1.4.26 16) xt-crms/1.4.26
CFLAGS="-I$H5HOME/include -DF77_SINGLE_UNDERSCORE" LDFLAGS="-L$H5HOME/lib -L$ZHOME/lib" ./configure --enable-parallel --enable-fortran
(7) merlin00 / merlin3
Currently Loaded Modulefiles:
1) hdf5/hdf5-1.6.5 2) root/root-5.10.00 3) mpi/mpich2-1.0.3-pgi-6.1 4) pgi/pgi_64-6.1
FC=mpif90 ./configure --enable-parallel --enable-fortran
==============================================================================
+ 4. Trouble shooting (Things to check for...)
--------------------------------------------
(0) Have you set the LD_LIBRARY_PATH?
---Some systems require the user to manually set the environment variable, LD_LIBRARY_PATH. To do so:
set the environment variable HDF5ROOT or PHDF5ROOT to point to your installation of HDF5 (serial and/or parallel respectively).
For Korn or Bourne shell:
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${HDF5ROOT}/lib;export LD_LIBRARY_PATH
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:{PHDF5ROOT}/lib;export LD_LIBRARY_PATH
For C shell:
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:${HDF5ROOT}/lib
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:${PHDF5ROOT}/lib
Note that the values of HDF5ROOT & PHDF5ROOT will be in the "summary" section when you run configure.
An improperly set LD_LIBRARY_PATH value may result in a runtime error:
./H5PartTest: error while loading shared libraries: libhdf5.so.0: cannot open shared object file: No such file or directory
(1) Are the compilers set correctly?
---Check with: which COMPILER
(2) Have you installed HDF5?
---You can get it at: http://vis.lbl.gov/Research/AcceleratorSAPP/index.html
(3) Have you loaded the proper modules?
---It is necessary in Davinci & Bassi, and configure && make install may not work without it.
+8 -38
View File
@@ -1,12 +1,12 @@
# Every configure script must call AC_INIT before doing anything else.
# AC_INIT (package, version, [bug-report], [tarname])
AC_INIT([H5Part], [1.99.0], [h5part@lists.psi.ch], H5Part)
AC_INIT([H5hut], [1.99.0], [h5part@lists.psi.ch], H5Part)
# Ensure that a recent enough version of Autoconf is being used.
# If the version of Autoconf being used to create configure is earlier than version,
# print an error message to the standard error output and do not create configure.
#AC_PREREQ(2.59)
AC_PREREQ(2.59)
# should be called right after AC_INIT.
@@ -54,13 +54,6 @@ AC_ARG_WITH(
[path to HDF5 installation [default=""]])],
[HDF5PATH=$withval], [HDF5PATH=""])
AC_ARG_WITH(
[mpiposix],
[AC_HELP_STRING([--with-mpiposix],
[enable MPI-POSIX VFD in HDF5 [default=no]])],
[USE_MPIPOSIX=$enableval])
###############################################################################
################# A SIMPLE WORK AROUND TO USE ENV. VARS #######################
###############################################################################
@@ -134,18 +127,7 @@ fi
# If CC is not already set in the environment, check for gcc and cc, then
# for other C compilers.
# Set output variable CC to the name of the compiler found.
AC_PROG_CC(pathcc icc cc_r gcc cc)
# AC_PROG_CC doesn't pick up cc_r in Bassi. The following AC_PATH_PROGS
# is the fix.
AC_PATH_PROGS([BAS_CC], [cc_r], [], [$PATH])
# if BAS_CC not empty
if test -n "$BAS_CC"; then
AC_MSG_CHECKING([CC setting for Bassi])
CC=$BAS_CC
AC_MSG_RESULT([CC = $CC])
fi
AC_PROG_CC(pgcc pathcc icc cc_r gcc cc)
# Determine a C++ compiler to use.
@@ -155,17 +137,7 @@ fi
# then search for a C++ compiler under the likely names (first g++ and c++
# then other names).
# If none of those checks succeed, then as a last resort set CXX to g++.
AC_PROG_CXX(pathCC icc cc_r g++ gcc cc)
# DAVINCI SPECIFIC!!!
AC_MSG_CHECKING([for icc linker -lstdc++ flag])
if test ${CXX} = "icc"; then
AC_MSG_RESULT([yes])
STDCXX="-lstdc++"
else
AC_MSG_RESULT([no])
fi
AC_PROG_CXX(pgCC pathCC icc cc_r g++ gcc cc)
# Set output variable INSTALL to the path of a BSD-compatible install program,
@@ -177,7 +149,6 @@ AC_PROG_INSTALL
# AM_INIT_AUTOMAKE is required to use autoconf with automake
AM_INIT_AUTOMAKE()
AC_PROG_RANLIB
# Default prefix for bindir, etc... (eg >> ./build/bin)
@@ -203,7 +174,7 @@ AC_MSG_CHECKING([if fortran interface enabled])
if test "X$USE_FORTRAN" = "Xyes"; then
AC_MSG_RESULT([yes])
AC_PROG_FC(ifort xlf_r pathf90 g95 g90 ftn gfortran)
AC_PROG_FC(pgf90 ifort xlf_r pathf90 g95 g90 ftn gfortran)
if test -z "$FC" ; then
AC_MSG_ERROR([Cannot find a fortran compiler!!!])
exit 1
@@ -255,10 +226,6 @@ if test "X$USE_PARALLEL" = "Xyes"; then
MPI_Comm_size( comm, &n ); ],
[AC_MSG_RESULT([yes]); r='yes'], [AC_MSG_RESULT([no]); r='no'] )
if test "X$USE_MPIPOSIX" = "Xyes"; then
CFLAGS="${CFLAGS} -DH5PART_MPIPOSIX"
fi
if test "X$r" = "Xno"; then
AC_PATH_PROGS([MPICC], [mpicc mpcc_r], [], [$PATH])
AC_PATH_PROGS([MPICXX], [mpicxx mpcc_r], [], [$PATH])
@@ -476,6 +443,9 @@ if test -n "$SAVE_MPIINC"; then
fi
#CFLAGS="$SAVE_CFLAGS ${CFLAGS}"
if test -n "$SAVE_CFLAGS"; then
CFLAGS=$SAVE_CFLAGS $CFLAGS
fi
if test -n "$SAVE_FFLAGS"; then
FFLAGS=$SAVE_FFLAGS
+36 -5
View File
@@ -44,7 +44,7 @@
/*!
\ingroup h5_c_api_general
Open file with name \c filename. This function is available in the paralell
Open file with name \c filename. This function is available in the parallel
and serial version. In the serial case \c comm may have any value.
\return File handle.
@@ -53,10 +53,10 @@
h5_file_t*
H5OpenFile (
const char* filename, /*!< file name */
const h5_int32_t oflag, /*!< file open flags */
const MPI_Comm comm /*!< MPI communicator */
h5_int32_t flags, /*!< file open flags */
MPI_Comm comm /*!< MPI communicator */
) {
return h5_open_file (filename, H5_O_RDWR, comm, __func__);
return h5_open_file (filename, flags, comm, __func__);
}
/*!
@@ -108,7 +108,7 @@ H5GetStepNameFormat (
h5_file_t* const f, /*!< Handle to file */
char* name, /*!< OUT: Prefix */
const h5_size_t l_name, /*!< length of buffer name */
h5_size_t* width /*!< OUT: Width of the number */
int* width /*!< OUT: Width of the number */
) {
SET_FNAME (f, __func__);
@@ -182,6 +182,35 @@ H5TraverseSteps (
return h5_traverse_steps (f);
}
/*!
\ingroup h5part_performance
Set the `throttle` factor, which causes HDF5 write and read
calls to be issued in that number of batches.
This can prevent large concurrency parallel applications that
use independent writes from overwhelming the underlying
parallel file system.
Throttling only works with the H5_VFD_MPIPOSIX or
H5_VFD_INDEPENDENT drivers and is only available in
the parallel library.
\return \c H5_SUCCESS
*/
#ifdef PARALLEL_IO
h5_err_t
H5SetThrottle (
h5_file_t* f,
int factor
) {
SET_FNAME( f, __func__ );
return h5_set_throttle( f, factor );
}
#endif // PARALLEL_IO
/*!
\ingroup h5part_c_api
\defgroup h5part_c_api_errhandling Error Handling
@@ -261,3 +290,5 @@ H5GetErrno (
return h5_get_errno (f);
}
/*! @} */
+3 -3
View File
@@ -778,7 +778,7 @@ _open_block_group (
b->blockgroup = h5priv_open_group (
f,
f->step_gid,
H5BLOCK_GROUPNAME_BLOCK )
H5_BLOCKNAME )
);
}
b->step_idx = f->step_idx;
@@ -1366,7 +1366,7 @@ H5BlockGetNumFields (
SET_FNAME ( f, __func__ );
CHECK_TIMEGROUP( f );
if ( ! _have_object ( f->step_gid, H5BLOCK_GROUPNAME_BLOCK ) )
if ( ! _have_object ( f->step_gid, H5_BLOCKNAME ) )
return 0;
return h5_get_num_hdf5_groups (f, f->b->blockgroup);
@@ -1826,7 +1826,7 @@ H5BlockHasFieldData (
SET_FNAME ( f, __func__ );
CHECK_TIMEGROUP( f );
if ( ! _have_object ( f->step_gid, H5BLOCK_GROUPNAME_BLOCK ) ) {
if ( ! _have_object ( f->step_gid, H5_BLOCKNAME ) ) {
return H5_ERR_NOENTRY;
}
return H5_SUCCESS;
+6 -2
View File
@@ -80,7 +80,9 @@ H5FedGetNumVertices (
h5_file_t* const f /*!< file handle */
) {
SET_FNAME (f, __func__);
return h5t_get_num_vertices (f, f->myproc);
// MLH: can't use field from opaque h5_file_t!
//return h5t_get_num_vertices (f, f->myproc);
return h5t_get_num_vertices (f, -1);
}
/*!
@@ -130,7 +132,9 @@ H5FedGetNumElements (
h5_file_t* const f
) {
SET_FNAME (f, __func__);
return h5t_get_num_elems (f, f->myproc);
// MLH: can't use field from opaque h5_file_t!
//return h5t_get_num_elems (f, f->myproc);
return h5t_get_num_elems (f, -1);
}
/*!
+418 -688
View File
File diff suppressed because it is too large Load Diff
+329
View File
@@ -0,0 +1,329 @@
/********************** attribute API ****************************************/
/*!
\ingroup h5part_c_api
\defgroup h5part_c_api_attrib Reading and Writing Attributes
*/
/*!
\ingroup h5part_c_api_attrib
Writes a string attribute bound to a file.
This function creates a new attribute \c name with the string \c value as
content. The attribute is bound to the file associated with the file handle
\c f.
If the attribute already exists an error will be returned. There
is currently no way to change the content of an existing attribute.
\return \c H5_SUCCESS or error code
*/
h5_int64_t
H5PartWriteFileAttribString (
h5_file_t *f, /*!< [in] Handle to open file */
const char *attrib_name,/*!< [in] Name of attribute to create */
const char *attrib_value/*!< [in] Value of attribute */
) {
SET_FNAME ( f, __func__ );
if ( h5_check_filehandle ( f ) != H5_SUCCESS )
return h5_get_errno( f );
return h5_write_attrib (
f,
f->root_gid,
attrib_name,
H5T_NATIVE_CHAR,
attrib_value,
strlen ( attrib_value ) + 1 );
}
/*!
\ingroup h5part_c_api_attrib
Writes a string attribute bound to the current time-step.
This function creates a new attribute \c name with the string \c value as
content. The attribute is bound to the current time step in the file given
by the file handle \c f.
If the attribute already exists an error will be returned. There
is currently no way to change the content of an existing attribute.
\return \c H5_SUCCESS or error code
*/
h5_int64_t
H5PartWriteStepAttribString (
h5_file_t *f, /*!< [in] Handle to open file */
const char *attrib_name,/*!< [in] Name of attribute to create */
const char *attrib_value/*!< [in] Value of attribute */
) {
SET_FNAME ( f, __func__ );
if ( h5_check_filehandle ( f ) != H5_SUCCESS )
return h5_get_errno( f );
return h5_write_attrib (
f,
f->step_gid,
attrib_name,
H5T_NATIVE_CHAR,
attrib_value,
strlen ( attrib_value ) + 1 );
}
/*!
\ingroup h5part_c_api_attrib
Writes an attribute bound to the current time-step.
This function creates a new attribute \c name with the string \c value as
content. The attribute is bound to the current time step in the file given
by the file handle \c f.
The value of the attribute is given by the parameter \c type, which must be one
of \c H5T_NATIVE_DOUBLE, \c H5T_NATIVE_INT64 or \c H5T_NATIVE_CHAR, the array
\c value and the number of elements \c nelem in the array.
If the attribute already exists an error will be returned. There
is currently no way to change the content of an existing attribute.
\return \c H5_SUCCESS or error code
*/
h5_int64_t
H5PartWriteStepAttrib (
h5_file_t *f, /*!< [in] Handle to open file */
const char *attrib_name, /*!< [in] Name of attribute */
const h5_int64_t attrib_type,/*!< [in] Type of value. */
const void *attrib_value, /*!< [in] Value of attribute */
const h5_int64_t attrib_nelem/*!< [in] Number of elements */
){
SET_FNAME ( f, __func__ );
if ( h5_check_filehandle ( f ) != H5_SUCCESS )
return h5_get_errno( f );
return h5_write_attrib (
f,
f->step_gid,
attrib_name,
(hid_t)attrib_type,
attrib_value,
attrib_nelem );
}
/*!
\ingroup h5part_c_api_attrib
Writes an attribute bound to a file.
This function creates a new attribute \c name with the string \c value as
content. The attribute is bound to the file given by the file handle
\c f.
The value of the attribute is given by the parameter \c type, which must be one
of H5T_NATIVE_DOUBLE, H5T_NATIVE_INT64 or H5T_NATIVE_CHAR, the array \c value
and the number of elements \c nelem in the array.
If the attribute already exists an error will be returned. There
is currently no way to change the content of an existing attribute.
\return \c H5_SUCCESS or error code
*/
h5_int64_t
H5PartWriteFileAttrib (
h5_file_t *f, /*!< [in] Handle to open file */
const char *attrib_name, /*!< [in] Name of attribute */
const h5_int64_t attrib_type,/*!< [in] Type of value. */
const void *attrib_value, /*!< [in] Value of attribute */
const h5_int64_t attrib_nelem/*!< [in] Number of elements */
) {
SET_FNAME ( f, __func__ );
if ( h5_check_filehandle ( f ) != H5_SUCCESS )
return h5_get_errno( f );
return h5_write_attrib (
f,
f->root_gid,
attrib_name,
(hid_t)attrib_type,
attrib_value,
attrib_nelem );
}
/*!
\ingroup h5part_c_api_attrib
Gets the number of attributes bound to the current step.
\return Number of attributes bound to current time step or error code.
*/
h5_int64_t
H5PartGetNumStepAttribs (
h5_file_t *f /*!< [in] Handle to open file */
) {
SET_FNAME ( f, __func__ );
if ( h5_check_filehandle ( f ) != H5_SUCCESS )
return h5_get_errno( f );
return h5_get_num_attribs ( f, f->step_gid );
}
/*!
\ingroup h5part_c_api_attrib
Gets the number of attributes bound to the file.
\return Number of attributes bound to file \c f or error code.
*/
h5_int64_t
H5PartGetNumFileAttribs (
h5_file_t *f /*!< [in] Handle to open file */
) {
SET_FNAME ( f, __func__ );
if ( h5_check_filehandle ( f ) != H5_SUCCESS )
return h5_get_errno( f );
return h5_get_num_attribs ( f, f->root_gid );
}
/*!
\ingroup h5part_c_api_attrib
Gets the name, type and number of elements of the step attribute
specified by its index.
This function can be used to retrieve all attributes bound to the
current time-step by looping from \c 0 to the number of attribute
minus one. The number of attributes bound to the current
time-step can be queried by calling the function
\c H5PartGetNumStepAttribs().
\return \c H5_SUCCESS or error code
*/
h5_int64_t
H5PartGetStepAttribInfo (
h5_file_t *f, /*!< [in] Handle to open file */
const h5_int64_t attrib_idx,/*!< [in] Index of attribute to
get infos about */
char *attrib_name, /*!< [out] Name of attribute */
const h5_int64_t len_of_attrib_name,
/*!< [in] length of buffer \c name */
h5_int64_t *attrib_type, /*!< [out] Type of value. */
h5_int64_t *attrib_nelem /*!< [out] Number of elements */
) {
SET_FNAME ( f, __func__ );
if ( h5_check_filehandle ( f ) != H5_SUCCESS )
return h5_get_errno( f );
return h5_get_attrib_info (
f,
f->step_gid,
attrib_idx,
attrib_name,
len_of_attrib_name,
attrib_type,
attrib_nelem );
}
/*!
\ingroup h5part_c_api_attrib
Gets the name, type and number of elements of the file attribute
specified by its index.
This function can be used to retrieve all attributes bound to the
file \c f by looping from \c 0 to the number of attribute minus
one. The number of attributes bound to file \c f can be queried
by calling the function \c H5PartGetNumFileAttribs().
\return \c H5_SUCCESS or error code
*/
h5_int64_t
H5PartGetFileAttribInfo (
h5_file_t *f, /*!< [in] Handle to open file */
const h5_int64_t attrib_idx,/*!< [in] Index of attribute to get
infos about */
char *attrib_name, /*!< [out] Name of attribute */
const h5_int64_t len_of_attrib_name,
/*!< [in] length of buffer \c name */
h5_int64_t *attrib_type, /*!< [out] Type of value. */
h5_int64_t *attrib_nelem /*!< [out] Number of elements */
) {
SET_FNAME ( f, __func__ );
if ( h5_check_filehandle ( f ) != H5_SUCCESS )
return h5_get_errno ( f );
return h5_get_attrib_info (
f,
f->root_gid,
attrib_idx,
attrib_name,
len_of_attrib_name,
attrib_type,
attrib_nelem );
}
/*!
\ingroup h5part_c_api_attrib
Reads an attribute bound to current time-step.
\return \c H5_SUCCESS or error code
*/
h5_int64_t
H5PartReadStepAttrib (
h5_file_t *f, /*!< [in] Handle to open file */
const char *attrib_name, /*!< [in] Name of attribute to read */
void *attrib_value /*!< [out] Value of attribute */
) {
SET_FNAME ( f, __func__ );
if ( h5_check_filehandle ( f ) != H5_SUCCESS )
return h5_get_errno ( f );
return h5_read_attrib ( f, f->step_gid, attrib_name, attrib_value );
}
/*!
\ingroup h5part_c_api_attrib
Reads an attribute bound to file \c f.
\return \c H5_SUCCESS or error code
*/
h5_int64_t
H5PartReadFileAttrib (
h5_file_t *f,
const char *attrib_name,
void *attrib_value
) {
SET_FNAME ( f, __func__ );
if ( h5_check_filehandle ( f ) != H5_SUCCESS )
return h5_get_errno ( f );
return h5_read_attrib ( f, f->root_gid, attrib_name, attrib_value );
}
+3 -6
View File
@@ -30,13 +30,13 @@
\return Number of compute nodes.
\return \c -1 on error.
*/
h5_size_t
int
H5GetNumNodes (
h5_file_t* const f
) {
SET_FNAME (f, __func__);
CHECK_FILEHANDLE (f);
return (h5_size_t)f->nprocs;
return h5_get_num_procs(f);
}
/*!
@@ -60,10 +60,7 @@ H5GetNumSteps (
SET_FNAME (f, __func__);
CHECK_FILEHANDLE (f);
return h5_get_num_hdf5_groups_matching_prefix (
f,
f->step_gid,
f->prefix_step_name);
return h5_get_num_steps(f);
}
/*!
+1
View File
@@ -98,6 +98,7 @@ libH5Core_a_SOURCES = \
h5t_store_tetm.c \
h5t_store_trim.c \
h5t_tags.c \
h5u_model.c \
h5u_readwrite.c
libH5Core_a_DEPENDENCIES = $(EXTRA_HEADERS)
+1 -1
View File
@@ -134,7 +134,7 @@ h5_get_attrib_info (
*/
h5_ssize_t
h5_get_num_attribs (
h5_file_t* const f, /*!< handle to open file */
h5_file_t* f, /*!< handle to open file */
const hid_t id
) {
CHECK_FILEHANDLE (f);
+6 -53
View File
@@ -1,6 +1,12 @@
#ifndef __H5_CORE_PRIVATE_H
#define __H5_CORE_PRIVATE_H
#define H5_DATANAME_LEN 128
#define H5_STEPNAME_LEN 128
#define H5_STEPNAME "Step"
#define H5_STEPWIDTH 1
#define H5_BLOCKNAME "Block"
#include "h5_types_private.h"
#include "h5_errorhandling_private.h"
@@ -22,63 +28,10 @@
#include "h5u_errorhandling_private.h"
#include "h5u_types_private.h"
#define H5PART_GROUPNAME_STEP "Step"
#define H5B_CONTAINER_GRPNAME "Block"
#define H5BLOCK_GROUPNAME_BLOCK H5B_CONTAINER_GRPNAME
#define TRY( func ) \
if ((int64_t)(ptrdiff_t)(func) <= (int64_t)H5_ERR) \
return H5_ERR;
/*!
The functions declared here are not part of the API, but may be used
in extensions like H5Block. We name these functions "private".
\note
Private functions may change their interface even in stable versions.
Don't use them in applications!
*/
h5_int64_t
h5_get_num_particles (
h5_file_t* const f
);
herr_t
h5_iteration_operator (
hid_t group_id,
const char* member_name,
void* operator_data
);
h5_int64_t
_H5Part_get_object_name (
h5_file_t* const f,
hid_t group_id,
const char* group_name,
const hid_t type,
const h5_int64_t idx,
char* obj_name,
const h5_int64_t len_obj_name
);
char*
_H5Part_strdupfor2c (
const char* s,
const ssize_t len
);
char*
_H5Part_strc2for (
char* const str,
const ssize_t l_str
);
#ifdef IPL_XT3
# define SEEK_END 2
#endif
+3 -3
View File
@@ -43,20 +43,20 @@
h5_error( \
f, \
H5_ERR_INVAL, \
"Cannot store more than %lld %s", max, otype );
"Cannot store more than %lld %s", (long long)max, otype );
#define HANDLE_H5_PARENT_ID_ERR( f, otype, parent_id ) \
h5_error( \
f, \
H5_ERR_INVAL, \
"Impossible parent_id %lld for %s.", \
parent_id, otype );
(long long)parent_id, otype );
#define HANDLE_H5_OUT_OF_RANGE_ERR( f, otype, oid ) \
h5_error( \
f, \
H5_ERR_INVAL, \
"%s id %lld out of range", \
otype, oid );
otype, (long long)oid );
#endif
+1
View File
@@ -1,5 +1,6 @@
#include <assert.h>
#include <stdlib.h>
#include <math.h>
#include "h5_core.h"
+138 -2
View File
@@ -134,7 +134,7 @@ h5priv_get_num_objs_in_hdf5_group (
/*
Get name of object given by index \c idx in grouop \c loc_id. If name is \c NULL,
Get name of object given by index \c idx in group \c loc_id. If name is \c NULL,
return size of name.
*/
ssize_t
@@ -306,6 +306,9 @@ h5priv_write_hdf5_dataset (
const hid_t xfer_prop,
const void* buf
) {
#ifdef PARALLEL_IO
TRY ( h5_start_throttle ( f ) );
#endif
herr_t herr = H5Dwrite (
dataset_id,
type_id,
@@ -320,6 +323,9 @@ h5priv_write_hdf5_dataset (
"Write to dataset \"%s\" failed.", \
h5_get_objname (dataset_id));
#ifdef PARALLEL_IO
TRY ( h5_end_throttle ( f ) );
#endif
return H5_SUCCESS;
}
@@ -336,6 +342,9 @@ h5priv_read_hdf5_dataset (
const hid_t xfer_prop,
void* const buf ) {
#ifdef PARALLEL_IO
TRY ( h5_start_throttle ( f ) );
#endif
herr_t herr = H5Dread (
dataset_id,
type_id,
@@ -350,6 +359,9 @@ h5priv_read_hdf5_dataset (
"Error reading dataset \"%s\".",
h5_get_objname (dataset_id) );
#ifdef PARALLEL_IO
TRY ( h5_end_throttle ( f ) );
#endif
return H5_SUCCESS;
}
@@ -466,6 +478,33 @@ h5priv_select_hyperslab_of_hdf5_dataspace (
return H5_SUCCESS;
}
/*!
  Select an explicit list of elements in dataspace \c space_id using
  selection operator \c op.  An empty list (\c nelems == 0) clears the
  selection via \c H5Sselect_none().

  \return H5_SUCCESS or error code
*/
herr_t
h5priv_select_elements_of_hdf5_dataspace (
	h5_file_t* const f,
	hid_t space_id,
	H5S_seloper_t op,
	hsize_t nelems,
	const hsize_t* indices
	) {
	herr_t herr;
	/* H5Sselect_elements requires a positive element count */
	if ( nelems > 0 ) {
		herr = H5Sselect_elements (
			space_id,
			op,
			nelems,
			indices);
	} else {
		herr = H5Sselect_none ( space_id );
	}
	if (herr < 0)
		/* bug fix: old message was a misspelled copy of the
		   hyperslab selection error ("hyperslap") */
		return h5_error (
			f,
			H5_ERR_HDF5,
			"Cannot select elements of dataspace or clear "
			"the selection");
	return H5_SUCCESS;
}
hssize_t
h5priv_get_selected_npoints_of_hdf5_dataspace (
h5_file_t* const f,
@@ -660,7 +699,7 @@ h5priv_set_hdf5_chunk_property (
h5_file_t* const f,
hid_t plist,
int rank,
const hsize_t* dims
hsize_t* dims
) {
if (H5Pset_chunk (plist, rank, dims) < 0)
return h5_error (
@@ -671,6 +710,21 @@ h5priv_set_hdf5_chunk_property (
return H5_SUCCESS;
}
herr_t
h5priv_set_hdf5_layout_property (
h5_file_t* const f,
hid_t plist,
H5D_layout_t layout
) {
if (H5Pset_layout (plist, layout) < 0)
return h5_error (
f,
H5_ERR_HDF5,
"Cannot add layout property to list.");
return H5_SUCCESS;
}
#ifdef PARALLEL_IO
h5_err_t
h5priv_set_hdf5_fapl_mpio_property (
@@ -688,8 +742,90 @@ h5priv_set_hdf5_fapl_mpio_property (
"file access property list.");
return H5_SUCCESS;
}
/*!
  Select the MPI-POSIX virtual file driver on file access property
  list \c fapl_id.

  \return H5_SUCCESS or error code
*/
h5_err_t
h5priv_set_hdf5_fapl_mpiposix_property (
	h5_file_t* const f,
	hid_t fapl_id,
	MPI_Comm comm,
	hbool_t use_gpfs
	) {
	/* bug fix: this wrapper called H5Pset_fapl_mpio() with the
	   use_gpfs flag passed where an MPI_Info is expected; it must
	   call H5Pset_fapl_mpiposix() */
	herr_t herr = H5Pset_fapl_mpiposix (fapl_id, comm, use_gpfs);
	if (herr < 0)
		/* bug fix: propagate the error instead of falling
		   through to H5_SUCCESS */
		return h5_error (
			f,
			H5_ERR_HDF5,
			"Cannot store IO communicator information to the "
			"file access property list.");
	return H5_SUCCESS;
}
/*!
  Set the MPI-IO transfer mode (collective or independent) on dataset
  transfer property list \c dxpl_id.

  \return H5_SUCCESS or error code
*/
h5_err_t
h5priv_set_hdf5_dxpl_mpio_property (
	h5_file_t* const f,
	hid_t dxpl_id,
	H5FD_mpio_xfer_t mode
	) {
	herr_t herr = H5Pset_dxpl_mpio (dxpl_id, mode);
	if (herr < 0)
		/* bug fix: the error was computed but not returned, so
		   failures silently yielded H5_SUCCESS */
		return h5_error (
			f,
			H5_ERR_HDF5,
			"Cannot store IO communicator information to the "
			"dataset transfer property list.");
	return H5_SUCCESS;
}
#endif
/*!
  Store metadata cache configuration \c config in file access property
  list \c fapl_id.

  \return H5_SUCCESS or error code
*/
h5_err_t
h5priv_set_hdf5_mdc_property (
	h5_file_t* const f,
	hid_t fapl_id,
	H5AC_cache_config_t *config
	) {
	herr_t herr = H5Pset_mdc_config (fapl_id, config);
	if (herr < 0)
		/* bug fix: propagate the error instead of falling
		   through to H5_SUCCESS */
		return h5_error (
			f,
			H5_ERR_HDF5,
			"Cannot store metadata cache configuration in the "
			"file access property list.");
	return H5_SUCCESS;
}
/*!
  Read the current metadata cache configuration from file access
  property list \c fapl_id into \c config.  \c config->version must be
  set by the caller before the call (HDF5 requirement).

  \return H5_SUCCESS or error code
*/
h5_err_t
h5priv_get_hdf5_mdc_property (
	h5_file_t* const f,
	hid_t fapl_id,
	H5AC_cache_config_t *config
	) {
	herr_t herr = H5Pget_mdc_config (fapl_id, config);
	if (herr < 0)
		/* bug fix: propagate the error instead of falling
		   through to H5_SUCCESS */
		return h5_error (
			f,
			H5_ERR_HDF5,
			"Cannot get metadata cache configuration in the "
			"file access property list.");
	return H5_SUCCESS;
}
/*!
  Set file object alignment on file access property list \c fapl_id:
  objects of at least \c threshold bytes are aligned on \c alignment
  byte boundaries.

  \return H5_SUCCESS or error code
*/
h5_err_t
h5priv_set_hdf5_alignment_property (
	h5_file_t* const f,
	hid_t fapl_id,
	hsize_t threshold,
	hsize_t alignment
	) {
	herr_t herr = H5Pset_alignment (fapl_id, threshold, alignment);
	if (herr < 0)
		/* bug fix: propagate the error instead of falling
		   through to H5_SUCCESS */
		return h5_error (
			f,
			H5_ERR_HDF5,
			"Cannot set alignment in the "
			"file access property list.");
	return H5_SUCCESS;
}
h5_err_t
h5priv_close_hdf5_property (
h5_file_t* const f,
+54 -1
View File
@@ -142,6 +142,15 @@ h5priv_select_hyperslab_of_hdf5_dataspace (
const hsize_t* block
);
herr_t
h5priv_select_elements_of_hdf5_dataspace (
h5_file_t* const f,
hid_t space_id,
H5S_seloper_t op,
hsize_t nelems,
const hsize_t* indices
);
hssize_t
h5priv_get_selected_npoints_of_hdf5_dataspace (
h5_file_t* const f,
@@ -211,7 +220,14 @@ h5priv_set_hdf5_chunk_property (
h5_file_t* const f,
hid_t plist,
int ndims,
const hsize_t* dim
hsize_t* dim
);
herr_t
h5priv_set_hdf5_layout_property (
h5_file_t* const f,
hid_t plist,
H5D_layout_t layout
);
#ifdef PARALLEL_IO
@@ -222,8 +238,45 @@ h5priv_set_hdf5_fapl_mpio_property (
MPI_Comm comm,
MPI_Info info
);
h5_err_t
h5priv_set_hdf5_fapl_mpiposix_property (
h5_file_t* const f,
hid_t fapl_id,
MPI_Comm comm,
hbool_t use_gpfs
);
h5_err_t
h5priv_set_hdf5_dxpl_mpio_property (
h5_file_t* const f,
hid_t dxpl_id,
H5FD_mpio_xfer_t mode
);
#endif
h5_err_t
h5priv_set_hdf5_mdc_property (
h5_file_t* const f,
hid_t fapl_id,
H5AC_cache_config_t *config
);
h5_err_t
h5priv_get_hdf5_mdc_property (
h5_file_t* const f,
hid_t fapl_id,
H5AC_cache_config_t *config
);
h5_err_t
h5priv_set_hdf5_alignment_property (
h5_file_t* const f,
hid_t fapl_id,
hsize_t threshold,
hsize_t alignment
);
h5_err_t
h5priv_close_hdf5_property (
h5_file_t* const f,
+1 -1
View File
@@ -206,7 +206,7 @@ h5priv_insert_idmap (
h5_id_t local_id
) {
if (map->num_items == map->size)
return HANDLE_H5_OVERFLOW_ERR (f, "g2lmap", map->size);
return HANDLE_H5_OVERFLOW_ERR (f, "g2lmap", (long long)map->size);
h5_id_t i = h5priv_search_idmap (map, global_id);
if (i >= 0) /* global id already in use ? */
+96 -7
View File
@@ -3,9 +3,96 @@
#ifdef PARALLEL_IO
#define ERR_GATHER "Cannot gather data."
#define ERR_COMM_SIZE "Cannot get number of processes in my group."
#define ERR_COMM_RANK "Cannot get rank of the calling process."
/*!
  Blocking point-to-point receive: read \c count items of MPI datatype
  \c type into \c buf from rank \c from on communicator \c comm,
  matching message tag \c tag.  The MPI status is discarded.

  \return H5_SUCCESS or error code
*/
h5_err_t
h5priv_mpi_recv(
	h5_file_t *f,
	void* buf,
	const int count,
	const MPI_Datatype type,
	const int from,
	const int tag,
	const MPI_Comm comm
	) {
	const int rc = MPI_Recv (
		buf, count, type, from, tag, comm, MPI_STATUS_IGNORE);
	if (rc != MPI_SUCCESS) {
		return h5_error (f, H5_ERR_MPI, "Cannot receive data");
	}
	return H5_SUCCESS;
}
/*!
  Blocking point-to-point send: transmit \c count items of MPI datatype
  \c type from \c buf to rank \c to on communicator \c comm with
  message tag \c tag.

  \return H5_SUCCESS or error code
*/
h5_err_t
h5priv_mpi_send(
	h5_file_t *f,
	void* buf,
	const int count,
	const MPI_Datatype type,
	const int to,
	const int tag,
	const MPI_Comm comm
	) {
	const int rc = MPI_Send (buf, count, type, to, tag, comm);
	if (rc != MPI_SUCCESS) {
		return h5_error (f, H5_ERR_MPI, "Cannot send data");
	}
	return H5_SUCCESS;
}
/*!
  Element-wise sum of \c sendbuf across all ranks of \c comm
  (MPI_Allreduce with MPI_SUM); every rank receives the result in
  \c recvbuf.

  \return H5_SUCCESS or error code
*/
h5_err_t
h5priv_mpi_sum (
	h5_file_t* const f,
	void* sendbuf,
	void* recvbuf,
	const int count,
	const MPI_Datatype type,
	const MPI_Comm comm
	) {
	const int rc = MPI_Allreduce (
		sendbuf, recvbuf, count, type, MPI_SUM, comm);
	if (rc != MPI_SUCCESS) {
		return h5_error (f, H5_ERR_MPI, "Cannot perform sum reduction");
	}
	return H5_SUCCESS;
}
/*!
  Prefix (scan) sum of \c sendbuf over the ranks of \c comm (MPI_Scan
  with MPI_SUM); rank i receives in \c recvbuf the sum over ranks
  0..i.  Note MPI_Scan is an *inclusive* scan.

  \return H5_SUCCESS or error code
*/
h5_err_t
h5priv_mpi_prefix_sum (
	h5_file_t* const f,
	void* sendbuf,
	void* recvbuf,
	const int count,
	const MPI_Datatype type,
	const MPI_Comm comm
	) {
	const int rc = MPI_Scan (
		sendbuf, recvbuf, count, type, MPI_SUM, comm);
	if (rc != MPI_SUCCESS) {
		return h5_error (f, H5_ERR_MPI, "Cannot perform prefix sum");
	}
	return H5_SUCCESS;
}
h5_err_t
h5priv_mpi_allgather (
@@ -27,7 +114,7 @@ h5priv_mpi_allgather (
recvtype,
comm);
if (err != MPI_SUCCESS)
return h5_error (f, H5_ERR_MPI, ERR_GATHER);
return h5_error (f, H5_ERR_MPI, "Cannot gather data");
return H5_SUCCESS;
}
@@ -39,7 +126,7 @@ h5priv_mpi_comm_size (
) {
int err = MPI_Comm_size (comm, size);
if (err != MPI_SUCCESS)
return h5_error (f, H5_ERR_MPI, ERR_COMM_SIZE);
return h5_error (f, H5_ERR_MPI, "Cannot get communicator size");
return H5_SUCCESS;
}
@@ -52,7 +139,9 @@ h5priv_mpi_comm_rank (
) {
int err = MPI_Comm_rank (comm, rank);
if (err != MPI_SUCCESS)
return h5_error (f, H5_ERR_MPI, ERR_COMM_RANK);
return h5_error (f, H5_ERR_MPI, "Cannot get this task's rank");
return H5_SUCCESS;
}
#endif
#endif // PARALLEL_IO
+42
View File
@@ -2,6 +2,48 @@
#define __H5_MPI_PRIVATE_H
#ifdef PARALLEL_IO
h5_err_t
h5priv_mpi_recv(
h5_file_t *f,
void* buf,
const int count,
const MPI_Datatype type,
const int from,
const int tag,
const MPI_Comm comm
);
h5_err_t
h5priv_mpi_send(
h5_file_t *f,
void* buf,
const int count,
const MPI_Datatype type,
const int to,
const int tag,
const MPI_Comm comm
);
h5_err_t
h5priv_mpi_sum (
h5_file_t* const f,
void* sendbuf,
void* recvbuf,
const int count,
const MPI_Datatype type,
const MPI_Comm comm
);
h5_err_t
h5priv_mpi_prefix_sum (
h5_file_t* const f,
void* sendbuf,
void* recvbuf,
const int count,
const MPI_Datatype type,
const MPI_Comm comm
);
h5_err_t
h5priv_mpi_allgather (
h5_file_t* const f,
+99 -41
View File
@@ -3,6 +3,9 @@
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#if H5_LUSTRE
#include <lustre/liblustreapi.h>
#endif
#include "h5_core.h"
#include "h5_core_private.h"
@@ -63,13 +66,13 @@ h5upriv_open_file (
TRY( f->u = (h5u_fdata_t*)h5priv_alloc (f, NULL, sizeof (*f->u)) );
h5u_fdata_t *u = f->u;
u->shape = 0;
u->diskshape = H5S_ALL;
u->memshape = H5S_ALL;
u->viewstart = -1;
u->viewend = -1;
size_t size = f->nprocs * sizeof (h5_int64_t);
TRY( u->pnparticles = h5priv_alloc (f, NULL, size) );
u->viewindexed = 0;
TRY( u->dcreate_prop = h5priv_create_hdf5_property(f, H5P_DATASET_CREATE) );
return H5_SUCCESS;
}
@@ -84,7 +87,7 @@ h5upriv_open_file (
\return H5_SUCCESS or error code
*/
static h5_int64_t
h5bpriv_open_block (
h5bpriv_open_file (
h5_file_t * const f /*!< IN: file handle */
) {
h5b_fdata_t* b;
@@ -126,13 +129,13 @@ h5_err_t
h5priv_open_file (
h5_file_t* const f,
const char* filename, /*!< The name of the data file to open. */
h5_int32_t flags, /*!< The access mode for the file. */
char flags, /*!< The access mode for the file. */
MPI_Comm comm /*!< MPI communicator */
) {
h5_info (f, "Opening file %s.", filename);
TRY( h5priv_set_hdf5_errorhandler (f, H5E_DEFAULT, h5priv_error_handler, NULL) );
TRY( h5_set_stepname_fmt (f, H5PART_GROUPNAME_STEP, 0) );
TRY( h5_set_stepname_fmt (f, H5_STEPNAME, H5_STEPWIDTH) );
f->xfer_prop = f->create_prop = f->access_prop = H5P_DEFAULT;
@@ -145,41 +148,64 @@ h5priv_open_file (
TRY( h5priv_mpi_comm_size (f, comm, &f->nprocs) );
TRY( h5priv_mpi_comm_rank (f, comm, &f->myproc) );
/* for the SP2... perhaps different for linux */
MPI_Info info = MPI_INFO_NULL;
/* ks: IBM_large_block_io */
MPI_Info_create (&info);
MPI_Info_set (info, "IBM_largeblock_io", "true" );
TRY( h5priv_set_hdf5_fapl_mpio_property (f, f->access_prop, comm, info) );
MPI_Info_free (&info);
TRY( f->access_prop = h5priv_create_hdf5_property (f, H5P_FILE_ACCESS) );
/*TRY ( f->create_prop = h5priv_create_hdf5_property ( f, H5P_FILE_CREATE) );*/
f->create_prop = H5P_DEFAULT;
/* xfer_prop: also used for parallel I/O, during actual writes
rather than the access_prop which is for file creation. */
TRY( f->xfer_prop = h5priv_create_hdf5_property (f, H5P_DATASET_XFER) );
#ifdef COLLECTIVE_IO
if (H5Pset_dxpl_mpio (f->xfer_prop,H5FD_MPIO_COLLECTIVE) < 0) {
return HANDLE_H5P_SET_DXPL_MPIO_ERR;
TRY( f->xfer_prop = h5priv_create_hdf5_property(f, H5P_DATASET_XFER) );
TRY( f->access_prop = h5priv_create_hdf5_property(f, H5P_FILE_ACCESS) );
TRY( f->create_prop = h5priv_create_hdf5_property(f, H5P_FILE_CREATE) );
/* select the HDF5 VFD */
if (flags & H5_VFD_MPIPOSIX) {
h5_info(f, "Selecting MPI-POSIX VFD");
hbool_t use_gpfs = 0; // TODO autodetect GPFS?
TRY( h5priv_set_hdf5_fapl_mpiposix_property(f,
f->access_prop, comm, use_gpfs) );
} else {
h5_info(f, "Selecting MPI-IO VFD");
TRY( h5priv_set_hdf5_fapl_mpio_property(f,
f->access_prop, comm, MPI_INFO_NULL) );
if (flags & H5_VFD_INDEPENDENT) {
h5_info(f, "MPI-IO: Using independent mode");
} else {
h5_info(f, "MPI-IO: Using collective mode");
TRY( h5priv_set_hdf5_dxpl_mpio_property(f,
f->xfer_prop, H5FD_MPIO_COLLECTIVE) );
}
#endif /* COLLECTIVE_IO */
}
/* defer metadata writes */
H5AC_cache_config_t config;
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
TRY( h5priv_get_hdf5_mdc_property(f, f->access_prop, &config) );
config.set_initial_size = 1;
config.initial_size = 16 * 1024 * 1024;
config.evictions_enabled = 0;
config.incr_mode = H5C_incr__off;
config.decr_mode = H5C_decr__off;
config.flash_incr_mode = H5C_flash_incr__off;
TRY( h5priv_set_hdf5_mdc_property(f, f->access_prop, &config) );
#endif /* PARALLEL_IO */
if (flags == H5_O_RDONLY) {
#if H5_LUSTRE
// set alignment
lov_user_md lum;
llapi_file_get_stripe(filename, &lum);
h5_size_t stripe_size = (h5_size_t)lum.lmm_stripe_size;
h5info(f, "Found lustre stripe size of %llu bytes", stripe_size);
TRY( h5priv_set_hdf5_alignment_property(f,
f->access_prop, 0, stripe_size) );
#endif
if (flags & H5_O_RDONLY) {
f->file = H5Fopen (filename, H5F_ACC_RDONLY, f->access_prop);
}
else if (flags == H5_O_WRONLY){
else if (flags & H5_O_WRONLY){
f->file = H5Fcreate (filename, H5F_ACC_TRUNC, f->create_prop,
f->access_prop);
f->empty = 1;
}
else if (flags == H5_O_APPEND || flags == H5_O_RDWR) {
else if (flags & H5_O_APPEND || flags & H5_O_RDWR) {
int fd = open (filename, O_RDONLY, 0);
if ((fd == -1) && (errno == ENOENT)) {
f->file = H5Fcreate (filename, H5F_ACC_TRUNC,
@@ -208,6 +234,7 @@ h5priv_open_file (
TRY( f->root_gid = h5priv_open_group (f, f->file, "/" ));
f->mode = flags;
f->step_gid = -1;
f->throttle = 0;
sprintf (
f->step_name,
@@ -215,7 +242,7 @@ h5priv_open_file (
f->prefix_step_name, f->width_step_idx, (long long)f->step_idx);
TRY( h5upriv_open_file (f) );
TRY( h5bpriv_open_block (f) );
TRY( h5bpriv_open_file (f) );
TRY( h5tpriv_open_file (f) );
return H5_SUCCESS;
}
@@ -271,7 +298,7 @@ h5upriv_close_file (
struct h5u_fdata* u = f->u;
f->__errno = H5_SUCCESS;
if (u->shape > 0) {
if(u->shape != H5S_ALL) {
TRY( h5priv_close_hdf5_dataspace (f, u->shape) );
u->shape = 0;
}
@@ -283,9 +310,7 @@ h5upriv_close_file (
TRY( h5priv_close_hdf5_dataspace (f, u->memshape) );
u->memshape = 0;
}
if (u->pnparticles) {
free (u->pnparticles);
}
TRY( h5priv_close_hdf5_property (f, u->dcreate_prop) );
return f->__errno;
}
@@ -362,14 +387,16 @@ h5_err_t
h5_set_stepname_fmt (
h5_file_t* const f,
const char* name,
const h5_int64_t width
int width
) {
if (width < 0) width = 0;
else if (width > H5_STEPNAME_LEN - 1) width = H5_STEPNAME_LEN - 1;
strncpy (
f->prefix_step_name,
name,
sizeof (f->prefix_step_name) - 1);
f->width_step_idx = (int)width;
H5_STEPNAME_LEN - 1);
f->width_step_idx = width;
return H5_SUCCESS;
}
@@ -384,8 +411,8 @@ h5_err_t
h5_get_stepname_fmt (
h5_file_t* const f, /*!< Handle to file */
char* name, /*!< OUT: Prefix */
const h5_size_t l_name, /*!< length of buffer name */
h5_size_t* width /*!< OUT: Width of the number */
int l_name, /*!< length of buffer name */
int* width /*!< OUT: Width of the number */
) {
return h5_error_not_implemented (f, __FILE__, __func__, __LINE__);
}
@@ -403,7 +430,38 @@ h5_get_step (
) {
return h5_error_not_implemented (f, __FILE__, __func__, __LINE__);
}
/*!
  \ingroup h5_core_filehandling

  Get number of processes attached to this file handle.

  \return Number of processes or error code
*/
int
h5_get_num_procs (
	h5_file_t* const f	/*!< file handle */
	) {
	/* cached at open time from the file's communicator */
	return f->nprocs;
}
/*!
  \ingroup h5_core_filehandling

  Get number of steps.

  Steps are stored as HDF5 groups whose names share the configured
  step-name prefix; this counts the groups matching that prefix.

  \return Number of steps or error code
*/
h5_size_t
h5_get_num_steps(
	h5_file_t* const f	/*!< file handle */
	) {
	/* NOTE(review): the search is rooted at f->step_gid (the current
	   step group) rather than the file root — confirm this is the
	   intended parent for the prefix scan. */
	return h5_get_num_hdf5_groups_matching_prefix (
		f,
		f->step_gid,
		f->prefix_step_name);
}
/*!
\ingroup h5_core_filehandling
+4
View File
@@ -95,8 +95,12 @@ swapfunc(a, b, n, swaptype)
static inline char *
med3(char *a, char *b, char *c, cmp_t *cmp, void *thunk
#ifndef I_AM_QSORT_R
#if __GNUC__
__attribute__((unused))
#else
__unused
#endif
#endif
)
{
return CMP(thunk, a, b) < 0 ?
+98 -17
View File
@@ -1,3 +1,4 @@
#include <string.h>
#include "h5_core.h"
#include "h5_core_private.h"
@@ -17,23 +18,14 @@ h5_write_data (
) {
hid_t dset_id;
h5_info (f, "Writing dataset %s/%s.", h5_get_objname(group_id), name);
h5_info (f, "Writing dataset %s/%s.", h5_get_objname(group_id), name);
TRY( dset_id = h5priv_create_hdf5_dataset (
f,
group_id,
name,
type_id,
diskspace_id,
H5P_DEFAULT) );
TRY( h5priv_write_hdf5_dataset (
f,
dset_id,
type_id,
memspace_id,
diskspace_id,
f->xfer_prop,
array) );
TRY( h5priv_close_hdf5_dataset (f, dset_id) );
f,
group_id,
name,
type_id,
diskspace_id,
H5P_DEFAULT) );
f->empty = 0;
@@ -204,7 +196,7 @@ h5priv_close_step (
return H5_SUCCESS;
}
h5_err_t
static h5_err_t
_set_step (
h5_file_t* const f,
const h5_int64_t step_idx /*!< [in] Step to set. */
@@ -299,3 +291,92 @@ h5_has_index (
f->prefix_step_name, f->width_step_idx, (long long)step);
return (H5Gget_objinfo(f->file, name, 1, NULL) >= 0);
}
/*!
  Copy dataset name \c name into \c name2, truncating it so the result
  (including the terminating NUL) fits in a buffer of
  \c H5_DATANAME_LEN bytes.

  \param name   [in]  source name (NUL-terminated)
  \param name2  [out] destination buffer, at least H5_DATANAME_LEN bytes

  \return H5_SUCCESS
*/
h5_err_t
h5_normalize_dataset_name (
	const char *name,
	char *name2
	) {
	/* bug fix: the old test used '>' so a name of exactly
	   H5_DATANAME_LEN characters took the strcpy branch and wrote
	   H5_DATANAME_LEN + 1 bytes, overflowing name2 by one byte */
	if ( strlen(name) >= H5_DATANAME_LEN ) {
		strncpy ( name2, name, H5_DATANAME_LEN - 1 );
		name2[H5_DATANAME_LEN-1] = '\0';
	} else {
		strcpy ( name2, name );
	}
	return H5_SUCCESS;
}
#ifdef PARALLEL_IO
/*!
  Set the I/O throttle factor for this file.  Throttling serializes
  independent I/O so that only every throttle-th process performs I/O
  concurrently (see h5_start_throttle / h5_end_throttle).

  Only honored when the file was opened with the MPI-POSIX VFD or
  MPI-IO in independent mode; otherwise a warning is issued and the
  factor is left unchanged.

  \return H5_SUCCESS
*/
h5_err_t
h5_set_throttle (
	h5_file_t* f,		/*!< [in] file handle */
	int factor		/*!< [in] throttle factor */
	) {
	if ( (f->mode & H5_VFD_INDEPENDENT) || (f->mode & H5_VFD_MPIPOSIX) ) {
		f->throttle = factor;
		h5_info (f,
			"Throttling enabled with factor = %d", f->throttle );
	} else {
		h5_warn (f,
			"Throttling is only permitted with the MPI-POSIX "
			"or MPI-IO Independent VFD." );
	}
	return H5_SUCCESS;
}
/*!
  Enter a throttled I/O section.  With throttle factor T, processes are
  grouped so that ranks myproc < T proceed immediately, while every
  other rank blocks until it receives a token from rank
  (myproc - T), i.e. from its predecessor in the same throttle group.
  Must be paired with h5_end_throttle().

  \return H5_SUCCESS or error code
*/
h5_err_t
h5_start_throttle (
	h5_file_t *f		/*!< [in] file handle */
	) {
	if (f->throttle > 0) {
		int token = 1;
		h5_info (f,
			"Throttling with factor = %d",
			f->throttle);
		if (f->myproc / f->throttle > 0) {
			h5_debug (f,
				"[%d] throttle: waiting on token from %d",
				f->myproc, f->myproc - f->throttle);
			// wait to receive token before continuing with read
			TRY( h5priv_mpi_recv(f,
				&token, 1, MPI_INT,
				f->myproc - f->throttle, // receive from previous proc
				f->myproc, // use this proc id as message tag
				f->comm
				) );
		}
		h5_debug (f,
			"[%d] throttle: received token",
			f->myproc);
	}
	return H5_SUCCESS;
}
/*!
  Leave a throttled I/O section: pass the token to rank
  (myproc + T) so the next process in this throttle group may start
  its I/O.  Ranks at the end of a group (myproc + T >= nprocs) have no
  successor and send nothing.  Counterpart of h5_start_throttle().

  \return H5_SUCCESS or error code
*/
h5_err_t
h5_end_throttle (
	h5_file_t *f		/*!< [in] file handle */
	) {
	if (f->throttle > 0) {
		int token;	/* value is irrelevant; only receipt matters */
		if (f->myproc + f->throttle < f->nprocs) {
			// pass token to next proc
			h5_debug (f,
				"[%d] throttle: passing token to %d",
				f->myproc, f->myproc + f->throttle);
			TRY( h5priv_mpi_send(f,
				&token, 1, MPI_INT,
				f->myproc + f->throttle, // send to next proc
				f->myproc + f->throttle, // use the id of the target as tag
				f->comm
				) );
		}
	}
	return H5_SUCCESS;
}
#endif // PARALLEL_IO
+43
View File
@@ -1,6 +1,49 @@
#ifndef __H5_TYPES_PRIVATE_H
#define __H5_TYPES_PRIVATE_H
/**
\struct h5_file
This is an essentially opaque datastructure that
acts as the filehandle for all practical purposes.
It is created by H5PartOpenFile<xx>() and destroyed by
H5PartCloseFile().
*/
struct h5_file {
hid_t file; /* file id -> fid */
h5_int32_t mode; /* file access mode */
char empty;
h5_err_t __errno; /* error number */
const char * __funcname; /* H5Block/Fed/Part API function*/
/* MPI */
MPI_Comm comm; /* MPI communicator */
int nprocs; /* number of processors */
int myproc; /* The index of the processor
this process is running on. */
int throttle;
/* HDF5 */
hid_t xfer_prop; /* dataset transfer properties */
hid_t access_prop; /* file access properties */
hid_t create_prop; /* file create properties */
hid_t root_gid; /* id of root group */
hid_t step_gid; /* id of current step */
/* step internal data */
char prefix_step_name[H5_STEPNAME_LEN]; /* Prefix of step name */
int width_step_idx; /* pad step index with 0 up to this */
char step_name[2*H5_STEPNAME_LEN]; /* full step name */
h5_int64_t step_idx; /* step index */
int is_new_step;
struct h5u_fdata *u;
struct h5b_fdata *b;
struct h5t_fdata *t;
};
struct h5_idmap_el {
h5_id_t global_id;
h5_id_t local_id;
+2 -2
View File
@@ -16,13 +16,13 @@ h5tpriv_error_local_elem_nexist (
char s[1024];
int num_chars_printed = snprintf (s, sizeof(s), "%lld,",
local_vertex_indices[0]);
(long long)local_vertex_indices[0]);
int i;
int num_vertices = t->ref_element->num_faces[0];
for (i = 1; i < num_vertices; i++) {
num_chars_printed += snprintf (
s + num_chars_printed, sizeof (s) - num_chars_printed,
"%lld,", local_vertex_indices[i]);
"%lld,", (long long)local_vertex_indices[i]);
if ((sizeof (s) - num_chars_printed) < 32) {
// buffer to small
return h5_error_internal (f, __FILE__, __func__, __LINE__);
+5 -5
View File
@@ -24,14 +24,14 @@ h5tpriv_error_local_elem_nexist (
h5_error( \
f, \
H5_ERR_INVAL, \
"Level %lld doesn't exist.", level_id );
"Level %lld doesn't exist.", (long long)level_id );
#define h5tpriv_error_global_id_nexist( f, name, id ) \
h5_error( \
f, \
H5_ERR_NOENTRY, \
"%s with global id %lld does not exist!", \
name, id );
name, (long long)id );
#define h5tpriv_error_global_triangle_id_nexist( f, vids ) \
@@ -39,14 +39,14 @@ h5tpriv_error_local_elem_nexist (
f, \
H5_ERR_NOENTRY, \
"Triangle with global vertex ids (%lld,%lld,%lld) doesn't exist!", \
vids[0], vids[1], vids[2] );
(long long)vids[0], (long long)vids[1], (long long)vids[2] );
#define h5tpriv_error_local_triangle_nexist( f, indices ) \
h5_error( \
f, \
H5_ERR_NOENTRY, \
"Triangle with global vertex ids (%lld,%lld,%lld) doesn't exist!", \
indices[0], indices[1], indices[2] );
(long long)indices[0], (long long)indices[1], (long long)indices[2] );
#define h5tpriv_error_store_boundaryface_local_id( f, local_fid ) \
@@ -54,6 +54,6 @@ h5tpriv_error_local_elem_nexist (
f, \
H5_ERR_INVAL, \
"Boundary face with local id %lld is not on level 0!", \
local_fid );
(long long)local_fid );
#endif
+1 -1
View File
@@ -455,7 +455,7 @@ h5t_open_mesh (
id = t->num_meshes;
}
t->mesh_type = type;
snprintf (t->mesh_name, sizeof (t->mesh_name), "%lld", id);
snprintf (t->mesh_name, sizeof (t->mesh_name), "%lld", (long long)id);
switch (type) {
case H5_OID_TETRAHEDRON:
+1 -1
View File
@@ -165,7 +165,7 @@ refine_tet (
f,
H5_ERR_INVAL,
"Tetrahedron %lld already refined.",
elem_idx );
(long long)elem_idx );
vertices[0] = tet->local_vertex_indices[0];
vertices[1] = tet->local_vertex_indices[1];
vertices[2] = tet->local_vertex_indices[2];
+1 -1
View File
@@ -163,7 +163,7 @@ refine_triangle (
f,
H5_ERR_INVAL,
"Element %lld already refined.",
elem_idx );
(long long)elem_idx );
vertices[0] = el->local_vertex_indices[0];
vertices[1] = el->local_vertex_indices[1];
+519
View File
@@ -0,0 +1,519 @@
#include "h5core/h5_core.h"
#include "h5_core_private.h"
/*!
  Report the number of particles in the current timestep.

  Resolution order:
  1. If the step group has no datasets, fall back to the count set by a
     previous set-num-particles call (summed over all ranks under
     PARALLEL_IO); if that is also zero, warn and report 0.
  2. If a view is active, report the number of selected points.
  3. Otherwise report the length of the first dataset in the step.

  \return number of particles or error code
*/
h5_int64_t
h5u_get_num_particles (
	h5_file_t *f		/*!< [in] Handle to open file */
	) {
	h5_int64_t nparticles;
	ssize_t exists;

	/* returns 0 if there are no datasets on disk */
	TRY ( exists = h5_get_num_hdf5_datasets(f, f->step_gid ) );

	if ( exists == 0 )
	{
		/* try to recover number of particles from a previous
		 * H5PartSetNumParticles call. */
#ifdef PARALLEL_IO
		/* each rank only knows its local count: sum them all */
		hsize_t total;
		TRY( h5priv_mpi_sum(f,
			&(f->u->nparticles), &total,
			1, MPI_LONG_LONG, f->comm) );
		nparticles = (h5_int64_t)total;
#else
		nparticles = (h5_int64_t)f->u->nparticles;
#endif
		if ( nparticles > 0 ) {
			h5_debug (
				f,
				"Using existing view to report "
				"nparticles = %lld", (long long)nparticles );
			return nparticles;
		}
		else {
			h5_warn (
				f,
				"There are no datasets in the current timestep "
				"nor existing views: "
				"reporting 0 particles.");
			return 0;
		}
	}

	/* if a view exists, use its size as the number of particles */
	if ( h5u_has_view ( f ) )
	{
		TRY( nparticles = h5priv_get_selected_npoints_of_hdf5_dataspace(
			f,
			f->u->diskshape) );
		h5_debug(
			f,
			"Found %lld points with H5Sget_select_npoints",
			(long long)nparticles );
	}
	/* otherwise, report all particles on disk in the first dataset
	 * for this timestep */
	else
	{
		char dataset_name[H5_DATANAME_LEN];
		TRY( h5priv_get_hdf5_objname_by_idx(
			f,
			f->step_gid,
			0,
			dataset_name,
			H5_DATANAME_LEN) );
		TRY( nparticles = h5priv_get_npoints_of_hdf5_dataset_by_name (
			f,
			f->step_gid,
			dataset_name ) );
	}
	return nparticles;
}
/*!
  Declare the number of particles this process will write in the
  current step, with an optional memory stride, and (re)build the
  memory/disk dataspaces accordingly.  Under PARALLEL_IO the overall
  dataset size is the sum over all ranks and each rank selects its own
  mutually exclusive hyperslab on disk.

  \return H5_SUCCESS or error code
*/
h5_int64_t
h5u_set_num_particles (
	h5_file_t *f,		/*!< [in] Handle to open file */
	h5_int64_t nparticles,	/*!< [in] Number of particles */
	h5_int64_t stride	/*!< [in] Stride of particles in memory */
	) {
	CHECK_FILEHANDLE( f );
	struct h5u_fdata *u = f->u;
	hsize_t hstride;
	hsize_t count;
	hsize_t start;
	hsize_t total;
	hsize_t dmax = H5S_UNLIMITED;

	if ( nparticles <= 0 )
		return h5_error(
			f,
			H5_ERR_INVAL,
			"Invalid number particles: %lld!\n",
			(long long)nparticles);

	/* prevent invalid stride value */
	if (stride < 1)
	{
		h5_warn (
			f,
			"Stride < 1 was specified: changing to 1." );
		hstride = 1;
	} else {
		hstride = (hsize_t)stride;
	}

#ifndef PARALLEL_IO
	/*
	  if we are not using parallel-IO, there is enough information
	  to know that we can short circuit this routine. However,
	  for parallel IO, this is going to cause problems because
	  we don't know if things have changed globally
	*/
	if ( u->nparticles == nparticles && stride == 1 ) {
		return H5_SUCCESS;
	}
#endif

	/* drop any active view and the stale overall dataspace */
	TRY( h5u_reset_view(f) );

	TRY( h5priv_close_hdf5_dataspace( f, u->shape ) );
	u->shape = H5S_ALL;
	u->nparticles = (hsize_t)nparticles;

	/* declare local memory datasize with striding */
	count = u->nparticles * stride;
	TRY( u->memshape = h5priv_create_hdf5_dataspace(f, 1, &count, &dmax) );

	/* we need a hyperslab selection if there is striding
	 * (otherwise, the default H5S_ALL selection is ok)
	 */
	if ( hstride > 1 )
	{
		start = 0;
		count = u->nparticles;
		TRY( h5priv_select_hyperslab_of_hdf5_dataspace(
			f,
			u->memshape,
			H5S_SELECT_SET,
			&start,
			&hstride,
			&count, NULL ) );
	}

#ifndef PARALLEL_IO
	count = u->nparticles;
	TRY( u->shape = h5priv_create_hdf5_dataspace (
		f,
		1,
		&count,
		NULL ) );
	u->viewstart = 0;
	u->viewend = nparticles - 1; // view range is *inclusive*
#else /* PARALLEL_IO */
	/*
	  The Gameplan here is to declare the overall size of the on-disk
	  data structure the same way we do for the serial case.  But
	  then we must have additional "DataSpace" structures to define
	  our in-memory layout of our domain-decomposed portion of the particle
	  list as well as a "selection" of a subset of the on-disk
	  data layout that will be written in parallel to mutually exclusive
	  regions by all of the processors during a parallel I/O operation.
	  These are f->shape, f->memshape and f->diskshape respectively.
	*/

	/*
	  acquire the number of particles to be written from each MPI process
	*/
	TRY( h5priv_mpi_sum(f,
		&(u->nparticles), &total, 1, MPI_LONG_LONG, f->comm ) );
	/* NOTE(review): h5priv_mpi_prefix_sum wraps MPI_Scan, which is an
	   *inclusive* scan — 'start' therefore appears to include this
	   rank's own count; confirm the intended offset semantics. */
	TRY( h5priv_mpi_prefix_sum(f,
		&(u->nparticles), &start, 1, MPI_LONG_LONG, f->comm ) );
	u->viewstart = start;
	u->viewend = start + u->nparticles - 1; // view range is *inclusive*

	/* declare overall datasize */
	count = total;
	TRY ( u->shape = h5priv_create_hdf5_dataspace (f, 1, &count, NULL) );

	/* declare overall data size but then will select a subset */
	TRY ( u->diskshape = h5priv_create_hdf5_dataspace (f, 1, &count, NULL) );

	count = nparticles;
	hstride = 1;
	TRY ( h5priv_select_hyperslab_of_hdf5_dataspace (
		f,
		u->diskshape,
		H5S_SELECT_SET,
		&start, &hstride, &count,
		NULL ) );
#endif
	return H5_SUCCESS;
}
/*!
  Tell whether a view is currently active on file \c f: either an
  index selection or a valid (non-negative) start/end range.

  \return non-zero if a view is set, 0 otherwise
*/
h5_int64_t
h5u_has_view (
	h5_file_t *f
	) {
	const struct h5u_fdata* u = f->u;
	if ( u->viewindexed )
		return 1;
	return ( u->viewstart >= 0 && u->viewend >= 0 );
}
/*!
  Clear any active view (ranged or indexed): reset the start/end
  markers, drop the index flag, and release the disk and memory
  dataspaces back to the H5S_ALL defaults.

  \return H5_SUCCESS or error code
*/
h5_err_t
h5u_reset_view (
	h5_file_t *f		/*!< [in] Handle to open file */
	) {
	struct h5u_fdata *u = f->u;

	u->viewstart = -1;
	u->viewend = -1;
	u->viewindexed = 0;

	TRY( h5priv_close_hdf5_dataspace( f, u->diskshape ) );
	u->diskshape = H5S_ALL;
	TRY( h5priv_close_hdf5_dataspace( f, u->memshape ) );
	u->memshape = H5S_ALL;

	return H5_SUCCESS;
}
/*!
  Set a ranged view [start, end] (both ends inclusive) on the current
  step.  Passing start == -1 and end == -1 resets the view; start == -1
  alone means 0, end == -1 alone means the last particle on disk.

  \return H5_SUCCESS or error code
*/
h5_err_t
h5u_set_view (
	h5_file_t *f,		/*!< [in] Handle to open file */
	h5_int64_t start,	/*!< [in] Start particle */
	h5_int64_t end		/*!< [in] End particle */
	) {
	h5_int64_t herr = 0;
	hsize_t total;
	hsize_t stride = 1;
	hsize_t hstart;
	hsize_t dmax = H5S_UNLIMITED;
	struct h5u_fdata *u = f->u;

	h5_debug (
		f,
		"Set view (%lld,%lld).",
		(long long)start,(long long)end);

	herr = h5u_reset_view ( f );
	if ( herr < 0 ) return herr;

	if ( start == -1 && end == -1 ) return H5_SUCCESS;

	/*
	  View has been reset so H5PartGetNumParticles will tell
	  us the total number of particles.

	  For now, we interpret start=-1 to mean 0 and
	  end==-1 to mean end of file
	*/
	TRY ( total = (hsize_t) h5u_get_num_particles ( f ) );

	if ( total == 0 ) {
		/* No datasets have been created yet and no views are set.
		 * We have to leave the view empty because we don't know how
		 * many particles there should be! */
		return H5_SUCCESS;
	}

	if ( start == -1 ) start = 0;
	if ( end == -1 ) end = total - 1; // range is *inclusive*

	h5_debug ( f, "Total nparticles=%lld", (long long)total );

	/* so, is this selection inclusive or exclusive?
	   it appears to be inclusive for both ends of the range.
	*/
	if ( end < start ) {
		h5_warn (
			f,
			"Nonfatal error. "
			"End of view (%lld) is less than start (%lld).",
			(long long)end, (long long)start );
		end = start; /* ensure that we don't have a range error */
	}

	/* setting up the new view */
	u->viewstart = start;
	u->viewend = end;
	u->nparticles = end - start + 1;
	h5_debug ( f, "nparticles=%lld", (long long)u->nparticles );

	/* declare overall data size but then will select a subset */
	TRY ( u->diskshape = h5priv_create_hdf5_dataspace ( f, 1, &total, NULL ) );

	total = (hsize_t)u->nparticles;
	hstart = (size_t)start;

	TRY ( h5priv_select_hyperslab_of_hdf5_dataspace (
		f,
		u->diskshape,
		H5S_SELECT_SET,
		&hstart, &stride, &total,
		NULL ) );

	/* declare local memory datasize */
	TRY ( u->memshape = h5priv_create_hdf5_dataspace (
		f, 1, &total, &dmax ) );
	return H5_SUCCESS;
}
/*!
  Set an indexed view on the current step: select the particles given
  by the \c indices list.  A NULL list resets the view; a negative
  \c nelems is clamped to an empty selection.

  \return H5_SUCCESS or error code
*/
h5_int64_t
h5u_set_view_indices (
	h5_file_t *f,			/*!< [in] Handle to open file */
	const h5_int64_t *indices,	/*!< [in] List of indices */
	h5_int64_t nelems		/*!< [in] Size of list */
	) {
	hsize_t total;
	hsize_t dmax = H5S_UNLIMITED;
	struct h5u_fdata *u = f->u;

	TRY ( h5u_reset_view ( f ) );

	if ( indices == NULL ) {
		h5_warn ( f,
			"View indices array is null: resetting view." );
		return H5_SUCCESS;
	}

	/*
	  View has been reset so H5PartGetNumParticles will tell
	  us the total number of particles.
	*/
	TRY ( total = (hsize_t) h5u_get_num_particles ( f ) );

	if ( total == 0 ) {
		/* No datasets have been created yet and no views are set.
		 * We have to leave the view empty because we don't know how
		 * many particles there should be! */
		return H5_SUCCESS;
	}

	h5_debug ( f, "Total nparticles=%lld", (long long)total );

	/* check length of list; clamp a negative length to 0 */
	if ( nelems < 0 ) {
		h5_warn (f,
			"Array of view indices has length < 0: "
			"resetting view.");
		u->nparticles = 0;
	} else {
		u->nparticles = (hsize_t) nelems;
	}

	/* declare overall data size but then will select a subset */
	TRY ( u->diskshape = h5priv_create_hdf5_dataspace ( f, 1, &total, NULL ) );

	/* declare local memory datasize */
	total = (hsize_t)u->nparticles;
	TRY ( u->memshape = h5priv_create_hdf5_dataspace (
		f, 1, &total, &dmax ) );

	/* Bug fix: pass the clamped count (u->nparticles) instead of the
	   raw nelems argument — a negative nelems was converted to a huge
	   unsigned value by the callee's hsize_t parameter. */
	TRY ( h5priv_select_elements_of_hdf5_dataspace (
		f,
		u->diskshape,
		H5S_SELECT_SET,
		(hsize_t)u->nparticles, (hsize_t*)indices ) );

	u->viewindexed = 1;

	return H5_SUCCESS;
}
/*!
  Return the current ranged view as [*start, *end] (inclusive).  Either
  out-parameter may be NULL to skip it.  Fails if the current view is
  an index selection.

  \return number of particles in the view or error code
*/
h5_int64_t
h5u_get_view (
	h5_file_t *f,		/*!< [in] Handle to open file */
	h5_int64_t *start,	/*!< [out] start of view (may be NULL) */
	h5_int64_t *end		/*!< [out] end of view (may be NULL) */
	) {
	struct h5u_fdata *u = f->u;

	if ( u->viewindexed ) {
		return h5_error (
			f,
			H5_ERR_INVAL,
			"The current view has an index selection, but "
			"this function only works for ranged views." );
	}

	h5_int64_t viewstart = 0;
	h5_int64_t viewend = 0;

	if ( u->viewstart >= 0 )
		viewstart = u->viewstart;

	if ( u->viewend >= 0 ) {
		viewend = u->viewend;
	}
	else {
		/* NOTE(review): falls back to the particle count; since the
		   range is inclusive this looks like it should be count - 1
		   — confirm intended semantics. */
		TRY ( viewend = h5u_get_num_particles ( f ) );
	}

	if ( start ) *start = viewstart;
	if ( end ) *end = viewend;

	return viewend - viewstart + 1; // view range is *inclusive*
}
/*!
  Set the "canonical" view: partition all particles of the current
  step evenly across the processes (any remainder goes one-per-rank to
  the lowest ranks).  In a serial build the view covers all particles.

  \return H5_SUCCESS or error code
*/
h5_int64_t
h5u_set_canonical_view (
	h5_file_t *f		/*!< [in] Handle to open file */
	) {
	TRY( h5u_reset_view ( f ) );

	h5_int64_t start = 0;
	h5_int64_t end = 0;
	h5_int64_t total = 0;

	TRY( total = h5u_get_num_particles ( f ) );

#ifdef PARALLEL_IO
	h5_int64_t remainder = 0;

	f->u->nparticles = total / f->nprocs;
	remainder = total % f->nprocs;
	start = f->myproc * f->u->nparticles;

	/* distribute the remainder */
	if ( f->myproc < remainder ) f->u->nparticles++;

	/* adjust the offset */
	if ( f->myproc < remainder ) start += f->myproc;
	else start += remainder;

	end = start + f->u->nparticles - 1;
#else
	f->u->nparticles = total;
	end = total - 1;
#endif // PARALLEL_IO

	TRY( h5u_set_view ( f, start, end ) );

	return H5_SUCCESS;
}
/*!
  Count the datasets attached to the current step group.

  \return number of datasets or error code
*/
h5_int64_t
h5u_get_num_datasets (
	h5_file_t *f	/*!< [in] Handle to open file */
	) {
	ssize_t ndatasets;
	TRY ( ndatasets = h5_get_num_hdf5_datasets(f, f->step_gid ) );
	return (h5_int64_t)ndatasets;
}
/*!
  Get information about a dataset in the current step, addressed by its
  index within the step group: its name and, optionally, its element
  type and element count (pass NULL to skip either).

  \return H5_SUCCESS or error code
*/
h5_int64_t
h5u_get_dataset_info (
	h5_file_t *f, /*!< [in] Handle to open file */
	const h5_int64_t idx,/*!< [in] Index of the dataset */
	char *dataset_name, /*!< [out] Name of dataset */
	const h5_int64_t len_dataset_name,
			/*!< [in] Size of buffer \c dataset_name */
	h5_int64_t *type, /*!< [out] Type of data in dataset */
	h5_int64_t *nelem /*!< [out] Number of elements. */
	) {
	TRY( h5_get_hdf5_datasetname_by_idx (
		f,
		f->step_gid,
		idx,
		dataset_name, len_dataset_name) );

	/* both out-parameters are optional */
	if ( nelem ) {
		TRY( *nelem = h5priv_get_npoints_of_hdf5_dataset_by_name (
			f,
			f->step_gid,
			dataset_name) );
		if ( *nelem < 0 ) return *nelem;
	}

	if ( type ) {
		*type = h5_get_dataset_type( f, f->step_gid, dataset_name );
		if ( *type < 0 ) return *type;
	}

	return H5_SUCCESS;
}
/*!
  Configure the chunk size used when creating new particle datasets.
  A \c size of 0 switches the dataset layout back to contiguous,
  disabling chunking.
*/
h5_err_t
h5u_set_chunk (
	h5_file_t *f,
	h5_size_t size
	) {
	/* lazily create the dataset-creation property list on first use */
	if ( f->u->dcreate_prop == H5P_DEFAULT ) {
		TRY( f->u->dcreate_prop = h5priv_create_hdf5_property (
			f, H5P_DATASET_CREATE) );
	}
	if ( size != 0 ) {
		h5_info(f, "Setting chunk size to %lld particles", (long long)size);
		TRY( h5priv_set_hdf5_chunk_property(f,
			f->u->dcreate_prop, 1, (hsize_t*)&size) );
	} else {
		h5_info(f, "Disabling chunking" );
		TRY( h5priv_set_hdf5_layout_property(f,
			f->u->dcreate_prop, H5D_CONTIGUOUS) );
	}
	return H5_SUCCESS;
}
+119 -430
View File
@@ -1,264 +1,109 @@
#include "h5core/h5_core.h"
#include "h5_core_private.h"
h5_int64_t
h5u_has_view (
h5_file_t *f
) {
return ( f->u->viewstart >= 0 ) && ( f->u->viewend >= 0 );
}
static hid_t
_get_diskshape_for_reading (
h5_file_t *f,
hid_t dataset
) {
struct h5u_fdata *u = f->u;
hid_t space;
TRY( space = h5priv_get_hdf5_dataset_space ( f, dataset ) );
if ( h5u_has_view ( f ) ) {
hsize_t stride;
hsize_t count;
#ifdef HDF5V160
hssize_t start;
#else
hsize_t start;
#endif
/* so, is this selection inclusive or exclusive? */
start = u->viewstart;
count = u->viewend - u->viewstart; /* to be inclusive */
stride=1;
/* now we select a subset */
if ( u->diskshape > 0 ) {
TRY ( h5priv_select_hyperslab_of_hdf5_dataspace (
f,
u->diskshape,
H5S_SELECT_SET,
&start, &stride, &count,
NULL ) );
}
TRY ( h5priv_select_hyperslab_of_hdf5_dataspace (
f,
space,
H5S_SELECT_SET,
&start, &stride, &count,
NULL ) );
h5_debug (
f,
"Selection: range=%d:%d, npoints=%d s=%d",
(int)u->viewstart,(int)u->viewend,
(int)H5Sget_simple_extent_npoints(space),
(int)H5Sget_select_npoints(space) );
}
return space;
}
static hid_t
_get_memshape_for_reading (
h5_file_t *f,
hid_t dataset
) {
struct h5u_fdata *u = f->u;
if ( h5u_has_view ( f ) ) {
hsize_t dmax=H5S_UNLIMITED;
hsize_t len = u->viewend - u->viewstart;
return h5priv_create_hdf5_dataspace ( f, 1, &len, &dmax );
} else {
return H5S_ALL;
}
}
h5_int64_t
h5u_get_num_elems (
h5_file_t *f /*!< [in] Handle to open file */
) {
hid_t space_id;
hid_t dataset_id;
char dataset_name[128];
char step_name[128];
hsize_t nparticles;
/* Get first dataset in current time-step */
sprintf (
step_name,
"%s#%0*lld",
f->prefix_step_name, f->width_step_idx, (long long) f->step_idx );
TRY( h5_get_hdf5_datasetname_by_idx (
f,
f->step_gid,
0,
dataset_name, sizeof (dataset_name)) );
TRY( dataset_id = h5priv_open_hdf5_dataset ( f, f->step_gid, dataset_name ) );
TRY( space_id = _get_diskshape_for_reading ( f, dataset_id ) );
if ( h5u_has_view ( f ) ) {
TRY ( nparticles = h5priv_get_selected_npoints_of_hdf5_dataspace (
f, space_id ) );
}
else {
TRY ( nparticles = h5priv_get_npoints_of_hdf5_dataspace (
f, space_id ) );
}
TRY( h5priv_close_hdf5_dataspace( f, space_id ) );
TRY( h5priv_close_hdf5_dataset( f, dataset_id ) );
return (h5_int64_t) nparticles;
}
h5_int64_t
h5u_read_elems (
h5_err_t
h5u_read_data (
h5_file_t *f, /*!< [in] Handle to open file */
const char *name, /*!< [in] Name to associate dataset with */
void *array, /*!< [out] Array of data */
void *data, /*!< [out] Array of data */
const hid_t type
) {
struct h5u_fdata *u = f->u;
hid_t dataset_id;
hid_t space_id;
hid_t memspace_id;
hsize_t ndisk, nread, nmem;
if ( f->step_gid < 0 ) {
TRY( h5_set_step ( f, f->step_idx ) );
}
TRY( (dataset_id = h5priv_open_hdf5_dataset ( f, f->step_gid, name ) ) );
TRY( (space_id = _get_diskshape_for_reading ( f, dataset_id ) ) );
TRY( (memspace_id = _get_memshape_for_reading ( f, dataset_id ) ) );
char name2[H5_DATANAME_LEN];
TRY ( h5_normalize_dataset_name ( name, name2 ) );
TRY( (dataset_id = h5priv_open_hdf5_dataset ( f, f->step_gid, name2 ) ) );
/* default spaces, if not using a view selection */
memspace_id = H5S_ALL;
TRY ( space_id = h5priv_get_hdf5_dataset_space(f, dataset_id) );
/* get the number of elements on disk for the datset */
TRY ( ndisk = h5priv_get_npoints_of_hdf5_dataspace(f, space_id) );
if ( u->diskshape != H5S_ALL )
{
TRY ( nread = h5priv_get_npoints_of_hdf5_dataspace(f, u->diskshape) );
/* make sure the disk space selected by the view doesn't
* exceed the size of the dataset */
if ( nread <= ndisk ) {
/* we no longer need the dataset space... */
TRY ( h5priv_close_hdf5_dataspace(f, space_id) );
/* ...because it's safe to use the view selection */
space_id = f->u->diskshape;
} else {
/* the view selection is too big?
* fall back to using the dataset space */
h5_warn (
f,
"Ignoring view: dataset[%s] has fewer "
"elements on disk (%lld) than are selected "
"(%lld).",
name2, (long long)ndisk, (long long)nread );
nread = ndisk;
}
}
else {
/* since the view selection is H5S_ALL, we will
* read all available elements in the dataset space */
nread = ndisk;
}
if ( u->memshape != H5S_ALL )
{
TRY ( nread = h5priv_get_npoints_of_hdf5_dataspace(f, u->memshape) );
/* make sure the memory space selected by the view has
* enough capacity for the read */
if ( nmem >= nread ) {
memspace_id = f->u->memshape;
} else {
/* the view selection is too small?
* fall back to using H5S_ALL */
h5_warn (
f,
"Ignoring view: dataset[%s] has more "
"elements selected (%lld) than are available "
"in memory (%lld).",
name2, (long long)nread, (long long)nmem );
memspace_id = H5S_ALL;
}
}
TRY( h5priv_read_hdf5_dataset (
f,
dataset_id,
type,
memspace_id,
space_id,
f->xfer_prop,
array ) );
TRY( h5priv_close_hdf5_dataspace( f, space_id ) );
TRY( h5priv_close_hdf5_dataspace( f, memspace_id ) );
f,
dataset_id,
type,
memspace_id,
space_id,
f->xfer_prop,
data ) );
if ( space_id != f->u->diskshape ) {
TRY( h5priv_close_hdf5_dataspace( f, space_id ) );
}
TRY( h5priv_close_hdf5_dataset ( f, dataset_id ) );
return H5_SUCCESS;
}
h5_int64_t
h5u_set_num_elements (
h5_file_t *f, /*!< [in] Handle to open file */
h5_int64_t nparticles /*!< [in] Number of particles */
) {
struct h5u_fdata *u = f->u;
CHECK_FILEHANDLE( f );
#ifndef PARALLEL_IO
/*
if we are not using parallel-IO, there is enough information
to know that we can short circuit this routine. However,
for parallel IO, this is going to cause problems because
we don't know if things have changed globally
*/
if ( u->nparticles == nparticles ) {
return H5_SUCCESS;
}
#endif
TRY( h5priv_close_hdf5_dataspace( f, u->diskshape ) );
TRY( h5priv_close_hdf5_dataspace( f, u->memshape ) );
TRY( h5priv_close_hdf5_dataspace( f, u->shape ) );
u->diskshape = H5S_ALL;
u->memshape = H5S_ALL;
u->shape = H5S_ALL;
u->nparticles =(hsize_t) nparticles;
#ifndef PARALLEL_IO
TRY( u->shape = h5priv_create_hdf5_dataspace (
f,
1,
&(u->nparticles),
NULL ) );
#else /* PARALLEL_IO */
/*
The Gameplan here is to declare the overall size of the on-disk
data structure the same way we do for the serial case. But
then we must have additional "DataSpace" structures to define
our in-memory layout of our domain-decomposed portion of the particle
list as well as a "selection" of a subset of the on-disk
data layout that will be written in parallel to mutually exclusive
regions by all of the processors during a parallel I/O operation.
These are f->shape, f->memshape and f->diskshape respectively.
*/
hsize_t start[1];
hsize_t stride[1];
hsize_t count[1];
hsize_t total;
hsize_t dmax = H5S_UNLIMITED;
register int i;
/*
acquire the number of particles to be written from each MPI process
*/
TRY ( h5priv_mpi_allgather (
f,
&nparticles, 1, MPI_LONG_LONG,
f->u->pnparticles, 1, MPI_LONG_LONG,
f->comm ) );
if ( f->myproc == 0 ) {
h5_debug ( f, "Particle offsets:" );
for(i=0;i<f->nprocs;i++)
h5_debug ( f,
"\tnp=%lld",
(long long) f->u->pnparticles[i] );
}
/* should I create a selection here? */
/* compute start offsets */
stride[0] = 1;
start[0] = 0;
for (i=0; i<f->myproc; i++) {
start[0] += f->u->pnparticles[i];
}
/* compute total nparticles */
total = 0;
for (i=0; i < f->nprocs; i++) {
total += f->u->pnparticles[i];
}
/* declare overall datasize */
TRY ( f->u->shape = h5priv_create_hdf5_dataspace ( f, 1, &total, &total ) );
/* declare overall data size but then will select a subset */
TRY ( f->u->diskshape = h5priv_create_hdf5_dataspace ( f, 1, &total, &total) );
/* declare local memory datasize */
TRY ( f->u->memshape = h5priv_create_hdf5_dataspace (
f, 1, &(f->u->nparticles), &dmax ) );
count[0] = nparticles;
TRY ( h5priv_select_hyperslab_of_hdf5_dataspace (
f,
f->u->diskshape,
H5S_SELECT_SET,
start, stride, count,
NULL ) );
if ( f->step_gid < 0 ) {
TRY ( h5_set_step ( f, 0 ) );
}
#endif
return H5_SUCCESS;
}
h5_int64_t
h5_err_t
h5u_write_data (
h5_file_t *f, /*!< IN: Handle to open file */
const char *name, /*!< IN: Name to associate array with */
const void *array, /*!< IN: Array to commit to disk */
const void *data, /*!< IN: Array to commit to disk */
const hid_t type /*!< IN: Type of data */
) {
@@ -266,210 +111,54 @@ h5u_write_data (
CHECK_WRITABLE_MODE( f );
CHECK_TIMEGROUP( f );
struct h5u_fdata *u = f->u;
hid_t dset_id;
return h5_write_data(
f,
name,
array,
type,
f->step_gid,
u->memshape,
u->diskshape );
}
char name2[H5_DATANAME_LEN];
TRY ( h5_normalize_dataset_name ( name, name2 ) );
h5_int64_t
h5u_reset_view (
h5_file_t *f
) {
if ( u->shape == H5S_ALL )
h5_warn(f, "The view is unset or invalid.");
struct h5u_fdata *u = f->u;
/* test for existing dataset */
H5E_BEGIN_TRY
dset_id = H5Dopen(f->step_gid, name2, H5P_DEFAULT);
H5E_END_TRY
u->viewstart = -1;
u->viewend = -1;
TRY( h5priv_close_hdf5_dataspace( f, u->shape ) );
u->shape = H5S_ALL;
TRY( h5priv_close_hdf5_dataspace( f, u->diskshape ) );
u->diskshape = H5S_ALL;
TRY( h5priv_close_hdf5_dataspace( f, u->memshape ) );
u->memshape = H5S_ALL;
return H5_SUCCESS;
}
h5_int64_t
h5u_set_view (
h5_file_t *f, /*!< [in] Handle to open file */
h5_int64_t start, /*!< [in] Start particle */
h5_int64_t end /*!< [in] End particle */
) {
h5_int64_t herr = 0;
hsize_t total;
hsize_t stride = 1;
hsize_t dmax = H5S_UNLIMITED;
struct h5u_fdata *u = f->u;
h5_debug (
f,
"Set view (%lld,%lld).",
(long long)start,(long long)end);
herr = h5u_reset_view ( f );
if ( herr < 0 ) return herr;
if ( start == -1 && end == -1 ) return H5_SUCCESS;
/*
View has been reset so H5PartGetNumParticles will tell
us the total number of particles.
For now, we interpret start=-1 to mean 0 and
end==-1 to mean end of file
*/
TRY ( total = (hsize_t) h5u_get_num_elems ( f ) );
if ( start == -1 ) start = 0;
if ( end == -1 ) end = total;
h5_debug ( f, "Total nparticles=%lld", (long long)total );
/* so, is this selection inclusive or exclusive?
it appears to be inclusive for both ends of the range.
*/
if ( end < start ) {
h5_warn (
if (dset_id > 0) {
h5_warn( f,
"Dataset %s/%s already exists",
h5_get_objname(f->step_gid), name2);
} else {
TRY( dset_id = h5priv_create_hdf5_dataset (
f,
"Nonfatal error. "
"End of view (%lld) is less than start (%lld).",
(long long)end, (long long)start );
end = start; /* ensure that we don't have a range error */
f->step_gid,
name2,
type,
u->shape,
H5P_DEFAULT) );
}
/* setting up the new view */
u->viewstart = start;
u->viewend = end;
u->nparticles = end - start + 1;
/* declare overall datasize */
TRY ( u->shape = h5priv_create_hdf5_dataspace ( f, 1, &total, &total ) );
/* declare overall data size but then will select a subset */
TRY ( u->diskshape= h5priv_create_hdf5_dataspace ( f, 1, &total, &total ) );
/* declare local memory datasize */
TRY ( u->memshape = h5priv_create_hdf5_dataspace (
f, 1, &(u->nparticles), &dmax ) );
TRY ( h5priv_select_hyperslab_of_hdf5_dataspace (
f,
u->diskshape,
H5S_SELECT_SET,
(hsize_t*)&start, &stride, &total,
NULL ) );
return H5_SUCCESS;
}
h5_int64_t
h5u_get_view (
h5_file_t *f,
h5_int64_t *start,
h5_int64_t *end
) {
struct h5u_fdata *u = f->u;
h5_int64_t viewstart = 0;
h5_int64_t viewend = 0;
if ( u->viewstart >= 0 )
viewstart = u->viewstart;
if ( u->viewend >= 0 ) {
viewend = u->viewend;
}
else {
viewend = h5u_get_num_elems ( f );
if ( viewend < 0 )
return HANDLE_H5_GET_NUM_PARTICLES_ERR ( f, viewend );
}
if ( start ) *start = viewstart;
if ( end ) *end = viewend;
return viewend - viewstart;
}
h5_int64_t
h5u_set_canonical_view (
h5_file_t *f
) {
h5_int64_t herr = h5u_reset_view ( f );
if ( herr < 0 ) return HANDLE_H5_SET_VIEW_ERR( f, herr, -1, -1 );
#ifdef PARALLEL_IO
h5_int64_t start = 0;
h5_int64_t end = 0;
h5_int64_t n = 0;
int i = 0;
TRY ( n = h5u_get_num_elems ( f ) );
/*
now lets query the attributes for this group to see if there
is a 'pnparticles' group that contains the offsets for the
processors.
*/
if ( h5_read_attrib (
TRY( h5_start_throttle(f) );
#endif
h5_info (f,
"Writing dataset %s/%s.",
h5_get_objname(f->step_gid), name2);
TRY( h5priv_write_hdf5_dataset (
f,
f->step_gid,
"pnparticles", f->u->pnparticles ) < 0) {
/*
Attribute "pnparticles" is not available. So
subdivide the view into NP mostly equal pieces
*/
n /= f->nprocs;
for ( i=0; i<f->nprocs; i++ ) {
f->u->pnparticles[i] = n;
}
}
for ( i = 0; i < f->myproc; i++ ){
start += f->u->pnparticles[i];
}
end = start + f->u->pnparticles[f->myproc] - 1;
TRY ( h5u_set_view ( f, start, end ) );
dset_id,
type,
u->memshape,
u->diskshape,
f->xfer_prop,
data) );
TRY( h5priv_close_hdf5_dataset (f, dset_id) );
#ifdef PARALLEL_IO
TRY( h5_end_throttle(f) );
#endif
return H5_SUCCESS;
}
/*!
Get information about dataset in current index given by its index
*/
h5_int64_t
h5u_get_dataset_info (
h5_file_t *f, /*!< [in] Handle to open file */
const h5_int64_t idx,/*!< [in] Index of the dataset */
char *dataset_name, /*!< [out] Name of dataset */
const h5_int64_t len_dataset_name,
/*!< [in] Size of buffer \c dataset_name */
h5_int64_t *type, /*!< [out] Type of data in dataset */
h5_int64_t *nelem /*!< [out] Number of elements. */
) {
TRY( h5_get_hdf5_datasetname_by_idx (
f,
f->step_gid,
idx,
dataset_name, len_dataset_name) );
if ( nelem ) {
*nelem = h5u_get_num_elems ( f );
if ( *nelem < 0 ) return *nelem;
}
if ( type ) {
*type = h5_get_dataset_type( f, f->step_gid, dataset_name );
if ( *type < 0 ) return *type;
}
f->empty = 0;
return H5_SUCCESS;
}
+3 -10
View File
@@ -6,20 +6,13 @@ struct h5u_fdata {
h5_int64_t viewstart; /* -1 if no view is available: A "view" looks */
h5_int64_t viewend; /* at a subset of the data. */
char viewindexed; /* flag whether this view is a list of indices */
/**
the number of particles in each processor.
With respect to the "VIEW", these numbers
can be regarded as non-overlapping subsections
of the particle array stored in the file.
So they can be used to compute the offset of
the view for each processor
*/
h5_int64_t *pnparticles;
hid_t shape;
hid_t diskshape;
hid_t memshape;
hid_t dcreate_prop;
};
typedef struct h5u_fdata h5u_fdata_t;
#endif
+11 -3
View File
@@ -21,8 +21,8 @@
h5_file_t *
H5OpenFile (
const char * filename,
const h5_int32_t oflag,
const MPI_Comm comm
h5_int32_t flag,
MPI_Comm comm
);
h5_err_t
@@ -42,7 +42,7 @@ H5GetStepNameFormat (
h5_file_t *f,
char *name,
const h5_size_t l_name,
h5_size_t *width
int *width
);
h5_err_t
@@ -105,4 +105,12 @@ H5GetErrno (
h5_file_t * const f
);
#ifdef PARALLEL_IO
h5_err_t
H5SetThrottle (
h5_file_t* f,
int factor
);
#endif
#endif
+1 -1
View File
@@ -1,7 +1,7 @@
#ifndef __H5_INQUIRY_H
#define __H5_INQUIRY_H
h5_size_t
int
H5GetNumNodes (
h5_file_t * const f
);
+1
View File
@@ -11,6 +11,7 @@
#include "h5_openclose.h"
#include "h5_readwrite.h"
#include "h5u_readwrite.h"
#include "h5u_model.h"
#include "h5t_core.h"
+14 -3
View File
@@ -23,15 +23,15 @@ h5_int64_t
h5_set_stepname_fmt (
h5_file_t * const f,
const char *name,
const h5_int64_t width
int width
);
h5_err_t
h5_get_stepname_fmt (
h5_file_t * const f,
char *name,
const h5_size_t l_name,
h5_size_t *width
int l_name,
int *width
);
h5_err_t
@@ -39,6 +39,16 @@ h5priv_close_step (
h5_file_t * const f
);
int
h5_get_num_procs (
h5_file_t* const f
);
h5_size_t
h5_get_num_steps (
h5_file_t* const f
);
h5_int64_t
h5_has_step (
h5_file_t * const f,
@@ -59,4 +69,5 @@ h5_err_t
h5_traverse_steps (
h5_file_t * f /*!< file handle */
);
#endif
+22
View File
@@ -37,4 +37,26 @@ h5_has_index (
h5_int64_t step /*!< [in] Step number to query */
);
h5_err_t
h5_normalize_dataset_name (
const char *name,
char *name2
);
h5_err_t
h5_set_throttle (
h5_file_t *f,
int factor
);
h5_err_t
h5_start_throttle (
h5_file_t *f
);
h5_err_t
h5_end_throttle (
h5_file_t *f
);
#endif
+9 -47
View File
@@ -12,10 +12,13 @@
H5_O_RDWR: dataset may exist
*/
#define H5_O_RDWR 0
#define H5_O_RDONLY 1
#define H5_O_WRONLY 2
#define H5_O_APPEND 3
#define H5_O_RDWR 0x01
#define H5_O_RDONLY 0x02
#define H5_O_WRONLY 0x04
#define H5_O_APPEND 0x08
#define H5_VFD_MPIPOSIX 0x10
#define H5_VFD_INDEPENDENT 0x20
#define H5_ID_T H5T_NATIVE_INT64
#define H5_FLOAT64_T H5T_NATIVE_DOUBLE
@@ -51,6 +54,8 @@ typedef h5_float64_t h5_coord3d_t[3];
struct h5_file;
typedef struct h5_file h5_file_t;
typedef h5_err_t (*h5_errorhandler_t)(
struct h5_file * const,
const char*,
@@ -69,49 +74,6 @@ typedef struct h5_idlist {
struct h5_idmap;
typedef struct h5_idmap h5_idmap_t;
/**
\struct h5_file
This is an essentially opaque datastructure that
acts as the filehandle for all practical purposes.
It is created by H5PartOpenFile<xx>() and destroyed by
H5PartCloseFile().
*/
struct h5_file {
hid_t file; /* file id -> fid */
unsigned mode; /* file access mode */
int empty;
h5_err_t __errno; /* error number */
const char * __funcname; /* H5Block/Fed/Part API function*/
/* MPI */
MPI_Comm comm; /* MPI communicator */
int nprocs; /* number of processors */
int myproc; /* The index of the processor
this process is running on. */
/* HDF5 */
hid_t xfer_prop; /* file transfer properties */
hid_t create_prop; /* file create properties */
hid_t access_prop; /* file access properties */
hid_t root_gid; /* id of root group */
hid_t step_gid; /* id of current step */
/* step internal data */
char prefix_step_name[256]; /* Prefix of step name */
int width_step_idx; /* pad step index with 0 up to this */
char step_name[128]; /* full step name */
h5_int64_t step_idx; /* step index */
int is_new_step;
struct h5u_fdata *u;
struct h5b_fdata *b;
struct h5t_fdata *t;
};
typedef struct h5_file h5_file_t;
enum h5_oid {
H5_OID_VERTEX = 1,
+3
View File
@@ -1,6 +1,9 @@
#ifndef __H5T_CORE_H
#define __H5T_CORE_H
#include <limits.h>
#include <stdint.h>
#include "h5t_adjacencies.h"
#include "h5t_inquiry.h"
#include "h5t_map.h"
+2 -2
View File
@@ -15,13 +15,13 @@ h5t_get_num_levels (
h5_size_t
h5t_get_num_elems (
h5_file_t * const f,
const h5_id_t cnode_id
const h5_id_t cnode
);
h5_size_t
h5t_get_num_vertices (
h5_file_t * const f,
const h5_id_t cnode_id
const h5_id_t cnode
);
h5_id_t
+74
View File
@@ -0,0 +1,74 @@
#ifndef __H5U_MODEL_H
#define __H5U_MODEL_H
h5_int64_t
h5u_get_num_particles (
h5_file_t *f
);
h5_int64_t
h5u_set_num_particles (
h5_file_t *f,
h5_int64_t nparticles,
h5_int64_t stride
);
h5_int64_t
h5u_has_view (
h5_file_t *f
);
h5_int64_t
h5u_reset_view (
h5_file_t *f
);
h5_int64_t
h5u_set_view (
h5_file_t *f,
h5_int64_t start,
h5_int64_t end
);
h5_int64_t
h5u_set_view_indices (
h5_file_t *f,
const h5_int64_t *indices,
h5_int64_t nelems
);
h5_int64_t
h5u_get_view (
h5_file_t *f,
h5_int64_t *start,
h5_int64_t *end
);
h5_int64_t
h5u_set_canonical_view (
h5_file_t *f
);
h5_int64_t
h5u_get_num_datasets (
h5_file_t *f
);
h5_int64_t
h5u_get_dataset_info (
h5_file_t *f,
const h5_int64_t idx,
char *dataset_name,
const h5_int64_t len_dataset_name,
h5_int64_t *type,
h5_int64_t *nelem
);
h5_err_t
h5u_set_chunk (
h5_file_t *f,
h5_size_t size
);
#endif
+2 -50
View File
@@ -2,24 +2,13 @@
#define __H5U_READWRITE_H
h5_int64_t
h5u_get_num_elems (
h5_file_t *f
);
h5_int64_t
h5u_read_elems (
h5u_read_data (
h5_file_t *f,
const char *name,
void *array,
const hid_t type
);
h5_int64_t
h5u_set_num_elements (
h5_file_t *f,
h5_int64_t nparticles
);
h5_int64_t
h5u_write_data (
h5_file_t *f,
@@ -28,42 +17,5 @@ h5u_write_data (
const hid_t type
);
h5_int64_t
h5u_has_view (
h5_file_t *f
);
h5_int64_t
h5u_reset_view (
h5_file_t *f
);
h5_int64_t
h5u_set_view (
h5_file_t *f,
h5_int64_t start,
h5_int64_t end
);
h5_int64_t
h5u_get_view (
h5_file_t *f,
h5_int64_t *start,
h5_int64_t *end
);
h5_int64_t
h5u_set_canonical_view (
h5_file_t *f
);
h5_int64_t
h5u_get_dataset_info (
h5_file_t *f,
const h5_int64_t idx,
char *dataset_name,
const h5_int64_t len_dataset_name,
h5_int64_t *type,
h5_int64_t *nelem
);
#endif
+7 -6
View File
@@ -42,7 +42,7 @@ _write_data (
h5_int64_t j_dims = layout->j_end - layout->j_start + 1;
h5_int64_t k_dims = layout->k_end - layout->k_start + 1;
printf ( "Writing Step #%lld\n", (long long)f->step_idx );
//printf ( "Writing Step #%lld\n", (long long)f->step_idx );
data = malloc ( i_dims * j_dims * k_dims * sizeof ( *data ) );
for ( i = 0; i < i_dims; i++ ) {
@@ -130,21 +130,22 @@ _write_file (
myproc, fname );
#ifdef PARALLEL_IO
f = H5PartOpenFileParallel (
f = H5OpenFile (
fname,
H5_O_WRONLY,
comm
);
#else
f = H5PartOpenFile (
f = H5OpenFile (
fname,
H5_O_WRONLY
H5_O_WRONLY,
0
);
#endif
if ( f == NULL ) return -1;
herr = H5PartSetStep ( f, timestep );
herr = H5SetStep ( f, timestep );
if ( herr < 0 ) return herr;
if ( _write_data ( f, myproc, layout ) < 0 ) {
@@ -179,7 +180,7 @@ _read_data (
h5_int64_t j_dims = layout->j_end - layout->j_start + 1;
h5_int64_t k_dims = layout->k_end - layout->k_start + 1;
printf ( "Reading Step #%lld\n", (long long)f->step_idx );
//printf ( "Reading Step #%lld\n", (long long)f->step_idx );
data = malloc ( i_dims * j_dims * k_dims * sizeof ( *data ) );