repo migration

This commit is contained in:
2006-09-11 20:19:10 +00:00
commit 41511462f9
167 changed files with 64535 additions and 0 deletions
+235
View File
@@ -0,0 +1,235 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
/* #include <mpio.h> */
#include <unistd.h>
#include <sys/types.h>
#ifndef PARALLEL_IO
#define PARALLEL_IO
#endif
#ifndef DISABLE_H5PART
#include "H5Part.h"
#endif
#define FILENAME "testio"
/* normally 64 steps for real benchmark */
#define NSTEPS 5
/* normally 51e6 for real benchmark */
#define NPARTICLES 51e4
#define NTRIALS 3
/*
bench <nParticles>
*/
/* I/O bandwidth benchmark.  Writes NSTEPS steps of 6 double-precision
 * particle attributes (x,y,z,px,py,pz) three different ways --
 * collective MPI-IO, one POSIX file per process, and H5Part -- and
 * reports aggregate and per-task MB/s for each, NTRIALS times.
 * Usage: bench <nParticles>   (run under mpirun)
 * NOTE(review): argv[1] is parsed without checking argc > 1.
 * NOTE(review): npdims, id, err1, err2, bogusinfo, comm and the
 * NPARTICLES macro are unused; chunktype is never MPI_Type_free'd and
 * info is never MPI_Info_free'd. */
int main(int argc,char *argv[]){
MPI_Info info;
int npdims=1;
int nprocs,rank;
int trial;
int i,j,n; /* iteration variables */
double starttime,curtime, endtime;
int nparticles = atoi(argv[1]);
double *x,*y,*z,*px,*py,*pz;
typedef double *ddouble;
ddouble data[6];
int64_t *id;
MPI_Datatype chunktype;
int offset;
int localnp;
int err1,err2;
char filename[128]; /*= FILENAME; */
#ifndef DISABLE_H5PART
H5PartFile *f;
#endif
char newfilename[128];
FILE *fd;
MPI_File file;
MPI_Info bogusinfo;
MPI_Offset foffset;
MPI_Comm comm,dcomm = MPI_COMM_WORLD;
MPI_Init(&argc,&argv);
MPI_Comm_rank(dcomm,&rank);
MPI_Comm_size(dcomm,&nprocs);
/* Even 1-D decomposition: each rank owns localnp contiguous
 * particles starting at element `offset` of the global array.
 * NOTE(review): integer division discards any remainder particles. */
localnp=nparticles/(int64_t)nprocs;
for(offset=0,i=0;i<rank;i++){
offset+=localnp;
}
/* One buffer per attribute; also reachable by index via data[]. */
data[0]=x=(double*)malloc(sizeof(double)*(size_t)localnp);
data[1]=y=(double*)malloc(sizeof(double)*(size_t)localnp);
data[2]=z=(double*)malloc(sizeof(double)*(size_t)localnp);
data[3]=px=(double*)malloc(sizeof(double)*(size_t)localnp);
data[4]=py=(double*)malloc(sizeof(double)*(size_t)localnp);
data[5]=pz=(double*)malloc(sizeof(double)*(size_t)localnp);
/* printf("about to call create subarray with nparticles=%u localnp=%u offset=%u\n",
nparticles,localnp,offset); */
/* Describe this rank's slice of the 1-D global array so the MPI-IO
 * file view scatters each rank's writes to the right file region. */
MPI_Type_create_subarray(1, /* rank */
&nparticles, /* size of the global array */
&localnp, /* size of my local chunk */
&offset, /* offset of this chunk in global */
MPI_ORDER_FORTRAN, /* fortran storage order */
MPI_DOUBLE,
&chunktype);
MPI_Type_commit(&chunktype);
MPI_Info_create(&info);
if(rank==0) printf("Nprocs=%u Particles=%u*6attribs*sizeof(double) Particles/proc=%u Nsteps=%u Ntrials=%u\n",
nprocs,nparticles,localnp,NSTEPS,NTRIALS);
for(trial=0;trial<NTRIALS;trial++){
if(rank==0) printf("---------------------- Trial %u of %u ---------------------\n",trial+1,NTRIALS);
/* ---- Method 1: collective MPI-IO into one shared file ---- */
MPI_Barrier(MPI_COMM_WORLD); /* to prevent unlink from interfering with file open */
sprintf(filename,"%s.%u.mpio.dat",FILENAME,nprocs);
if(rank==0) unlink(filename);
MPI_Barrier(MPI_COMM_WORLD); /* to prevent unlink from interfering with file open */
MPI_File_open(MPI_COMM_WORLD,filename,
MPI_MODE_CREATE | MPI_MODE_RDWR,
info,&file);
MPI_File_set_view(file,0,MPI_DOUBLE,chunktype,"native",info);
/* now a barrier to get the start timers roughly synced*/
MPI_Barrier(MPI_COMM_WORLD);
curtime = starttime = MPI_Wtime();
endtime = starttime+5.0*60.0; /* end in 5 minutes */
/* NOTE(review): this broadcast endtime is overwritten after the
 * loop and never used as a deadline. */
MPI_Bcast(&endtime,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
/* must touch the entire array after each write */
/* ensures cache-invalidation */
foffset=0;
i=0;
curtime=starttime;
for(i=0;i<NSTEPS;i++){
int n;
MPI_Status status;
for(j=0;j<6;j++){
/* touch data */
for(n=0;n<localnp;n++)
(data[j])[n]=(double)rank;
/* write to that file */
/* MPI_File_set_view(file,foffset,MPI_DOUBLE,chunktype,"native",info);*/
MPI_File_write_at_all(file,
foffset,
data[j],
localnp,
MPI_DOUBLE,&status);
/* Advance by one global attribute-array's worth of elements
 * (offsets are in units of the etype, MPI_DOUBLE). */
foffset+=nparticles/nprocs;
}
curtime=MPI_Wtime(); /* ensure no race condition by broadcasting time */
MPI_Bcast(&curtime,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
}
MPI_File_close(&file);
MPI_Barrier(MPI_COMM_WORLD);
endtime=MPI_Wtime();
sprintf(filename,"%s.%u.h5.dat",FILENAME,nprocs);
/* foffset*=nprocs; if we want total megabytes written */
if(rank==0){
puts("*");
unlink(filename);
puts("======================================================");
printf("Raw MPI-IO Total Duration %lf seconds, iterations=%u %lf Megabytes written per processor Nprocs= %u \n",
(endtime-starttime),i,((double)foffset)/(1024.0*1024.0),nprocs);
printf("Raw MPI-IO Effective Data Rate = %lf Megabytes/sec global and %lf Megabytes/sec per task Nprocs= %u \n",
(double)(nprocs*localnp*sizeof(double))*((double)NSTEPS)*6.0/((endtime-starttime)*1024.0*1024.0),
(double)(localnp*sizeof(double))*((double)NSTEPS)*6.0/((endtime-starttime)*1024.0*1024.0),nprocs);
puts("======================================================");
}
/* ---- Method 2: one private POSIX stream file per process ---- */
MPI_Barrier(MPI_COMM_WORLD); /* to prevent unlink from interfering with file open */
/* OK, now we do this using H5Part */
sprintf(newfilename,"testio%u.%u.dat",rank,nprocs);
unlink(newfilename);
MPI_Barrier(MPI_COMM_WORLD); /* to prevent unlink from interfering with file open */
fd = fopen(newfilename,"w");
/* start the timer */
starttime=endtime=MPI_Wtime();
for(i=0;i<NSTEPS;i++){
for(j=0;j<6;j++){
/* touch data */
for(n=0;n<localnp;n++)
(data[j])[n]=(double)rank;
fwrite(data[j],sizeof(double),localnp,fd);
}
curtime=MPI_Wtime(); /* ensure no race condition by broadcasting time */
MPI_Bcast(&curtime,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
}
fclose(fd);
MPI_Barrier(MPI_COMM_WORLD);
endtime=MPI_Wtime();
if(rank==0) puts("*");
MPI_Barrier(MPI_COMM_WORLD); /* to prevent unlink from interfering with file open */
unlink(newfilename);
MPI_Barrier(MPI_COMM_WORLD);
if(rank==0){
puts("======================================================");
printf("Raw 1-file-per-proc Total Duration %lf seconds, iterations=%u %lf Megabytes written Nprocs= %u \n",
(endtime-starttime),NSTEPS,((double)foffset)/(1024.0*1024.0),nprocs);
printf("Raw 1-file-per-proc Effective Data Rate = %lf Megabytes/sec global and %lf Megabytes/sec per task Nprocs= %u \n",
(double)(nprocs*localnp*sizeof(double))*((double)NSTEPS)*6.0/((endtime-starttime)*1024.0*1024.0),
(double)(localnp*sizeof(double))*((double)NSTEPS)*6.0/((endtime-starttime)*1024.0*1024.0),nprocs);
puts("======================================================");
}
/* ---- Method 3: H5Part collective HDF5 output ---- */
#ifndef DISABLE_H5PART
MPI_Barrier(MPI_COMM_WORLD); /* to prevent unlink from interfering with file open */
/* OK, now we do this using H5Part */
f = H5PartOpenFileParallel(filename,H5PART_WRITE,MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD); /* to prevent unlink from interfering with file open */
/* start the timer */
starttime=endtime=MPI_Wtime();
H5PartSetNumParticles(f,localnp);
for(i=0;i<NSTEPS;i++){
for(j=0;j<6;j++){
/* touch data */
for(n=0;n<localnp;n++)
(data[j])[n]=(double)rank;
}
H5PartSetStep(f,i);
H5PartWriteDataFloat64(f,"x",x);
H5PartWriteDataFloat64(f,"y",y);
H5PartWriteDataFloat64(f,"z",z);
H5PartWriteDataFloat64(f,"px",px);
H5PartWriteDataFloat64(f,"py",py);
H5PartWriteDataFloat64(f,"pz",pz);
curtime=MPI_Wtime(); /* ensure no race condition by broadcasting time */
MPI_Bcast(&curtime,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
}
H5PartCloseFile(f);
MPI_Barrier(MPI_COMM_WORLD);
endtime=MPI_Wtime();
if(rank==0){
puts("*");
unlink(filename);
puts("======================================================");
printf("H5Part Total Duration %lf seconds, iterations=%u %lf Megabytes written Nprocs= %u \n",
(endtime-starttime),NSTEPS,((double)foffset)/(1024.0*1024.0),nprocs);
printf("H5Part Effective Data Rate = %lf Megabytes/sec global and %lf Megabytes/sec per task Nprocs= %u \n",
(double)(nprocs*localnp*sizeof(double))*((double)NSTEPS)*6.0/((endtime-starttime)*1024.0*1024.0),
(double)(localnp*sizeof(double))*((double)NSTEPS)*6.0/((endtime-starttime)*1024.0*1024.0),nprocs);
puts("======================================================");
}
MPI_Barrier(MPI_COMM_WORLD);
#endif
} /* trials */
MPI_Finalize();
}
+207
View File
@@ -0,0 +1,207 @@
#include <stdio.h>
#include <stdlib.h>
#include <hdf5.h>
#include "H5Part.h"
#include "H5Block.h"
#if 0
/* (Currently compiled out by the enclosing #if 0.)
 * Re-reads a particle file written by WriteFile: lists the datasets of
 * step 0, reads x/y/z/px/py/pz/id for every step and prints simple
 * checksums, then exercises H5Block scalar-field reads with two
 * different layouts.  Returns 1 on success, exits on block errors.
 * NOTE(review): H5PartOpenFile's result is not checked for NULL, and
 * several printf specifiers are wrong: %d is used for h5part_int64_t
 * (nt, nds, n) and for double values (sumx, sumpz, x[0], x[n-1]) --
 * these would print garbage if this code were enabled. */
int ReadFile(const char *fn){
char name[64];
H5PartFile *file;
h5part_int64_t i,t,nt,nds,myproc;
unsigned int steps;
printf ( "Open %s\n", fn );
file = H5PartOpenFile (fn, H5PART_READ);
nt=H5PartGetNumSteps(file);
H5PartSetStep(file,0);
nds=H5PartGetNumDatasets(file);
printf ( "Timesteps = %d; dataSets per timestep = %d\n", nt, nds );
printf ( "\n\n===============================" );
for(i=0;i<nds;i++){
H5PartGetDatasetName(file,i,name,64);
printf("\tDataset[%u] name=[%s]\n",
i,name);
}
printf ( "===============================\n\n" );
for (steps=0; steps<nt; steps++) {
H5PartSetStep(file,steps);
h5part_int64_t n = H5PartGetNumParticles(file);
printf ( "number of particles this step = %d\n", n );
double *x = malloc ( n * sizeof ( *x ) );
double *y = malloc ( n * sizeof ( *y ) );
double *z = malloc ( n * sizeof ( *z ) );
double *px= malloc ( n * sizeof ( *px ) );
double *py= malloc ( n * sizeof ( *py ) );
double *pz= malloc ( n * sizeof ( *pz ) );
h5part_int64_t *id = malloc ( n * sizeof ( *id ) );
H5PartReadParticleStep(file,steps,x,y,z,px,py,pz,id);
/* Simple sanity sums over the attributes just read. */
double sumx = 0.0;
double sumpz = 0.0;
for ( i=0; i<n; i++ ) {
sumx += x[i];
sumpz += pz[i];
}
printf ("\tstep=%d sum(x)= %d sum(pz)=%d\n",
steps, sumx, sumpz );
printf ("\tfirst x is %d\tlast x is %d\n",
x[0], x[n-1] );
printf ("\tFor fake data, expect sumx to be = %f\n",
x[0]*((double)n) );
free ( x );
free ( y );
free ( z );
free ( px );
free ( py );
free ( pz );
free ( id );
}
/* H5Block part: read the "test" scalar field back with the original
 * 4x6x8 layout and verify the pattern, then with a smaller 3x3x3
 * layout and just print the values. */
{
h5part_float64_t *data = malloc ( 4*6*8* sizeof(*data) );
h5part_int64_t herr;
h5part_int64_t i, j, k;
h5part_int64_t i_max, j_max, k_max;
i_max = 3;
j_max = 5;
k_max = 7;
herr = H5BlockOpen ( file );
if ( herr < 0 ) {
printf ("Ops!\n");
exit ( 2 );
}
herr = H5BlockDefine3DFieldLayout ( file, 0, i_max, 0, j_max, 0, k_max );
if ( herr < 0 ) {
printf ("Ops!\n");
exit ( 2 );
}
herr = H5Block3dReadScalarField ( file, "test", data );
if ( herr < 0 ) {
printf ("Ops!\n");
exit ( 2 );
}
/* NOTE(review): WriteFile stores i + 10*j + 100*k (+1000*proc); the
 * check below expects i + 100*j + 10000*k -- confirm which is right. */
for ( i = 0; i <= i_max; i++ ) {
for ( j = 0; j <= j_max; j++ ) {
for ( k = 0; k <= k_max; k++ ) {
if ( *(data+ i + j*(i_max+1) + k*(i_max+1)*(j_max+1)) != i + 100*j + 10000*k ) {
printf ( "Block data error!\n" );
exit ( 2 );
}
}
}
}
i_max = 2;
j_max = 2;
k_max = 2;
herr = H5BlockDefine3DFieldLayout ( file, 0, i_max, 0, j_max, 0, k_max );
if ( herr < 0 ) {
printf ("Ops!\n");
exit ( 2 );
}
herr = H5Block3dReadScalarField ( file, "test", data );
if ( herr < 0 ) {
printf ("Ops!\n");
exit ( 2 );
}
for ( i = 0; i <= i_max; i++ ) {
for ( j = 0; j <= j_max; j++ ) {
for ( k = 0; k <= k_max; k++ ) {
printf ( "%f\n", *(data+ i + j*(i_max+1) + k*(i_max+1)*(j_max+1)) );
}
}
}
herr = H5BlockClose ( file );
if ( herr < 0 ) {
printf ("Ops!\n");
exit ( 2 );
}
free ( data );
}
H5PartCloseFile(file);
return 1;
}
#endif
/* Write `timesteps` steps of a 3-D scalar field "test" to fn using
 * H5Block.  Each rank fills a 4x6x8 chunk with a rank-tagged pattern
 * (i + 10*j + 100*k + 1000*myproc) and places it at i-offset
 * 2*myproc in the global layout.
 * Returns 1 on success, -1 if the file cannot be opened or memory
 * cannot be allocated; exits with status 2 on H5Part/H5Block errors
 * (matching the original behavior).
 * Fixes: the results of H5PartOpenFileParallel and malloc were used
 * unchecked. */
int WriteFile(const char *fn){
	H5PartFile *f;
	h5part_int64_t i, j, k;
	int timestep;
	int timesteps = 5;
	h5part_int64_t herr;
	h5part_float64_t *data;
	h5part_int64_t i_dims, j_dims, k_dims;

	printf ("Open %s\n", fn );
	f = H5PartOpenFileParallel ( fn, H5PART_WRITE, MPI_COMM_WORLD );
	if ( f == NULL ) return -1;		/* was dereferenced unchecked */
	herr = H5BlockOpen ( f );
	if ( herr < 0 ) exit ( 2 );
	for ( timestep = 0; timestep < timesteps; timestep++){
		herr = H5PartSetStep ( f, timestep );
		if ( herr < 0 ) exit ( 2 );
		printf ( "Write Step %d\n", timestep );
		i_dims = 4;
		j_dims = 6;
		k_dims = 8;
		data = (h5part_float64_t*) malloc ( i_dims * j_dims * k_dims * sizeof(*data) );
		if ( data == NULL ) return -1;	/* was written unchecked */
		/* Fill with a pattern that encodes (i,j,k) and the rank. */
		for ( i = 0; i < i_dims; i++ ) {
			for ( j = 0; j < j_dims; j++ ) {
				for ( k = 0; k < k_dims; k++ ) {
					*(data+ i + j*i_dims + k*i_dims*j_dims) = i+ 10*j + 100*k + 1000*f->myproc;
				}
			}
		}
		herr = H5BlockDefine3DFieldLayout ( f, f->myproc*2, f->myproc*2+i_dims-1, 0, j_dims-1, 0, k_dims-1 );
		if ( herr < 0 ) exit ( 2 );
		herr = H5Block3dWriteScalarField ( f, "test", data );
		if ( herr < 0 ) exit ( 2 );
		free ( data );
	}
	herr = H5BlockClose ( f );
	if ( herr < 0 ) exit ( 2 );
	H5PartCloseFile ( f );
	return 1;
}
/* Entry point: write the H5Block test file in parallel (the read-back
 * path is currently compiled out).  Exits with 2 on failure.
 * Fix: MPI_Finalize was never called, so ranks terminated without
 * shutting down MPI cleanly. */
int main(int argc,char *argv[]){
	char *fn = "testfile.h5";
	MPI_Init(&argc,&argv);
	H5PartSetVerbosityLevel ( 40 );
	if(!WriteFile(fn)){
		printf ("Failed to write file %s\n", fn );
		exit ( 2 );
	}
#if 0
	if(!ReadFile(fn)){
		printf ("Failed to read file %s\n", fn );
		exit ( 2 );
	}
#endif
	MPI_Finalize();	/* was missing */
	return 0;
}
+224
View File
@@ -0,0 +1,224 @@
#include <stdio.h>
#include <stdlib.h>
#include <hdf5.h>
#include "H5Part.h"
#include "H5Block.h"
#include <mpi.h>
/* Linearize (i,j,k) grid coordinates with i varying fastest, then j,
 * then k.  z_extent is accepted only for call-site symmetry; it does
 * not enter the computation. */
static h5part_int64_t
calc_index_KJI (
	h5part_int64_t i,
	h5part_int64_t x_extent,
	h5part_int64_t j,
	h5part_int64_t y_extent,
	h5part_int64_t k,
	h5part_int64_t z_extent
	) {
	return i + x_extent * ( j + y_extent * k );
}
/* Define this process's sub-block of the field for H5Block access.
 * The domain is decomposed along k only: rank `myproc` owns the
 * 64x64x64 brick with k in [64*myproc, 64*myproc+63].  The local
 * extents (always 64,64,64 here) are written into dims[0..2],
 * overwriting whatever the caller passed in.
 * Returns 0 on success, -1 if H5BlockDefine3DFieldLayout fails. */
static int
_define_3dlayout (
H5PartFile *f,
h5part_int64_t *dims,
int myproc
) {
h5part_int64_t i_start = 0;
h5part_int64_t i_end = 63;
h5part_int64_t j_start = 0;
h5part_int64_t j_end = 63;
h5part_int64_t k_start = 64 * myproc;
h5part_int64_t k_end = k_start + 63;
herr_t herr = H5BlockDefine3DFieldLayout ( f,
i_start, i_end,
j_start, j_end,
k_start, k_end );
if ( herr < 0 ) return -1;
dims[0] = i_end - i_start + 1;
dims[1] = j_end - j_start + 1;
dims[2] = k_end - k_start + 1;
return 0;
}
/* Read the scalar field `name` into a local dims[0]xdims[1]xdims[2]
 * buffer and print it, one (i,j) row of k-values per line.
 * Returns 0 on success, -1 on allocation or read failure.
 * Fixes: the buffer was never freed (leaked on both the success and
 * error paths) and malloc was unchecked.  myproc is unused but kept
 * for interface compatibility. */
static int
_read3d_scalar (
	H5PartFile *f,
	const char *name,
	h5part_int64_t *dims,
	int myproc
	) {
	h5part_float64_t *data;
	int i, j, k, idx;
	herr_t herr;

	data = (h5part_float64_t*) malloc (
		dims[0] * dims[1] * dims[2] * sizeof(*data) );
	if ( data == NULL ) return -1;		/* was unchecked */
	herr = H5Block3dReadScalarField ( f, name, data );
	if ( herr < 0 ) {
		free ( data );			/* was leaked on error */
		return -1;
	}
	for ( i = 0; i < dims[0]; i++ ) {
		for ( j = 0; j < dims[1]; j++ ) {
			printf ("(%d,%d,0): ", i, j );
			for ( k = 0; k < dims[2]; k++ ) {
				idx = calc_index_KJI ( i, dims[0], j, dims[1], k, dims[2] );
				printf ( "%.0f ", *(data+idx) );
			}
			printf ( "\n" );
		}
	}
	free ( data );				/* was leaked on success */
	return 0;
}
/* Read the 3-vector field `name` into three local component buffers
 * (the data is only read, not inspected).  Returns 0 on success, -1
 * on allocation or read failure.
 * Fixes: all three buffers were leaked on every path and the mallocs
 * were unchecked. */
static int
_read3d_3d_vector (
	H5PartFile *f,
	const char *name,
	h5part_int64_t *dims
	) {
	h5part_float64_t *x_data;
	h5part_float64_t *y_data;
	h5part_float64_t *z_data;
	h5part_int64_t nelems = dims[0] * dims[1] * dims[2];
	int rc = -1;

	x_data = (h5part_float64_t*) malloc ( nelems * sizeof(*x_data) );
	y_data = (h5part_float64_t*) malloc ( nelems * sizeof(*y_data) );
	z_data = (h5part_float64_t*) malloc ( nelems * sizeof(*z_data) );
	if ( x_data && y_data && z_data ) {
		herr_t herr = H5Block3dRead3dVectorField ( f, name,
			x_data, y_data, z_data );
		if ( herr >= 0 ) rc = 0;
	}
	free ( x_data );	/* all three were leaked before */
	free ( y_data );
	free ( z_data );
	return rc;
}
/* Query field #idx of the current time-step, print its name and grid
 * geometry, and read its contents: scalar fields go through
 * _read3d_scalar, 3-vector fields through _read3d_3d_vector.  Only
 * 3-D grids are handled; other ranks are reported and skipped.
 * Returns 0, or -1 if H5BlockGetFieldInfo fails.
 * NOTE(review): the return values of _define_3dlayout and both read
 * helpers are ignored here. */
static int
_read_field (
H5PartFile *f,
h5part_int64_t idx,
int myproc
) {
char name[256];
h5part_int64_t grid_rank;
h5part_int64_t grid_dims[16];
h5part_int64_t field_dims;
herr_t herr = H5BlockGetFieldInfo (
f,
idx,
name, sizeof(name),
&grid_rank,
grid_dims,
&field_dims );
if ( herr < 0 ) return -1;
printf ( "Field #%lld has name \"%s\"\n", (long long) idx, name );
switch ( grid_rank ) {
case 3:
printf ( "\tGrid dimension of \"%s\" is 3\n", name );
printf ( "\tThe dimesion sizes are: (%lld, %lld, %lld)\n",
(long long)grid_dims[0], (long long)grid_dims[1], (long long)grid_dims[2] );
if ( field_dims == 1 ) {
printf ( "\tField data are scalar\n" );
} else {
printf ( "\tField data are %lld vectors\n",
(long long)field_dims );
}
/* Replaces grid_dims with this rank's local 64x64x64 extents. */
_define_3dlayout ( f, grid_dims, myproc );
if ( field_dims == 1 ) {
_read3d_scalar ( f, name, grid_dims, myproc );
} else if ( field_dims == 3 ) {
_read3d_3d_vector ( f, name, grid_dims );
}
break;
default:
printf ( "unknown grid rank %lld!!!\n", (long long)grid_rank );
}
return 0;
}
/* Open fn for parallel reading and dump every field of every
 * time-step via _read_field.  Returns H5PartCloseFile's status on
 * success, -1 on any error.
 * Fix: the error paths inside the loop returned without closing the
 * file, leaking the handle; the file is now always closed. */
static int
ReadFile (
	const char *fn,
	MPI_Comm comm,
	int myproc
	) {
	H5PartFile *f;
	h5part_int64_t herr;
	h5part_int64_t timestep;
	h5part_int64_t timesteps;
	h5part_int64_t i, n;

	printf ("Opening file %s for reading\n", fn );
	f = H5PartOpenFileParallel ( fn, H5PART_READ, comm );
	if ( f == NULL ) return -1;
	timesteps = H5PartGetNumSteps ( f );
	printf ( "Timesteps = %lld\n", (long long)timesteps );
	printf ( "\n===============================\n" );
	for ( timestep = 0; timestep < timesteps; timestep++) {
		herr = H5PartSetStep ( f, timestep );
		if ( herr < 0 ) goto error;	/* was: returned with f open */
		n = H5BlockGetNumFields ( f );
		if ( n < 0 ) goto error;	/* was: returned with f open */
		printf ( "Number of fields in time-step #%lld: %lld \n",
			(long long)timestep, (long long)n );
		for ( i = 0; i < n; i++ ) {
			_read_field ( f, i, myproc );
		}
	}
	return H5PartCloseFile ( f );
error:
	H5PartCloseFile ( f );
	return -1;
}
/* Entry point: initialize MPI and read blockfile1.h5 in parallel.
 * Exits with 2 on read failure.
 * Fixes: ReadFile was called with a hard-coded rank of 1 instead of
 * this process's rank, so every rank tried to read rank 1's layout;
 * MPI_Finalize was never called on the success path. */
int
main (
	int argc,
	char **argv
	) {
	char *fn = "blockfile1.h5";
	int myproc;
	int nprocs;
	MPI_Comm comm = MPI_COMM_WORLD;

	H5PartSetVerbosityLevel ( 10 );
	MPI_Init(&argc,&argv);
	MPI_Comm_size(comm,&nprocs);
	MPI_Comm_rank(comm,&myproc);
	if ( ReadFile ( fn, comm, myproc ) < 0 ){	/* was: literal 1 */
		printf ("Failed to read file %s\n", fn );
		exit ( 2 );
	}
	MPI_Finalize ();	/* was missing */
	return 0;
}
+311
View File
@@ -0,0 +1,311 @@
#include <stdio.h>
#include <stdlib.h>
#include <hdf5.h>
#include "H5Part.h"
#include "H5Block.h"
/* Convert (i,j,k) grid coordinates to a flat array offset; i is the
 * fastest-varying index, k the slowest.  z_extent is unused -- it is
 * kept so all three extents can be passed uniformly. */
static h5part_int64_t
calc_index_KJI (
	h5part_int64_t i,
	h5part_int64_t x_extent,
	h5part_int64_t j,
	h5part_int64_t y_extent,
	h5part_int64_t k,
	h5part_int64_t z_extent
	) {
	return i + x_extent * ( j + y_extent * k );
}
/* Declare the whole dims[0] x dims[1] x dims[2] grid as this
 * process's H5Block layout.  Returns 0 on success, -1 on failure. */
static int
_define_3dlayout (
	H5PartFile *f,
	h5part_int64_t *dims
	) {
	herr_t herr = H5BlockDefine3DFieldLayout ( f,
		0, dims[0]-1,
		0, dims[1]-1,
		0, dims[2]-1 );
	return ( herr < 0 ) ? -1 : 0;
}
/* Exercise layout redefinition on the scalar field `name`: read it
 * with the full dims[] layout, re-read a 3x4x5 window at (1..3, 1..4,
 * 0..4) and print it, then restore the full layout, re-read and print
 * the whole field.  Returns 0 on success, -1 on failure.
 * Fixes: the buffer was never freed (leaked on every path) and malloc
 * was unchecked. */
static int
_read3d_scalar (
	H5PartFile *f,
	const char *name,
	h5part_int64_t *dims
	) {
	h5part_float64_t *data;
	int i, j, k, idx;
	herr_t herr;

	data = (h5part_float64_t*) malloc (
		dims[0] * dims[1] * dims[2] * sizeof(*data) );
	if ( data == NULL ) return -1;		/* was unchecked */
	herr = H5Block3dReadScalarField ( f, name, data );
	if ( herr < 0 ) goto error;
	/* Smaller 3x4x5 window. */
	herr = H5BlockDefine3DFieldLayout ( f,
		1, 3,
		1, 4,
		0, 4 );
	if ( herr < 0 ) goto error;
	herr = H5Block3dReadScalarField ( f, name, data );
	if ( herr < 0 ) goto error;
	for ( i = 0; i < 3; i++ ) {
		for ( j = 0; j < 4; j++ ) {
			printf ("(%d,%d,0): ", i, j );
			for ( k = 0; k < 5; k++ ) {
				idx = calc_index_KJI ( i, 3, j, 4, k, 5 );
				printf ( "%.0f ", *(data+idx) );
			}
			printf ( "\n" );
		}
	}
	/* Restore the full layout. */
	herr = H5BlockDefine3DFieldLayout ( f,
		0, dims[0]-1,
		0, dims[1]-1,
		0, dims[2]-1 );
	if ( herr < 0 ) goto error;
	herr = H5Block3dReadScalarField ( f, name, data );
	if ( herr < 0 ) goto error;
	for ( i = 0; i < dims[0]; i++ ) {
		for ( j = 0; j < dims[1]; j++ ) {
			printf ("(%d,%d,0): ", i, j );
			for ( k = 0; k < dims[2]; k++ ) {
				idx = calc_index_KJI ( i, dims[0], j, dims[1], k, dims[2] );
				printf ( "%.0f ", *(data+idx) );
			}
			printf ( "\n" );
		}
	}
	free ( data );		/* was leaked */
	return 0;
error:
	free ( data );		/* was leaked on every error return */
	return -1;
}
/* Read the 3-vector field `name` into three component buffers (read
 * only, not inspected).  Returns 0 on success, -1 on allocation or
 * read failure.
 * Fixes: all three buffers were leaked and the mallocs unchecked. */
static int
_read3d_3d_vector (
	H5PartFile *f,
	const char *name,
	h5part_int64_t *dims
	) {
	h5part_float64_t *x_data;
	h5part_float64_t *y_data;
	h5part_float64_t *z_data;
	h5part_int64_t nelems = dims[0] * dims[1] * dims[2];
	int rc = -1;

	x_data = (h5part_float64_t*) malloc ( nelems * sizeof(*x_data) );
	y_data = (h5part_float64_t*) malloc ( nelems * sizeof(*y_data) );
	z_data = (h5part_float64_t*) malloc ( nelems * sizeof(*z_data) );
	if ( x_data && y_data && z_data ) {
		herr_t herr = H5Block3dRead3dVectorField ( f, name,
			x_data, y_data, z_data );
		if ( herr >= 0 ) rc = 0;
	}
	free ( x_data );	/* all three were leaked before */
	free ( y_data );
	free ( z_data );
	return rc;
}
/* Query field #idx of the current time-step, print its name and grid
 * geometry, and read its contents: scalars via _read3d_scalar,
 * 3-vectors via _read3d_3d_vector.  Only 3-D grids are handled.
 * Returns 0, or -1 if H5BlockGetFieldInfo fails.
 * NOTE(review): the return values of _define_3dlayout and both read
 * helpers are ignored. */
static int
_read_field (
H5PartFile *f,
h5part_int64_t idx
) {
char name[256];
h5part_int64_t grid_rank;
h5part_int64_t grid_dims[16];
h5part_int64_t field_dims;
herr_t herr = H5BlockGetFieldInfo (
f,
idx,
name, sizeof(name),
&grid_rank,
grid_dims,
&field_dims );
if ( herr < 0 ) return -1;
printf ( "Field #%lld has name \"%s\"\n", (long long) idx, name );
switch ( grid_rank ) {
case 3:
printf ( "\tGrid dimension of \"%s\" is 3\n", name );
printf ( "\tThe dimesion sizes are: (%lld, %lld, %lld)\n",
(long long)grid_dims[0], (long long)grid_dims[1], (long long)grid_dims[2] );
if ( field_dims == 1 ) {
printf ( "\tField data are scalar\n" );
} else {
printf ( "\tField data are %lld vectors\n",
(long long)field_dims );
}
_define_3dlayout ( f, grid_dims );
if ( field_dims == 1 ) {
_read3d_scalar ( f, name, grid_dims );
} else if ( field_dims == 3 ) {
_read3d_3d_vector ( f, name, grid_dims );
}
break;
default:
printf ( "unknown grid rank %lld!!!\n", (long long)grid_rank );
}
/* Disabled verification loop kept for reference; it references
 * identifiers (i_dims, data, ...) that no longer exist here. */
#if 0
for ( i = 0; i < i_dims; i++ ) {
for ( j = 0; j < j_dims; j++ ) {
for ( k = 0; k < k_dims; k++ ) {
h5part_int64_t idx;
idx = k + j*k_dims + i*k_dims*j_dims;
if ( *(data+idx) != i + 10*j + 100*k ) {
printf ( "Block data error!\n" );
return -1;
}
}
}
}
free ( data );
#endif
return 0;
}
/* Open fn (serially) and dump every field of every time-step via
 * _read_field.  Returns H5PartCloseFile's status on success, -1 on
 * any error.
 * Fixes: the open was "checked" by testing the still-uninitialized
 * `herr` instead of the returned file pointer (undefined behavior);
 * error paths inside the loop leaked the open file handle. */
static int
ReadFile (
	const char *fn
	) {
	H5PartFile *f;
	h5part_int64_t herr;
	h5part_int64_t timestep;
	h5part_int64_t timesteps;
	h5part_int64_t i, n;

	printf ("Opening file %s\n", fn );
	f = H5PartOpenFile ( fn, H5PART_READ );
	if ( f == NULL ) return -1;	/* was: if ( herr < 0 ) on garbage */
	timesteps = H5PartGetNumSteps ( f );
	printf ( "Timesteps = %lld\n", (long long)timesteps );
	printf ( "\n===============================\n" );
	for ( timestep = 0; timestep < timesteps; timestep++) {
		herr = H5PartSetStep ( f, timestep );
		if ( herr < 0 ) goto error;
		n = H5BlockGetNumFields ( f );
		if ( n < 0 ) goto error;
		printf ( "Number of fields in time-step #%lld: %lld \n",
			(long long)timestep, (long long)n );
		for ( i = 0; i < n; i++ ) {
			_read_field ( f, i );
		}
	}
	return H5PartCloseFile ( f );
error:
	H5PartCloseFile ( f );	/* was leaked on error */
	return -1;
}
/* Write `timesteps` steps of a 4x6x8 scalar field "scalar" plus a
 * 3-vector field "3dVector" (all three components aliasing the same
 * buffer) to fn.  Cell values encode their coordinates as
 * k + 10*j + 100*i.  Returns H5PartCloseFile's status on success, -1
 * on failure.
 * Fixes: the open was "checked" by testing the still-uninitialized
 * `herr` (undefined behavior) instead of the file pointer; malloc was
 * unchecked; `data` and the file handle were leaked on error paths. */
int
WriteFile (
	const char *fn
	) {
	H5PartFile *f;
	h5part_int64_t i, j, k;
	int timestep;
	int timesteps = 5;
	h5part_int64_t herr;
	h5part_float64_t *data;
	h5part_int64_t i_dims, j_dims, k_dims;

	printf ("Opening file \"%s\" for writing\n", fn );
	i_dims = 4;
	j_dims = 6;
	k_dims = 8;
	data = (h5part_float64_t*) malloc (
		i_dims * j_dims * k_dims * sizeof(*data) );
	if ( data == NULL ) return -1;		/* was unchecked */
	f = H5PartOpenFile ( fn, H5PART_WRITE );
	if ( f == NULL ) {			/* was: if ( herr < 0 ) on garbage */
		free ( data );
		return -1;
	}
	for ( timestep = 0; timestep < timesteps; timestep++){
		herr = H5PartSetStep ( f, timestep );
		if ( herr < 0 ) goto error;
		printf ( "Write Step %d\n", timestep );
		for ( i = 0; i < i_dims; i++ ) {
			for ( j = 0; j < j_dims; j++ ) {
				for ( k = 0; k < k_dims; k++ ) {
					h5part_int64_t idx;
					idx = calc_index_KJI ( i, i_dims, j, j_dims, k, k_dims );
					*(data + idx) = k+ 10*j + 100*i;
				}
			}
		}
		herr = H5BlockDefine3DFieldLayout ( f,
			0, i_dims-1,
			0, j_dims-1,
			0, k_dims-1 );
		if ( herr < 0 ) goto error;
		herr = H5Block3dWriteScalarField ( f, "scalar", data );
		if ( herr < 0 ) goto error;
		herr = H5Block3dWrite3dVectorField ( f, "3dVector",
			data, data, data );
		if ( herr < 0 ) goto error;
	}
	free ( data );
	return H5PartCloseFile ( f );
error:
	free ( data );			/* was leaked on error */
	H5PartCloseFile ( f );
	return -1;
}
/* Entry point: write a block test file and read it back.  The file
 * name may be supplied as argv[1]; otherwise a default is used.
 * Exits with 2 on any failure. */
int
main (
	int argc,
	char **argv
	) {
	char dstr[] = "testfile.h5";
	char *fn = ( argc > 1 ) ? argv[1] : dstr;

	H5PartSetVerbosityLevel ( 10 );
	if ( WriteFile ( fn ) < 0 ){
		printf (" Failed to write file %s\n", fn );
		exit ( 2 );
	}
	if ( ReadFile ( fn ) < 0 ){
		printf ("Failed to read file %s\n", fn );
		exit ( 2 );
	}
	return 0;
}
+92
View File
@@ -0,0 +1,92 @@
#include <stdio.h>
#include <stdlib.h>
#include <hdf5.h>
#include "H5Part.h"
#include "H5Block.h"
/* Flatten (i,j,k) grid coordinates into a linear offset; i varies
 * fastest, k slowest.  z_extent is unused and kept only so callers
 * can pass all three extents uniformly. */
static h5part_int64_t
calc_index_KJI (
	h5part_int64_t i,
	h5part_int64_t x_extent,
	h5part_int64_t j,
	h5part_int64_t y_extent,
	h5part_int64_t k,
	h5part_int64_t z_extent
	) {
	return i + x_extent * ( j + y_extent * k );
}
/* Write `timesteps` steps of an i_dims x j_dims x k_dims scalar field
 * "scalar" to fn, each cell valued k + 1000*j + 100000*i.  Returns
 * H5PartCloseFile's status on success, -1 on failure.
 * Fixes: the open was "checked" by testing the still-uninitialized
 * `herr` (undefined behavior) instead of the file pointer; malloc was
 * unchecked; `data` and the file handle were leaked on error paths. */
int
WriteFile (
	const char *fn,
	h5part_int64_t i_dims,
	h5part_int64_t j_dims,
	h5part_int64_t k_dims
	) {
	H5PartFile *f;
	h5part_int64_t i, j, k;
	int timestep;
	int timesteps = 1;
	h5part_int64_t herr;
	h5part_float64_t *data;

	printf ("Opening file \"%s\" for writing\n", fn );
	data = (h5part_float64_t*) malloc (
		i_dims * j_dims * k_dims * sizeof(*data) );
	if ( data == NULL ) return -1;		/* was unchecked */
	f = H5PartOpenFile ( fn, H5PART_WRITE );
	if ( f == NULL ) {			/* was: if ( herr < 0 ) on garbage */
		free ( data );
		return -1;
	}
	for ( timestep = 0; timestep < timesteps; timestep++){
		herr = H5PartSetStep ( f, timestep );
		if ( herr < 0 ) goto error;
		printf ( "Write Step %d\n", timestep );
		for ( i = 0; i < i_dims; i++ ) {
			for ( j = 0; j < j_dims; j++ ) {
				for ( k = 0; k < k_dims; k++ ) {
					h5part_int64_t idx;
					idx = calc_index_KJI ( i, i_dims, j, j_dims, k, k_dims );
					*(data + idx) = k+ 1000*j + 100000*i;
				}
			}
		}
		herr = H5BlockDefine3DFieldLayout ( f,
			0, i_dims-1,
			0, j_dims-1,
			0, k_dims-1 );
		if ( herr < 0 ) goto error;
		herr = H5Block3dWriteScalarField ( f, "scalar", data );
		if ( herr < 0 ) goto error;
	}
	free ( data );
	return H5PartCloseFile ( f );
error:
	free ( data );			/* was leaked on error */
	H5PartCloseFile ( f );
	return -1;
}
/* Entry point: write one 64 x 64 x 512 scalar block file. */
int
main (
	int argc,
	char **argv
	) {
	char *fn = "blockfile1.h5";

	H5PartSetVerbosityLevel ( 10 );
	if ( WriteFile ( fn, 64, 64, 512 ) >= 0 )
		return 0;
	printf (" Failed to write file %s\n", fn );
	exit ( 2 );
}
+111
View File
@@ -0,0 +1,111 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hdf5.h>
#include "H5Part.hh"
#ifdef READTEST
#endif
#ifdef REGRESSIONTEST
/*
A simple regression test that shows how you use this API
to write and read multi-timestep files of particle data.
*/
#ifdef PARALLEL_IO
/* Parallel regression test: every rank writes sz=5 particles (6 float
 * attributes + id) for 5 time-steps to parttest.h5, then reopens the
 * file read-only.  Most of the read-back checks are commented out.
 * NOTE(review): nt and nds are printed below but never assigned
 * (their assignments are commented out), so indeterminate values are
 * printed; file->file is an hid_t printed with %u.  gid is unused,
 * and the buffers are sized sz*nprocs although only sz elements are
 * ever used. */
int main(int argc,char *argv[]){
int sz=5;
double *x,*y,*z;
h5part_int64_t *id;
char name[64];
H5PartFile *file;
int i,t,nt,nds;
int nprocs,myproc;
hid_t gid;
MPI_Comm comm=MPI_COMM_WORLD;
MPI_Init(&argc,&argv);
MPI_Comm_size(comm,&nprocs);
MPI_Comm_rank(comm,&myproc);
x=(double*)malloc(sz*nprocs*sizeof(double));
y=(double*)malloc(sz*nprocs*sizeof(double));
z=(double*)malloc(sz*nprocs*sizeof(double));
id=(h5part_int64_t*)malloc(sz*nprocs*sizeof(h5part_int64_t));
/* parallel file creation */
file=H5PartOpenFileParallel("parttest.h5",H5PART_WRITE,comm);
if(!file) {
perror("File open failed: exiting!");
exit(0);
}
for(t=0;t<5;t++){
MPI_Barrier(comm);
/* Rank-tagged fake data so each rank's particles are identifiable. */
for(i=0;i<sz;i++) {
x[i]=(double)(i+t)+10.0*(double)myproc;
y[i]=0.1 + (double)(i+t);
z[i]=0.2 + (double)(i+t*10);
id[i]=i+sz*myproc;
}
printf("Proc[%u] Writing timestep %u file=%u\n",myproc,t,file->file);
H5PartSetStep(file,t); /* must set the current timestep in file */
H5PartSetNumParticles(file,sz); /* then set number of particles to store */
/* now write different tuples of data into this timestep of the file */
H5PartWriteDataFloat64(file,"x",x);
H5PartWriteDataFloat64(file,"y",y);
H5PartWriteDataFloat64(file,"z",z);
H5PartWriteDataFloat64(file,"px",x);
H5PartWriteDataFloat64(file,"py",y);
H5PartWriteDataFloat64(file,"pz",z);
H5PartWriteDataInt64(file,"id",id);
}
/* Id range this rank would select if the SetView call below were
 * enabled. */
unsigned int idStart = 0+sz*myproc;
unsigned int idEnd = (sz-1)+sz*myproc;
printf("AllDone p[%u]\n",myproc);
H5PartCloseFile(file);
MPI_Barrier(comm);
printf("p[%u:%u] : OK, close file and reopen for reading idStart %u idEnd %u \n",myproc,nprocs,idStart,idEnd);
file=H5PartOpenFileParallel("parttest.h5",H5PART_READ,comm);
H5PartSetStep(file,0);
unsigned int np = 0;
// unsigned int np = (int)H5PartGetNumParticles(file);
// nt=H5PartGetNumSteps(file); /* get number of steps in file */
//nds=H5PartGetNumDatasets(file); /* get number of datasets in timestep 0 */
MPI_Barrier(comm);
// H5PartSetView(file,idStart,idEnd);
/* NOTE(review): nt and nds are uninitialized here. */
printf("steps= %u datasets= %u particles= %u\n",nt,nds,np);
if(x)
free(x);
if(y)
free(y);
if(z)
free(z);
if(id)
free(id);
H5PartCloseFile(file);
MPI_Barrier(comm);
fprintf(stderr,"proc[%u]: done\n",myproc);
return MPI_Finalize();
}
#else
#endif
#endif
+119
View File
@@ -0,0 +1,119 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <hdf5.h>
#include "H5Part.hh"
/*
A simple regression test that shows how you use this API
to write and read multi-timestep files of particle data.
*/
#ifdef PARALLEL_IO
/* Parallel test in which each rank writes a DIFFERENT, growing number
 * of particles per step (sz = myproc*N, with N fed back from the
 * previous step's size) -- exercising H5Part's handling of non-uniform
 * and changing particle counts.  Afterwards the file is reopened and
 * a per-rank view is set before counting particles.
 * NOTE(review): the buffers are allocated as 1+sz*sizeof(double),
 * i.e. one byte plus sz doubles -- presumably (1+sz)*sizeof(double)
 * was meant; as written it still holds the sz doubles stored and is
 * non-zero when sz==0, but confirm the intent. */
int main(int argc,char *argv[]){
int N = 10;
int sz=0;
double *x,*y,*z;
h5part_int64_t *id;
H5PartFile *file;
int i,t,nt,nds;
int nprocs,myproc;
unsigned int np = 0;
MPI_Comm comm=MPI_COMM_WORLD;
MPI_Init(&argc,&argv);
MPI_Comm_size(comm,&nprocs);
MPI_Comm_rank(comm,&myproc);
/* parallel file creation */
file=H5PartOpenFileParallel("parttest.h5",H5PART_WRITE,comm);
if(!file) {
perror("File open failed: exiting!");
exit(0);
}
for(t=0;t<5;t++){
MPI_Barrier(comm);
/* Per-rank, per-step particle count (rank 0 always writes 0). */
sz = myproc*N;
// proc[0] sz = 10, (next step N=10), sz=10
// proc[1] sz = 20, (next step N=20), sz=40
fprintf(stderr,"proc[%u] sz=%u\n",myproc,(unsigned)sz);
x =(double*)malloc(1+sz*sizeof(double));
y =(double*)malloc(1+sz*sizeof(double));
z =(double*)malloc(1+sz*sizeof(double));
id=(h5part_int64_t*)malloc(1+sz*sizeof(h5part_int64_t));
for(i=0;i<sz;i++) {
x[i]=(double)(i+t)+10.0*(double)myproc;
y[i]=0.1 + (double)(i+t);
z[i]=0.2 + (double)(i+t*10);
id[i]=i+sz*myproc;
}
fprintf(stderr,"Proc[%u] Writing timestep %u Np=%u\n",myproc,t,sz);
H5PartSetStep(file,t); /* must set the current timestep in file */
fprintf(stderr,"Proc[%u]: setNumParticles start\n",myproc);
H5PartSetNumParticles(file,sz); /* then set number of particles to store */
fprintf(stderr,"Proc[%u]: setNumParticles done\n",myproc);
/* now write different tuples of data into this timestep of the file */
fprintf(stderr,"Proc[%u]: WriteX start\n",myproc);
H5PartWriteDataFloat64(file,"x",x);
fprintf(stderr,"Proc[%u]: WriteX done\n",myproc);
H5PartWriteDataFloat64(file,"y",y);
H5PartWriteDataFloat64(file,"z",z);
H5PartWriteDataFloat64(file,"px",x);
H5PartWriteDataFloat64(file,"py",y);
H5PartWriteDataFloat64(file,"pz",z);
H5PartWriteDataInt64(file,"id",id);
if(x)
free(x);
if(y)
free(y);
if(z)
free(z);
if(id)
free(id);
/* Grow the next step's per-rank count (the line the authors flag
 * as the trigger for the failure under investigation). */
// remove the next line and everything is ok
N = 1 + sz;
}
printf("AllDone p[%u]\n",myproc);
H5PartCloseFile(file);
MPI_Barrier(comm);
unsigned int idStart = 0;
unsigned int idEnd = myproc*10;
printf("p[%u:%u] : OK, close file and reopen for reading idStart %u idEnd %u \n",myproc,nprocs,idStart,idEnd);
file=H5PartOpenFileParallel("parttest.h5",H5PART_READ,comm);
H5PartSetStep(file,0);
nt = H5PartGetNumSteps(file); /* get number of steps in file */
nds=H5PartGetNumDatasets(file); /* get number of datasets in timestep 0 */
MPI_Barrier(comm);
H5PartSetView(file,idStart,idEnd);
np = H5PartGetNumParticles(file);
printf("steps= %u datasets= %u particles= %u\n",nt,nds,np);
H5PartCloseFile(file);
MPI_Barrier(comm);
fprintf(stderr,"proc[%u]: done\n",myproc);
return MPI_Finalize();
}
#endif
+260
View File
@@ -0,0 +1,260 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hdf5.h>
#include "H5Part.hh"
/*
A simple regression test that shows how you use this API
to write and read multi-timestep files of particle data.
*/
#ifdef PARALLEL_IO
/* Parallel regression test: every rank writes sz=5 particles (six
 * float64 attributes plus an int64 id) for 5 time-steps to
 * parttest.h5, then reopens it read-only, restricts the view to this
 * rank's id range, and reports step/dataset/particle counts.
 * NOTE(review): file->file is an hid_t printed with %u; the buffers
 * are sized sz*nprocs although only sz elements are used. */
int main(int argc,char *argv[]){
int sz=5;
double *x,*y,*z;
h5part_int64_t *id;
H5PartFile *file;
int i,t,nt,nds;
int nprocs,myproc;
MPI_Comm comm=MPI_COMM_WORLD;
MPI_Init(&argc,&argv);
MPI_Comm_size(comm,&nprocs);
MPI_Comm_rank(comm,&myproc);
x=(double*)malloc(sz*nprocs*sizeof(double));
y=(double*)malloc(sz*nprocs*sizeof(double));
z=(double*)malloc(sz*nprocs*sizeof(double));
id=(h5part_int64_t*)malloc(sz*nprocs*sizeof(h5part_int64_t));
/* parallel file creation */
file=H5PartOpenFileParallel("parttest.h5",H5PART_WRITE,comm);
if(!file) {
perror("File open failed: exiting!");
exit(0);
}
for(t=0;t<5;t++){
MPI_Barrier(comm);
/* Rank-tagged fake data so each rank's particles are identifiable. */
for(i=0;i<sz;i++) {
x[i]=(double)(i+t)+10.0*(double)myproc;
y[i]=0.1 + (double)(i+t);
z[i]=0.2 + (double)(i+t*10);
id[i]=i+sz*myproc;
}
printf("Proc[%u] Writing timestep %u file=%u\n",myproc,t,file->file);
H5PartSetStep(file,t); /* must set the current timestep in file */
H5PartSetNumParticles(file,sz); /* then set number of particles to store */
/* now write different tuples of data into this timestep of the file */
H5PartWriteDataFloat64(file,"x",x);
H5PartWriteDataFloat64(file,"y",y);
H5PartWriteDataFloat64(file,"z",z);
H5PartWriteDataFloat64(file,"px",x);
H5PartWriteDataFloat64(file,"py",y);
H5PartWriteDataFloat64(file,"pz",z);
H5PartWriteDataInt64(file,"id",id);
}
/* This rank's contiguous id range within the global particle array. */
unsigned int idStart = 0+sz*myproc;
unsigned int idEnd = (sz-1)+sz*myproc;
printf("AllDone p[%u]\n",myproc);
H5PartCloseFile(file);
fprintf(stderr,"Closed files p[%u]\n",myproc);
MPI_Barrier(comm);
fprintf(stderr,"p[%u:%u] : OK, close file and reopen for reading idStart %u idEnd %u \n",myproc,nprocs,idStart,idEnd);
file=H5PartOpenFileParallel("parttest.h5",H5PART_READ,comm);
H5PartSetStep(file,0);
// unsigned int np = 0;
unsigned int np = (int)H5PartGetNumParticles(file);
nt=H5PartGetNumSteps(file); /* get number of steps in file */
nds=H5PartGetNumDatasets(file); /* get number of datasets in timestep 0 */
MPI_Barrier(comm);
/* Restrict subsequent reads to this rank's id range and re-count. */
H5PartSetView(file,idStart,idEnd);
np = (int)H5PartGetNumParticles(file);
printf("After SetView(%d,%d): steps= %u datasets= %u particles= %u\n",
(int)idStart,(int)idEnd,
nt,nds,np);
if(x)
free(x);
if(y)
free(y);
if(z)
free(z);
if(id)
free(id);
H5PartCloseFile(file);
MPI_Barrier(comm);
fprintf(stderr,"proc[%u]: done\n",myproc);
return MPI_Finalize();
}
#else
/* Serial regression test for the H5Part particle API: writes a small
 * multi-step file, then re-reads it as a whole, at the last step, and
 * through sub-range views.  Returns 0 on success. */
int main(int argc,char *argv[]){
  int sz=10;                        /* particles per timestep */
  double *x,*y,*z;
  h5part_int64_t *id;
  H5PartFile *file;
  int i,t,nt,nds,np;
  h5part_int64_t idStart = 0;
  h5part_int64_t idEnd = 0;
  x=(double*)malloc(sz*sizeof(double));
  y=(double*)malloc(sz*sizeof(double));
  z=(double*)malloc(sz*sizeof(double));
  id=(h5part_int64_t*)malloc(sz*sizeof(h5part_int64_t));
  /* serial file creation */
  file=H5PartOpenFile("parttest.h5",H5PART_WRITE);
  if(!file) {
    perror("File open failed: exiting!");
    exit(1);                        /* was exit(0): failure must not report success */
  }
  H5PartWriteFileAttribString(file,"File Description", "This file is created by H5PartTest.cc. Simple H5Part file for testing purpose...");
  /* char arrays (not char*) avoid the deprecated string-literal to
     non-const char* conversion in C++ */
  char FileAttrib[] = "Created by H5PartTest.cc";
  H5PartWriteFileAttrib(file, "Origin", H5T_NATIVE_CHAR, FileAttrib ,strlen(FileAttrib));
  for(t=0;t<5;t++){
    fprintf(stdout,"Writing timestep %d\n",t);  /* %d: t is a signed int */
    for(i=0;i<sz;i++) {
      x[i]=(double)(i+t);
      y[i]=0.1 + (double)(i+t);
      z[i]=0.2 + (double)(i+t*10);
      id[i]=i;
      fprintf(stdout,"\tp[%d] x=%f y=%f z=%f id=%d\n",
              i,x[i],y[i],z[i],(int)id[i]);
    }
    H5PartSetStep(file,t); /* must set the current timestep in file */
    H5PartSetNumParticles(file,sz); /* then set number of particles to store */
    /* now write different tuples of data into this timestep of the file */
    H5PartWriteDataFloat64(file,"x",x);
    H5PartWriteDataFloat64(file,"y",y);
    H5PartWriteDataFloat64(file,"z",z);
    H5PartWriteDataFloat64(file,"px",x);
    H5PartWriteDataFloat64(file,"py",y);
    H5PartWriteDataFloat64(file,"pz",z);
    H5PartWriteDataInt64(file,"id",id);
    H5PartWriteStepAttribString(file,"Step Description", "STEP STEP STEP");
    char StepAttrib[] = "STEP";
    H5PartWriteStepAttrib(file, "Step", H5T_NATIVE_CHAR, StepAttrib ,strlen(StepAttrib));
  }
  printf("AllDone writing\n");
  H5PartCloseFile(file);
  /*+++++++++++++ Reopen File for Reading +++++++++++*/
  file=H5PartOpenFile("parttest.h5",H5PART_READ);
  if(!file) {                       /* the read-open was previously unchecked */
    perror("File reopen failed: exiting!");
    exit(1);
  }
  /********************************************/
  H5PartSetStep(file,0);
  nt=H5PartGetNumSteps(file); /* get number of steps in file */
  nds=H5PartGetNumDatasets(file); /* get number of datasets in timestep 0 */
  np=H5PartGetNumParticles(file);
  fprintf(stdout,"OK, close file and reopen for reading\n");
  fprintf(stdout,"steps= %d\tdatasets=%d\tparticles= %d\n",
          nt,nds,np);               /* %d: all three are signed ints */
  // clear the particles
  /* NOTE(review): buffers hold sz elements; np is assumed <= sz because
     this process just wrote sz particles per step -- verify if reused */
  for(i=0;i<np;i++){
    x[i]=y[i]=z[i]=0.0;
    id[i]=0;
  }
  H5PartReadDataFloat64(file,"x",x);
  H5PartReadDataFloat64(file,"y",y);
  H5PartReadDataFloat64(file,"z",z);
  H5PartReadDataInt64(file,"id",id);
  for(i=0;i<np;i++){
    fprintf(stdout,"\tp[%3d] x=%lf y=%lf z=%lf id=%lld\n",
            i,x[i],y[i],z[i],(long long)(id[i])); /* %lld needs long long */
  }
  /********************************************/
  printf("Set to last step and reload data\n");
  H5PartSetStep(file,nt-1);
  H5PartReadDataFloat64(file,"x",x);
  H5PartReadDataFloat64(file,"y",y);
  H5PartReadDataFloat64(file,"z",z);
  H5PartReadDataInt64(file,"id",id);
  for(i=0;i<np;i++){
    fprintf(stdout,"\tp[%3d] x=%lf y=%lf z=%lf id=%lld\n",
            i,x[i],y[i],z[i],(long long)(id[i]));
  }
  /********************************************/
  idEnd=np;
  printf("Old View is %d:%d\n",(int)idStart,(int)idEnd);
  H5PartSetView(file,idStart,idEnd>>1);       /* restrict view to the first half */
  printf("Set new view = %d:%d\n",(int)idStart,(int)(idEnd>>1));
  H5PartGetView(file,&idStart,&idEnd);
  np=H5PartGetNumParticles(file);
  printf("steps= %d datasets= %d particles= %d with view %d:%d\n",
         nt,nds,(int)np,(int)idStart,(int)idEnd);
  H5PartSetStep(file,nt-1); // set to last step
  printf("Setting to last step = %d\n",nt-1);
  for(i=0;i<10;i++){ x[i]=y[i]=z[i]=0.0; id[i]=0; } /* clear the arrays */
  H5PartReadDataFloat64(file,"x",x);
  H5PartReadDataFloat64(file,"y",y);
  H5PartReadDataFloat64(file,"z",z);
  H5PartReadDataInt64(file,"id",id);
  for(i=0;i<np;i++){
    fprintf(stdout,"\tp[%3d] x=%lf y=%lf z=%lf id=%lld\n",i,x[i],y[i],z[i],(long long)id[i]);
  }
  /********************************************/
  printf("Now set the view to the latter half of the data in step #%d\n",nt-1);
  H5PartResetView(file);
  H5PartGetView(file,&idStart,&idEnd);
  printf("Reset view = %d:%d\nSetting to %d:%d\n",
         (int)idStart,(int)idEnd,
         (int)idEnd>>1,(int)idEnd);
  H5PartSetView(file,(idEnd>>1),idEnd);
  np=H5PartGetNumParticles(file);
  printf("Now particles in selection are %d\n",np);
  printf("doubleCheck=%lld\n",(long long)H5PartGetView(file,0,0));
  for(i=0;i<10;i++){ x[i]=y[i]=z[i]=0.0; id[i]=0; } /* clear the arrays */
  H5PartReadDataFloat64(file,"x",x);
  H5PartReadDataFloat64(file,"y",y);
  H5PartReadDataFloat64(file,"z",z);
  H5PartReadDataInt64(file,"id",id);
  for(i=0;i<np;i++){
    fprintf(stdout,"\tp[%3d] x=%lf y=%lf z=%lf id=%lld\n",i,x[i],y[i],z[i],(long long)id[i]);
  }
  free(x);                          /* free(NULL) is a no-op: no guards needed */
  free(y);
  free(z);
  free(id);
  H5PartCloseFile(file);
  fprintf(stderr,"done\n");
  return 0;                         /* explicit success status */
}
#endif
+120
View File
@@ -0,0 +1,120 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hdf5.h>
#include "H5Part.hh"
#ifdef PARALLEL_IO
/*
This regression test is used to ensure parallel I/O is
working correctly and that Views are working for
parallel reads.
*/
int main(int argc,char *argv[]){
const int sz=5000;
double *x,*y,*z;
h5part_int64_t *id;
char name[64];
H5PartFile *file;
int i,t,nt,nds;
int nprocs,myproc;
hid_t gid;
MPI_Comm comm=MPI_COMM_WORLD;
MPI_Init(&argc,&argv);
MPI_Comm_size(comm,&nprocs);
MPI_Comm_rank(comm,&myproc);
x=(double*)malloc(sz*nprocs*sizeof(double));
y=(double*)malloc(sz*nprocs*sizeof(double));
z=(double*)malloc(sz*nprocs*sizeof(double));
id=(h5part_int64_t*)malloc(sz*nprocs*sizeof(h5part_int64_t));
/* parallel file creation */
file=H5PartOpenFileParallel("parttest.h5",H5PART_WRITE,comm);
if(!file) {
perror("File open failed: exiting!");
exit(0);
}
for(t=0;t<5;t++){
MPI_Barrier(comm);
for(i=0;i<sz;i++) {
x[i]=(double)(i+t)+10.0*(double)myproc;
y[i]=0.1 + (double)(i+t);
z[i]=0.2 + (double)(i+t*10);
id[i]=i+sz*myproc;
}
printf("Proc[%u] Writing timestep %u\n",myproc,t);
if(t==0){
printf("Proc[%u]: data values x[first,last]=%f:%f y[%u:%u]=%f:%f z[:]=%f:%f id[:]=%f:%f\n",
myproc,x[0],x[sz-1],0,sz-1,y[0],y[sz-1],z[0],z[sz-1],(int)id[0],(int)id[sz-1]);
}
H5PartSetStep(file,t); /* must set the current timestep in file */
H5PartSetNumParticles(file,sz); /* then set number of particles to store */
/* now write different tuples of data into this timestep of the file */
H5PartWriteDataFloat64(file,"x",x);
H5PartWriteDataFloat64(file,"y",y);
H5PartWriteDataFloat64(file,"z",z);
H5PartWriteDataFloat64(file,"px",x);
H5PartWriteDataFloat64(file,"py",y);
H5PartWriteDataFloat64(file,"pz",z);
H5PartWriteDataInt64(file,"id",id);
}
printf("AllDone p[%u]\n",myproc);
H5PartCloseFile(file);
MPI_Barrier(comm);
printf("p[%u:%u] : OK, close file and reopen for reading\n",myproc,nprocs);
file=H5PartOpenFileParallel("parttest.h5",H5PART_READ,comm);
H5PartSetStep(file,0);
unsigned int np,total_np = (int)H5PartGetNumParticles(file);
nt=H5PartGetNumSteps(file); /* get number of steps in file */
nds = H5PartGetNumDatasets(file);
if(myproc==0){
fprintf(stdout,"steps= %u\tdatasets=%u\tparticles= %u\n",
nt,nds,total_np);
}
MPI_Barrier(comm);
/* now lets compute the appropriate idStart and idEnd
for this particular processor */
unsigned h5part_int64_t idStart = sz*myproc;
unsigned h5part_int64_t idEnd = (sz-1)+sz*myproc;
H5PartSetView(file,idStart,idEnd);
np=H5PartGetNumParticles(file);
printf("Proc[%u]: View=%u:%u : particles= %u\n",
myproc,(int)idStart,(int)idEnd,H5PartGetNumParticles(file));
/* now lets read them and print some out */
H5PartReadDataFloat64(file,"x",x);
H5PartReadDataFloat64(file,"y",y);
H5PartReadDataFloat64(file,"z",z);
H5PartReadDataInt64(file,"id",id);
printf("Proc[%u]: data values x[first,last]=%f:%f y[%u:%u]=%f:%f z[:]=%f:%f id[:]=%f:%f\n",
myproc,x[0],x[sz-1],(int)idStart,(int)idEnd,y[0],y[sz-1],z[0],z[sz-1],(int)id[0],(int)id[sz-1]);
/* H5PartCloseFile(file); MPI_Finalize(); exit(0); */
if(x)
free(x);
if(y)
free(y);
if(z)
free(z);
if(id)
free(id);
H5PartCloseFile(file);
MPI_Barrier(comm);
fprintf(stderr,"proc[%u]: done\n",myproc);
return MPI_Finalize();
}
#else
#error This file only works when PARALLEL_IO is enabled.
#endif
+63
View File
@@ -0,0 +1,63 @@
#ifdef GTHDF5
/* Write the local particles' phase space (x,y,z,px,py,pz,id) plus a set
 * of beam-summary attributes into timestep idx_m of file_m, then advance
 * idx_m.  NOTE(review): buffers are allocated as double because the data
 * is written with H5PartWriteDataFloat64 -- assumes T is double. */
template<class T,unsigned int Dim> void DataSink<T,Dim>::savePhaseSpaceData()
{
    T *x=(double*)malloc(beam_m->getLocalNum()*sizeof(double));
    T *y=(double*)malloc(beam_m->getLocalNum()*sizeof(double));
    T *z=(double*)malloc(beam_m->getLocalNum()*sizeof(double));
    T *px=(double*)malloc(beam_m->getLocalNum()*sizeof(double));
    T *py=(double*)malloc(beam_m->getLocalNum()*sizeof(double));
    T *pz=(double*)malloc(beam_m->getLocalNum()*sizeof(double));
    /* was "h5part_int64_t int *id = (h5part_int64_t int*)...":
       "h5part_int64_t int" is not a valid type and does not compile */
    h5part_int64_t *id=(h5part_int64_t*)malloc(beam_m->getLocalNum()*sizeof(h5part_int64_t));
    double actPos = beam_m->getActSPos();
    double structLenght = beam_m->getMaxZ();
    Vector_t org = beam_m->getOrigin();
    Vector_t maxX = beam_m->getRmax();
    Vector_t minX = beam_m->getRmin();
    Vector_t maxP = beam_m->getPmax();
    Vector_t minP = beam_m->getPmin();
    /* protons and nTot are currently unused but the getters are kept in
       case they have side effects -- TODO confirm and drop */
    unsigned long protons = beam_m->getNumberOfProtons();
    unsigned long electrons = beam_m->getNumberOfElectrons();
    Vector_t centroid = beam_m->getCentroid();
    unsigned nTot = beam_m->getTotalNum();
    for (h5part_int64_t i=0; i<beam_m->getLocalNum();i++) {
        x[i] = beam_m->R[i](0);
        y[i] = beam_m->R[i](1);
        z[i] = beam_m->R[i](2);
        px[i] = beam_m->P[i](0);
        py[i] = beam_m->P[i](1);
        pz[i] = beam_m->P[i](2);
        /* particles below the electron count keep their plain ID, the
           rest (presumably protons) get a negated ID -- TODO confirm */
        if (i< (electrons-1))
            id[i] = beam_m->ID[i];
        else
            id[i] = -1*(long int)beam_m->ID[i];
    }
    H5PartSetStep(file_m,idx_m); /* must set the current timestep in file */
    H5PartSetNumParticles(file_m,beam_m->getLocalNum()); /* then set number of particles to store */
    /* now write different tuples of data into this timestep of the file */
    H5PartWriteDataFloat64(file_m,"x",x);
    H5PartWriteDataFloat64(file_m,"y",y);
    H5PartWriteDataFloat64(file_m,"z",z);
    H5PartWriteDataFloat64(file_m,"px",px);
    H5PartWriteDataFloat64(file_m,"py",py);
    H5PartWriteDataFloat64(file_m,"pz",pz);
    H5PartWriteDataInt64(file_m,"id",id);
    h5part_int64_t step = idx_m;
    H5PartWriteStepAttrib(file_m,"Step",H5T_NATIVE_INT64,&step,1);
    /* write scalar data i.e the header */
    H5PartWriteAttrib(file_m,"Spos",H5T_NATIVE_DOUBLE,&actPos,1);
    H5PartWriteAttrib(file_m,"structLen",H5T_NATIVE_DOUBLE,&structLenght,1);
    H5PartWriteAttrib(file_m,"org",H5T_NATIVE_DOUBLE,&org,3);
    H5PartWriteAttrib(file_m,"maxX",H5T_NATIVE_DOUBLE,&maxX,3);
    H5PartWriteAttrib(file_m,"minX",H5T_NATIVE_DOUBLE,&minX,3);
    H5PartWriteAttrib(file_m,"maxP",H5T_NATIVE_DOUBLE,&maxP,3);
    H5PartWriteAttrib(file_m,"minP",H5T_NATIVE_DOUBLE,&minP,3);
    H5PartWriteAttrib(file_m,"centroid",H5T_NATIVE_DOUBLE,&centroid,3);
    /* the buffers came from malloc, so release them with free():
       the original used delete (undefined behavior on malloc'ed memory)
       and leaked px/py/pz entirely */
    free(x);
    free(y);
    free(z);
    free(px);
    free(py);
    free(pz);
    free(id);
    idx_m++;
}
Executable
+152
View File
@@ -0,0 +1,152 @@
#include <stdio.h>
#include <stdlib.h>
#include <hdf5.h>
#include "H5Part.hh"
#include <fstream>
#include <iomanip>
#include <iostream>
#include <string>
using namespace std;
/*
A simple regression test that shows how you use this API
to write and read multi-timestep files of particle data.
*/
#ifdef PARALLEL_IO
#else
/* Read back every timestep of the file fn, printing the dataset names and
 * simple per-step checksums of x and pz.
 * Returns 1 on success, 0 if the file cannot be opened. */
int ReadFile(const string fn){
  char name[64];
  H5PartFile *file;
  int i,nt,nds;
  cout << "Open " << fn << endl;
  file= H5PartOpenFile(fn.c_str(),H5PART_READ);
  if(!file){                        /* the open was previously unchecked */
    cerr << "Cannot open " << fn << " for reading" << endl;
    return 0;
  }
  nt=H5PartGetNumSteps(file);
  H5PartSetStep(file,0);
  nds=H5PartGetNumDatasets(file);
  cout << "Timesteps = " << nt << " dataSets per timestep = " << nds << endl;
  cout << endl << endl << "===============================" << endl;
  for(i=0;i<nds;i++){
    H5PartGetDatasetName(file,i,name,64);
    printf("\tDataset[%d] name=[%s]\n",    /* %d: i is a signed int */
           i,name);
  }
  cout << "===============================" << endl << endl;;
  for (int steps=0; steps<nt; steps++) {
    H5PartSetStep(file,steps);
    h5part_int64_t n = H5PartGetNumParticles(file);
    cout << "number of particles this step =" << n << endl;
    double *x=new double[n];
    double *y=new double[n];
    double *z=new double[n];
    double *px=new double[n];
    double *py=new double[n];
    double *pz=new double[n];
    h5part_int64_t *id=new h5part_int64_t[n];
    H5PartReadParticleStep(file,steps,x,y,z,px,py,pz,id);
    double sumx = 0.0;
    double sumpz = 0.0;
    for (h5part_int64_t i=0; i<n; i++) {
      sumx += x[i];
      sumpz += pz[i];
    }
    cout << "\tstep= " << steps << " sum(x)= " << sumx << " sum(pz)= " << sumpz << endl;
    cout << "\tfirst x is " << x[0] << "\tlast x is " << x[n-1] << endl;
    cout << "\tFor fake data, expect sumx to be =" << x[0]*((double)n)<<endl;
    /* arrays came from new[], so they must go back with delete[]:
       plain delete on a new[] allocation is undefined behavior */
    delete [] x;
    delete [] y;
    delete [] z;
    delete [] px;
    delete [] py;
    delete [] pz;
    delete [] id;
  }
  H5PartCloseFile(file);
  return 1;
}
int WriteFile(const string fn){
H5PartFile *file;
int i,t;
h5part_int64_t n;
const int nt = 5;
const h5part_int64_t np = 1024*1024;
cout << "Open " << fn << endl;
file= H5PartOpenFile(fn.c_str(),H5PART_WRITE);
double *x=new double[np];
double *y=new double[np];
double *z=new double[np];
double *px=new double[np];
double *py=new double[np];
double *pz=new double[np];
h5part_int64_t *id=new h5part_int64_t[np];
H5PartSetNumParticles(file,np); // sets number of particles in simulation
for(n=0;n<np;n++) {
id[n]=i;
x[n]=1.0;
y[n]=2.0;
z[n]=3.0;
px[n]=1.0*((double)i)*((double)(i%10));
py[n]=2.0*((double)i)*((double)(i%10));
pz[n]=3.0*((double)i)*((double)(i%10));
}
for(t=0;t<nt;t++){
// setup the step number
H5PartSetStep(file,t);
printf("Write Step %u\n",t);
// write fake data
H5PartWriteDataFloat64(file,"x",x);
H5PartWriteDataFloat64(file,"y",y);
H5PartWriteDataFloat64(file,"z",z);
H5PartWriteDataFloat64(file,"px",px);
H5PartWriteDataFloat64(file,"py",py);
H5PartWriteDataFloat64(file,"pz",pz);
H5PartWriteDataInt64(file,"id",id);
}
H5PartCloseFile(file);
return 1;
}
/* Entry point: write a test file (argv[1], defaulting to "testfile.h5")
 * and then read it back.  Returns nonzero on failure. */
int main(int argc,char **argv){
  char *str;
  char dstr[]="testfile.h5";
  if(argc>1) str=argv[1];
  else str=dstr;
  const string fn = string(str);
  /* f=fopen(fn.c_str(),"r");
     if(f!=NULL) { a poor-man's stat()
     fclose(f);
  */
  if(!WriteFile(fn)){
    cerr << "Failed to write file " << fn << endl;
    exit(1);   /* was exit(0): a failure must not report success */
  }
  /*
    }
    else {
    cout << "File " << fn << " already exists, so we will proceed to reading" << endl;
    } */
  if(!ReadFile(fn)){
    cerr << "Failed to read file " << fn << endl;
    return 1;  /* propagate the read failure to the caller */
  }
  return 0;
}
#endif
+107
View File
@@ -0,0 +1,107 @@
c ==============
c
c Sample Fortran program that uses HDF5 bindings
c
c ==============
program H5testF
implicit none
include 'H5Part.inc'
INTEGER*8 file
INTEGER*8 nstep,ndata
INTEGER*8 npoints
INTEGER*8 step
INTEGER*8 err
INTEGER*8 I,J
REAL*8,ALLOCATABLE:: X(:),Y(:),Z(:),PX(:),PY(:),PZ(:)
INTEGER*8,ALLOCATABLE:: ID(:)
REAL*8 REALTIME
file = h5pt_openw("testfilef.h5")
print *,"Opened file testfilef.h5 for writing"
npoints = 1024
nstep = 10
ALLOCATE(X(npoints),Y(npoints),Z(npoints))
ALLOCATE(PX(npoints),PY(npoints),PZ(npoints))
ALLOCATE(ID(npoints))
print *," Npoints=",npoints," nsteps=",nstep
print *," writing X,Y,Z,PX,PY,PZ,ID"
print *," ... initialize the data arrays"
do I=1,npoints
X(I)=0.0
Y(I)=1.0+I
Z(I)=100.0+I*2.0
ID(I)=I
enddo
print *,"Tell h5pt how many particles are stored in the file"
c set the number of points
err = h5pt_setnpoints(file,npoints)
print *,"write an attribute string"
c write an annotation to the file
err=h5pt_writefileattrib_string(file,"Annotation","Testing 1 2 3")
print *,"and now write the steps"
do I=1,nstep
c Set the step number
print *,"Write Step ",I
err = h5pt_setstep(file,I)
c Now start writing the data arrays for this step
err = h5pt_writedata_r8(file,"x",X)
err = h5pt_writedata_r8(file,"y",Y)
err = h5pt_writedata_r8(file,"z",Z)
err = h5pt_writedata_r8(file,"px",PX)
err = h5pt_writedata_r8(file,"py",PY)
err = h5pt_writedata_r8(file,"pz",PZ)
err = h5pt_writedata_i8(file,"id",ID)
do J=1,npoints
ID(J)=ID(J)+10
enddo
c And write a simple floatingpoint attribute associated with this timestep
REALTIME = I*0.1
err=h5pt_writestepattrib_r8(file,"RealTime",REALTIME,1)
enddo
print *,"Done writing, now close the file"
err = h5pt_close(file)
c **************** Clean out some variables ***************
nstep=0
c npoints=0
do I=1,npoints
X(I)=-1.0
Y(I)=-1.0
Z(I)=-1.0
ID(I)=0
enddo
c *****************Now Reopen for Reading ******************
print *,"Open file for reading"
file = h5pt_openr("testfilef.h5")
print *," Opened testfilef.h5"
nstep = h5pt_getnsteps(file)
print *," Nsteps = ",nstep
err = h5pt_setstep(file,1_8)
print *,"now get the number of datasets"
ndata = h5pt_getndatasets(file)
print *," Ndata=",ndata
npoints = h5pt_getnpoints(file)
print *," NP=",npoints
do step=1,nstep
print *,"Read step ",step
c set the current step
err = h5pt_setstep(file,step)
err=h5pt_readdata_i8(file,"id",ID)
c read the Z data from the current step
c err = h5prt_readdata(file,step,X,Y,Z,PX,PY,PZ,ID)
do J=1,10
print *," ID(",J,")==",ID(J)
enddo
enddo
err = h5pt_close(file)
print *,"done"
end
+123
View File
@@ -0,0 +1,123 @@
! ==============
!
! Sample Fortran program that uses HDF5 bindings
!
! ==============
program H5testFpar
  ! Parallel sample program for the H5Part Fortran bindings: every rank
  ! writes npoints particles per step, then the file is reopened and each
  ! rank reads back its own slice through a view.
  implicit none
  include 'mpif.h'
  include 'H5PartF90.inc'
  INTEGER*8 file
  INTEGER*8 nstep
  INTEGER*8 ndata
  INTEGER*8 npoints,pointoffset
  INTEGER*8 step
  INTEGER*8 I
  INTEGER*8 J
  REAL*8,ALLOCATABLE:: X(:),Y(:),Z(:),PX(:),PY(:),PZ(:)
  INTEGER*8,ALLOCATABLE:: ID(:)
  REAL*8 REALTIME(1)
  INTEGER myproc,nprocs
  INTEGER ierr
  INTEGER*8 rc
  INTEGER:: comm
  call MPI_Init(ierr)
  comm = MPI_COMM_WORLD
  call MPI_Comm_rank(comm,myproc,ierr)
  call MPI_Comm_size(comm,nprocs,ierr)
  file = h5pt_openw_par("testfilef.h5",comm)
  print *,"Opened file testfilef.h5 for writing"
  npoints = 1024
  nstep = 10
  ALLOCATE(X(npoints),Y(npoints),Z(npoints))
  ALLOCATE(PX(npoints),PY(npoints),PZ(npoints))
  ALLOCATE(ID(npoints))
  print *," Npoints=",npoints," nsteps=",nstep
  print *," writing X,Y,Z,PX,PY,PZ,ID"
  print *," ... initialize the data arrays"
  do I=1,npoints
     X(INT(I))=0.0
     Y(I)=1.0+REAL(I)
     Z(I)=100.0+REAL(I)*2.0
     ID(I)=I
  enddo
  print *,"Tell h5pt how many particles are stored in the file"
  ! set the number of points
  rc = h5pt_setnpoints(file,npoints)
  print *,"write an attribute string"
  ! write an annotation to the file
  rc=h5pt_writefileattrib_string(file,"Annotation","Testing 1 2 3")
  print *,"and now write the steps"
  do I=1,nstep
     ! Set the step number
     print *,"Write Step ",I
     rc = h5pt_setstep(file,I)
     ! Now start writing the data arrays for this step
     rc = h5pt_writedata_r8(file,"x",X)
     rc = h5pt_writedata_r8(file,"y",Y)
     rc = h5pt_writedata_r8(file,"z",Z)
     rc = h5pt_writedata_r8(file,"px",PX)
     rc = h5pt_writedata_r8(file,"py",PY)
     rc = h5pt_writedata_r8(file,"pz",PZ)
     rc = h5pt_writedata_i8(file,"id",ID)
     do J=1,npoints
        ID(J)=ID(J)+10
     enddo
     ! And write a simple floatingpoint attribute for this timestep
     REALTIME(1) = REAL(I)*0.1
     rc = h5pt_writestepattrib_r8(file,"RealTime",REALTIME,1_8)
  enddo
  print *,"Done writing, now close the file"
  rc = h5pt_close(file)
  ! **************** Clean out some variables ***************
  nstep=0
  ! npoints=0
  do I=1,npoints
     X(I)=-1.0
     Y(I)=-1.0
     Z(I)=-1.0
     ID(I)=0
  enddo
  ! *****************Now Reopen for Reading ******************
  print *,"Open file for reading"
  file = h5pt_openr_par("testfilef.h5",comm)
  print *," Opened testfilef.h5"
  nstep = h5pt_getnsteps(file)
  print *," Nsteps = ",nstep
  rc = h5pt_setstep(file,1_8)
  print *,"now get the number of datasets"
  ndata = h5pt_getndatasets(file)
  print *," Ndata=",ndata
  npoints = h5pt_getnpoints(file)
  print *," Total NP=",npoints
  npoints = npoints/nprocs
  pointoffset = npoints*myproc
  ! NOTE(review): if the view bounds are inclusive (as in the C tests),
  ! pointoffset+npoints overlaps the next rank by one element; the upper
  ! bound would then be pointoffset+npoints-1 -- verify against the binding
  rc = h5pt_setview(file,pointoffset,pointoffset+npoints)
  print *," Pointoffset=",pointoffset," Local NP=",npoints
  ! Now we need to set our view to read correct points
  do step=1,nstep
     print *,"Read step ",step
     ! set the current step
     rc = h5pt_setstep(file,step)
     rc = h5pt_readdata_i8(file,"id",ID)
     ! read the Z data from the current step
     ! rc = h5prt_readdata(file,step,X,Y,Z,PX,PY,PZ,ID)
     do J=1,10
        print *," ID(",J,")==",ID(J)
     enddo
  enddo
  rc = h5pt_close(file)
  ! release the work arrays (previously never deallocated)
  DEALLOCATE(X,Y,Z,PX,PY,PZ,ID)
  print *,"done"
  ! MPI_Finalize requires the ierror argument in Fortran;
  ! the original call omitted it
  call MPI_Finalize(ierr)
end
+178
View File
@@ -0,0 +1,178 @@
# test level Makefile.am
# PATH SETTING
HDF5ROOT = @HDF5ROOT@
PHDF5ROOT = @PHDF5ROOT@
# COMPILER SETTING
CXX = @CXX@
FC = @FC@
MPIFC = @MPIFC@
MPICXX = @MPICXX@
MPICC = @MPICC@
# COMPILER FLAG SETTING
CFLAGS = @CFLAGS@
PCFLAGS = $(CFLAGS) -DPARALLEL_IO -DH5_HAVE_PARALLEL -DMPICH_IGNORE_CXX_SEEK
ADDFLAGS = @ADDFLAGS@
FFLAGS = @FFLAGS@ $(ADDFLAGS)
PFFLAGS = @FFLAGS@ $(CFLAGS)
# LIBRARIES
SZLIB = @SZLIB@
HDFLIB = -L$(HDF5ROOT)/lib -lhdf5 -lz $(SZLIB) @LDFLAGS@
PHDFLIB = -L$(PHDF5ROOT)/lib -lhdf5 -lz $(SZLIB) @LDFLAGS@
MPILIB = @MPILIB@
LIBS = $(HDFLIB) -lm @STDCXX@
PLIBS = $(PHDFLIB) $(MPILIB) -lm @STDCXX@
# H5Part compiled library location
# H5PLIB = -L@prefix@/lib
H5PLIB = -L@H5P_LIB_LOC@
# INCLUDES
HDFINC = -I$(HDF5ROOT)/include
PHDFINC = -I$(PHDF5ROOT)/include
MPIINC = @MPIINC@
PINC = $(PHDFINC) $(MPIINC) $(H5PINC)
INC = $(HDFINC) $(H5PINC)
# H5Part header file location
# H5PINC = -I@prefix@/include
H5PINC = -I@H5P_LIB_LOC@
# What to build... make install will place these files in the $(prefix)/bin directory.
bin_PROGRAMS = @TTARGET@
# Some useful scripts that I wish to place in the $(prefix)/bin directory.
bin_SCRIPTS =
#bin_SCRIPTS = JAC_H5PartTestP_script.scr JAC_H5testFpar_script.scr JAC_RUN_ALL_script.scr
# Listing of all programs that maybe built. (Has to know statically...)
EXTRA_PROGRAMS = Bench H5PartTest H5testF H5PartTestP H5PartAndreasTest H5testFpar H5PartTestParallel H5test H5BlockTest1
# Extra files that I wish to include in the dist tar ball.
EXTRA_DIST = Bench.c H5ecloudExample.cc H5ParallelTest.cc H5test.cc $(bin_SCRIPTS)## TO BE TAILORED LATER...
# Listing of sources
Bench_SOURCES = Bench.c
H5PartAndreasTest_SOURCES = H5PartAndreasTest.cc
H5PartTest_SOURCES = H5PartTest.cc
H5PartTestP_SOURCES = H5PartTest.cc
H5testF_SOURCES = H5testF.f
H5testFpar_SOURCES = H5testFpar.f
H5PartTestParallel_SOURCES = H5PartTestParallel.cc
H5test_SOURCES = H5test.cc
H5BlockTest1_SOURCES = H5BlockTest1.c
# Specific building instruction (What compilers to use...)
# ------------ Build Tests ------------
Bench: Bench.c
$(MPICC) $(PCFLAGS) $(PINC) -o Bench Bench.c $(H5PLIB) -lpH5Part $(PLIBS)
#####################################################################################################################
H5PartTest: H5PartTest.o
$(CXX) -o H5PartTest H5PartTest.o $(H5PLIB) -lH5Part $(LIBS)
H5PartTest.o: H5PartTest.cc
$(CXX) $(CFLAGS) $(INC) -DREGRESSIONTEST -g -c H5PartTest.cc
#####################################################################################################################
H5PartTestP: H5PartTestP.o
$(MPICXX) $(PINC) -o H5PartTestP H5PartTestP.o $(H5PLIB) -lpH5Part $(PLIBS)
H5PartTestP.o: H5PartTest.cc
$(MPICXX) $(PCFLAGS) $(PINC) -DREGRESSIONTEST -c H5PartTest.cc -o H5PartTestP.o
#####################################################################################################################
H5PartTestParallel: H5PartTestParallel.o
$(MPICXX) -o H5PartTestParallel H5PartTestParallel.o $(H5PLIB) -lpH5Part $(PLIBS)
H5PartTestParallel.o: H5PartTestParallel.cc
$(MPICXX) $(PCFLAGS) $(PINC) -c H5PartTestParallel.cc
#####################################################################################################################
H5PartAndreasTest: H5PartAndreasTest.o
$(MPICXX) -o H5PartAndreasTest H5PartAndreasTest.o $(H5PLIB) -lpH5Part $(PLIBS)
H5PartAndreasTest.o: H5PartAndreasTest.cc
$(MPICXX) $(PCFLAGS) $(PINC) -c H5PartAndreasTest.cc
#####################################################################################################################
H5testF: H5testF.o
$(FC) -o H5testF H5testF.o $(H5PLIB) -lH5PartF -lH5Part $(LIBS)
H5testF.o: H5testF.f
$(FC) $(FFLAGS) -c $(H5PINC) H5testF.f
#####################################################################################################################
H5testFpar: H5testFpar.o
$(MPIFC) -o H5testFpar H5testFpar.o $(H5PLIB) -lpH5PartF -lpH5Part $(PLIBS)
H5testFpar.o: H5testFpar.f
$(MPIFC) $(PFFLAGS) -c $(H5PINC) H5testFpar.f
#####################################################################################################################
H5test: H5test.o
$(CXX) -o H5test H5test.o $(H5PLIB) -lH5Part $(LIBS)
H5test.o: H5test.cc
$(CXX) $(CFLAGS) $(INC) -DREGRESSIONTEST -g -c H5test.cc
#####################################################################################################################
H5BlockTest1: H5BlockTest1.o
$(CC) -o $@ $< $(H5PLIB) -lH5Part $(LIBS)
H5BlockTest1.o: H5BlockTest1.c
$(CC) $(CFLAGS) $(INC) -g -c $<
H5BlockWrite1: H5BlockWrite1.o
$(CC) -o $@ $< $(H5PLIB) -lH5Part $(LIBS)
H5BlockWrite1.o: H5BlockWrite1.c
$(CC) $(CFLAGS) $(INC) -g -c $<
H5BlockReadParallel1: H5BlockReadParallel1.o
$(CC) -o $@ $< $(H5PLIB) -lpH5Part $(PLIBS)
H5BlockReadParallel1.o: H5BlockReadParallel1.c
$(CC) $(CFLAGS) -DPARALLEL_IO $(INC) -g -c $<
#####################################################################################################################
H5BlockParallelTest1: H5BlockParallelTest1.o
$(MPICC) -o $@ $< $(H5PLIB) -lpH5Part $(PLIBS)
H5BlockParallelTest1.o: H5BlockParallelTest1.c
$(MPICC) $(PCFLAGS) $(PINC) -c $<
#####################################################################################################################
clean:
	rm -f *~ *.o H5testF H5PartTest H5PartTestP H5PartAndreasTest H5PartTestParallel H5testFpar Bench H5test H5BlockTest1
distclean: clean
	rm -fr H5testF H5PartTest H5PartTestP H5PartAndreasTest H5PartTestParallel H5test H5testFpar Bench H5BlockTest1
	rm -rf .deps
	rm -rf .libs
	rm -f parttest.h5
	rm -rf config.status config.log config.h Makefile
#####################################################################################################################