/*
Copyright (c) 1994 - 2010, Lawrence Livermore National Security, LLC.
LLNL-CODE-425250.
All rights reserved.

This file is part of Silo. For details, see silo.llnl.gov.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the disclaimer below.
   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the disclaimer (as noted
     below) in the documentation and/or other materials provided with
     the distribution.
   * Neither the name of the LLNS/LLNL nor the names of its
     contributors may be used to endorse or promote products derived
     from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

This work was produced at Lawrence Livermore National Laboratory under
Contract No. DE-AC52-07NA27344 with the DOE. Neither the United
States Government nor Lawrence Livermore National Security, LLC nor
any of their employees, makes any warranty, express or implied, or
assumes any liability or responsibility for the accuracy,
completeness, or usefulness of any information, apparatus, product, or
process disclosed, or represents that its use would not infringe
privately-owned rights. Any reference herein to any specific
commercial products, process, or services by trade name, trademark,
manufacturer or otherwise does not necessarily constitute or imply its
endorsement, recommendation, or favoring by the United States
Government or Lawrence Livermore National Security, LLC. The views and
opinions of authors expressed herein do not necessarily state or
reflect those of the United States Government or Lawrence Livermore
National Security, LLC, and shall not be used for advertising or
product endorsement purposes.
*/
#include <silo.h>
#include <pmpio.h>
#include <mpi.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

void WriteMultiXXXObjects(DBfile *siloFile, PMPIO_baton_t *bat, int size,
    const char *file_ext);

/*-----------------------------------------------------------------------------
 * Purpose:     Implement the create callback to initialize pmpio
 *              Will create the silo file and the 'first' directory (namespace)
 *              in it. The driver type (DB_PDB or DB_HDF5) is passed as user
 *              data; a void pointer to the driver determined in main.
 *-----------------------------------------------------------------------------
 */
void *CreateSiloFile(const char *fname, const char *nsname, void *userData)
{
    int driver = *((int*) userData);
    DBfile *siloFile = DBCreate(fname, DB_CLOBBER, DB_LOCAL, "pmpio testing", driver);
    if (siloFile)
    {
        DBMkDir(siloFile, nsname);
        DBSetDir(siloFile, nsname);
    }
    return (void *) siloFile;
}
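
/* In CreateSiloFile above, DB_CLOBBER overwrites any existing file of the
 * same name and DB_LOCAL targets the native machine's data format; the
 * driver value itself arrives through the userData pointer that main
 * hands to PMPIO_Init. */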

/*-----------------------------------------------------------------------------
 * Purpose:     Implement the open callback to initialize pmpio
 *              Will open the silo file and, for write, create the new
 *              directory or, for read, just cd into the right directory.
 *-----------------------------------------------------------------------------
 */
void *OpenSiloFile(const char *fname, const char *nsname, PMPIO_iomode_t ioMode,
    void *userData)
{
    DBfile *siloFile = DBOpen(fname, DB_UNKNOWN,
        ioMode == PMPIO_WRITE ? DB_APPEND : DB_READ);
    if (siloFile)
    {
        if (ioMode == PMPIO_WRITE)
            DBMkDir(siloFile, nsname);
        DBSetDir(siloFile, nsname);
    }
    return (void *) siloFile;
}
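
/* In OpenSiloFile above, DB_UNKNOWN asks Silo to auto-detect the driver
 * of the existing file, so the same callback serves both the PDB and
 * HDF5 cases without consulting the userData. */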

/*-----------------------------------------------------------------------------
 * Purpose:     Implement the close callback for pmpio
 *-----------------------------------------------------------------------------
 */
void CloseSiloFile(void *file, void *userData)
{
    DBfile *siloFile = (DBfile *) file;
    if (siloFile)
        DBClose(siloFile);
}

/*-----------------------------------------------------------------------------
 * Purpose:     Demonstrate use of PMPIO
 *
 * This simple program demonstrates the use of PMPIO to write a set of silo
 * files for a very simple quad mesh. The mesh will be N x N where N is the
 * number of processors. Each processor will write a single 1 x N strip of
 * zones (2 nodes and 1 zone wide). The second command line argument
 * indicates the total number of files that will be produced. Note that this
 * is totally independent of the number of processors. The resulting silo
 * file can be visualized, in parallel, in VisIt by opening the 'root' file
 * named "silo_000". Note that PMPIO's role is merely to coordinate and
 * manage access to the silo file(s) that get created.
 *
 * By default, this example will use Silo's PDB driver. However, if you pass
 * "DB_HDF5" anywhere on the command line, it will use the HDF5 driver. Any
 * integer appearing on the command line is taken to be the total number of
 * files.
 *
 * An example of how you would invoke this example is...
 *
 *     mpirun -np 17 pmpio_silo_test_mesh 3 DB_HDF5
 *
 * which would run on 17 processors, creating a 17x17 mesh but writing it to
 * 3 files using the HDF5 driver.
 *-----------------------------------------------------------------------------
 */
int main(int argc, char **argv)
{
    int size, rank;
    int numGroups = -1;      /* number of files, set from the command line */
    int driver = DB_PDB;     /* Silo driver, DB_PDB unless DB_HDF5 is given */
    DBfile *siloFile;
    PMPIO_baton_t *bat;
    char *file_ext = "pdb";
    char fileName[256], nsName[256];
    int i, j, dims[2], ndims = 2;
    char *coordnames[2], *varnames[2];
    float *x, *y, *coords[2], *vars[2];
    float *vx, *vy, *temp;

    /* specify the number of Silo files to create and driver */
    for (i = 1; i < argc; i++)
    {
        if (!strcmp(argv[i], "DB_HDF5"))
        {
            driver = DB_HDF5;
            file_ext = "h5";
        }
        else if (strtol(argv[i], 0, 10) > 0)
        {
            numGroups = strtol(argv[i], 0, 10);
        }
        else if (argv[i][0] != '\0')
        {
            fprintf(stderr, "%s: ignored argument `%s'\n", argv[0], argv[i]);
        }
    }

    /* standard MPI initialization stuff */
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* rank 0 picks a default file count if none was given on the
     * command line */
    if (rank == 0 && numGroups < 0)
        numGroups = 3;

    /* Ensure all procs agree on numGroups, driver and file_ext */
    MPI_Bcast(&numGroups, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&driver, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if (driver == DB_HDF5)
        file_ext = "h5";

    /* Initialize PMPIO, pass a pointer to the driver type as the
     * user data. */
    bat = PMPIO_Init(numGroups, PMPIO_WRITE, MPI_COMM_WORLD, 1,
        CreateSiloFile, OpenSiloFile, CloseSiloFile, &driver);
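    /* The 4th argument to PMPIO_Init is the MPI message tag PMPIO uses
     * for its internal baton messages; the final argument is handed back
     * unmodified as 'userData' to the three callbacks registered above. */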

    /* Construct names for the silo files and the directories in them */
    sprintf(fileName, "silo_%03d.%s", PMPIO_GroupRank(bat, rank), file_ext);
    sprintf(nsName, "domain_%03d", rank);

    /* Wait for write access to the file. All processors call this.
     * Some processors (the first in each group) return immediately
     * with write access to the file. Other processors wind up waiting
     * until they are given control by the preceding processor in
     * the group when that processor calls "HandOffBaton" */
    siloFile = (DBfile *) PMPIO_WaitForBaton(bat, fileName, nsName);
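    /* In PMPIO_WRITE mode, the first processor in each group reaches this
     * point via the create callback (CreateSiloFile); every subsequent
     * processor in the group reaches it via the open callback
     * (OpenSiloFile) once it receives the baton. */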

    /* Do some work to construct the mesh data for this processor. */
    dims[0] = 2;
    dims[1] = size;
    coordnames[0] = "xcoords";
    coordnames[1] = "ycoords";
    x = (float *) malloc(2 * sizeof(float));
    x[0] = (float) rank;
    x[1] = (float) rank+1;
    y = (float *) malloc(size * sizeof(float));
    for (i = 0; i < size; i++)
        y[i] = (float) i;
    coords[0] = x;
    coords[1] = y;
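
    /* Each rank thus owns a strip of nodes spanning x in [rank, rank+1]
     * and y in [0, size-1]; taken together the ranks tile the full mesh
     * described in the header comment. */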

    /* Do some work to create some simple variable data for this processor. */
    vx = (float *) malloc(2 * size * sizeof(float));
    vy = (float *) malloc(2 * size * sizeof(float));
    for (j = 0; j < size; j++)
    {
        for (i = 0; i < 2; i++)
        {
            vx[j*2+i] = ((float) (size-(rank+i)))/((float) (2*size));
            vy[j*2+i] = ((float) ((size/2) - (rank+i))) / ((float) size);
        }
    }
    temp = (float *) malloc(size * sizeof(float));
    for (i = 0; i < size; i++)
        temp[i] = (float) i * rank;

    /*
     *
     * BEGIN THIS PROCESSOR'S LOCAL WORK ON A SILO FILE
     *
     */

    /* This processor's local work on the file */
    DBPutQuadmesh(siloFile, "qmesh", coordnames, coords, dims, ndims,
        DB_FLOAT, DB_COLLINEAR, 0);
    free(x);
    free(y);

    /* Output velocity on the mesh */
    varnames[0] = "vx";
    varnames[1] = "vy";
    vars[0] = vx;
    vars[1] = vy;
    DBPutQuadvar(siloFile, "velocity", "qmesh", 2, varnames, vars,
        dims, ndims, NULL, 0, DB_FLOAT, DB_NODECENT, 0);
    free(vx);
    free(vy);

    /* Output temp on the mesh; temp is zone-centered, so its dims are the
     * zone counts, one less than the node counts in each direction */
    dims[0] = 1;
    dims[1] = size - 1;
    DBPutQuadvar1(siloFile, "temp", "qmesh", temp, dims, ndims,
        NULL, 0, DB_FLOAT, DB_ZONECENT, 0);
    free(temp);

    /*
     *
     * END THIS PROCESSOR'S LOCAL WORK ON THE FILE
     *
     */

    /* If this is the 'root' processor, also write Silo's multi-XXX objects */
    if (rank == 0)
        WriteMultiXXXObjects(siloFile, bat, size, file_ext);

    /* Hand off the baton to the next processor. This winds up closing
     * the file so that the next processor that opens it can be assured
     * of getting a consistent and up to date view of the file's contents. */
    PMPIO_HandOffBaton(bat, siloFile);

    /* We're done using PMPIO, so finish it off */
    PMPIO_Finish(bat);

    /* Standard MPI finalization */
    MPI_Finalize();

    return 0;
}
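
/* A possible build and run sequence for this example (the install paths
 * and library name here are assumptions; adjust them for your MPI and
 * Silo installations):
 *
 *     mpicc pmpio_silo_test_mesh.c -I/path/to/silo/include \
 *         -L/path/to/silo/lib -lsiloh5 -o pmpio_silo_test_mesh
 *     mpirun -np 17 pmpio_silo_test_mesh 3 DB_HDF5
 */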

void WriteMultiXXXObjects(DBfile *siloFile, PMPIO_baton_t *bat, int size,
    const char *file_ext)
{
    int i;
    char **meshBlockNames = (char **) malloc(size * sizeof(char*));
    char **tempBlockNames = (char **) malloc(size * sizeof(char*));
    char **velBlockNames = (char **) malloc(size * sizeof(char*));
    int *blockTypes = (int *) malloc(size * sizeof(int));
    int *varTypes = (int *) malloc(size * sizeof(int));

    /* Go to root directory in the silo file */
    DBSetDir(siloFile, "/");

    /* Construct the lists of individual object names */
    for (i = 0; i < size; i++)
    {
        int groupRank = PMPIO_GroupRank(bat, i);
        meshBlockNames[i] = (char *) malloc(1024);
        velBlockNames[i] = (char *) malloc(1024);
        tempBlockNames[i] = (char *) malloc(1024);
        if (groupRank == 0)
        {
            /* this mesh block is in the file 'root' owns */
            sprintf(meshBlockNames[i], "/domain_%03d/qmesh", i);
            sprintf(velBlockNames[i], "/domain_%03d/velocity", i);
            sprintf(tempBlockNames[i], "/domain_%03d/temp", i);
        }
        else
        {
            /* this mesh block is in another file */
            sprintf(meshBlockNames[i], "silo_%03d.%s:/domain_%03d/qmesh",
                groupRank, file_ext, i);
            sprintf(velBlockNames[i], "silo_%03d.%s:/domain_%03d/velocity",
                groupRank, file_ext, i);
            sprintf(tempBlockNames[i], "silo_%03d.%s:/domain_%03d/temp",
                groupRank, file_ext, i);
        }
        blockTypes[i] = DB_QUADMESH;
        varTypes[i] = DB_QUADVAR;
    }
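
    /* Block names of the form "file:path" (the else branch above) tell
     * Silo readers such as VisIt to resolve the object in another file in
     * the set, while a bare "/path" names an object in the same file as
     * the multi-block object itself. */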

    /* Write the multi-block objects */
    DBPutMultimesh(siloFile, "multi_qmesh", size, meshBlockNames, blockTypes, 0);
    DBPutMultivar(siloFile, "multi_velocity", size, velBlockNames, varTypes, 0);
    DBPutMultivar(siloFile, "multi_temp", size, tempBlockNames, varTypes, 0);

    /* Clean up */
    for (i = 0; i < size; i++)
    {
        free(meshBlockNames[i]);
        free(velBlockNames[i]);
        free(tempBlockNames[i]);
    }
    free(meshBlockNames);
    free(velBlockNames);
    free(tempBlockNames);
    free(blockTypes);
    free(varTypes);
}