@@ -55,10 +55,10 @@ namespace libmpdataxx
             H5::PredType::NATIVE_FLOAT,
         flttype_output = H5::PredType::NATIVE_FLOAT; // using floats not to waste disk space
 
-      blitz::TinyVector<hsize_t, parent_t::n_dims> cshape, shape, chunk, srfcshape, srfcchunk, offst, shape_h, chunk_h, offst_h;
+      blitz::TinyVector<hsize_t, parent_t::n_dims> cshape, shape, chunk, srfcshape, srfcchunk, offst, shape_h, chunk_h, offst_h, shape_mem_h, offst_mem_h;
       H5::DSetCreatPropList params;
 
-      H5::DataSpace sspace, cspace, srfcspace, sspace_h;
+      H5::DataSpace sspace, cspace, srfcspace, sspace_h, sspace_mem_h;
 #if defined(USE_MPI)
       hid_t fapl_id;
 #endif
@@ -97,11 +97,13 @@ namespace libmpdataxx
         // x,y,z
         offst = 0;
         offst_h = 0;
+        offst_mem_h = 0;
 
         for (int d = 0; d < parent_t::n_dims; ++d)
         {
-          shape[d] = this->mem->distmem.grid_size[d];
-          shape_h[d] = this->mem->distmem.grid_size[d] + 2 * this->halo;
+          shape[d] = this->mem->distmem.grid_size[d]; // shape of arrays stored in the file
+          shape_h[d] = this->mem->distmem.grid_size[d] + 2 * this->halo; // shape of arrays with halos stored in the file
+          shape_mem_h[d] = this->mem->grid_size[d].length() + 2 * this->halo; // shape of the array with halo stored in the memory of a given MPI rank
         }
 
         chunk = shape;
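Note on the three shapes introduced above: `shape` is the global grid as stored in the file, `shape_h` adds the outer halo to that global extent, and `shape_mem_h` is the extent of the halo-padded array each MPI rank actually holds. A minimal arithmetic sketch under assumed numbers (a 100-cell 1D grid split over 2 ranks with a halo of 1; all values below are illustrative, not taken from the commit):

```cpp
#include <iostream>

int main()
{
  // assumed example values, not part of libmpdataxx
  const int global_n = 100; // distmem.grid_size[0]
  const int local_n  = 50;  // grid_size[0].length() on this rank
  const int halo     = 1;

  const int shape       = global_n;            // file dataset, no halo
  const int shape_h     = global_n + 2 * halo; // file dataset including the outer halo
  const int shape_mem_h = local_n  + 2 * halo; // per-rank in-memory array with halo

  std::cout << shape << " " << shape_h << " " << shape_mem_h << "\n"; // prints: 100 102 52
}
```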
@@ -113,10 +115,11 @@ namespace libmpdataxx
         srfcshape = shape;
         *(srfcshape.end()-1) = 1;
 
-        sspace = H5::DataSpace(parent_t::n_dims, shape.data());
-        sspace_h = H5::DataSpace(parent_t::n_dims, shape_h.data());
-        srfcspace = H5::DataSpace(parent_t::n_dims, srfcshape.data());
-        cspace = H5::DataSpace(parent_t::n_dims, cshape.data());
+        sspace = H5::DataSpace(parent_t::n_dims, shape.data());
+        sspace_h = H5::DataSpace(parent_t::n_dims, shape_h.data());
+        sspace_mem_h = H5::DataSpace(parent_t::n_dims, shape_mem_h.data());
+        srfcspace = H5::DataSpace(parent_t::n_dims, srfcshape.data());
+        cspace = H5::DataSpace(parent_t::n_dims, cshape.data());
 
 #if defined(USE_MPI)
         if (this->mem->distmem.size() > 1)
@@ -133,8 +136,10 @@ namespace libmpdataxx
           if (this->mem->distmem.rank() == this->mem->distmem.size() - 1)
             cshape[0] += 1;
 
-          offst[0] = this->mem->grid_size[0].first();
-          offst_h[0] = this->mem->distmem.rank() == 0 ? 0 : this->mem->grid_size[0].first() + this->halo;
+          offst[0] = this->mem->grid_size[0].first();
+          offst_h[0] = this->mem->distmem.rank() == 0 ? 0 : this->mem->grid_size[0].first() + this->halo;
+          if (this->mem->distmem.rank() > 0)
+            offst_mem_h[0] = this->halo;
 
           // chunk size has to be common to all processes!
           // TODO: set to 1? Test performance...
@@ -402,11 +407,11 @@ namespace libmpdataxx
         // revert to default chunk
         params.setChunk(parent_t::n_dims, chunk.data());
 
-        record_dsc_helper(aux, arr);
-
         auto space = aux.getSpace();
         space.selectHyperslab(H5S_SELECT_SET, shape_h.data(), offst_h.data());
-        aux.write(arr.data(), flttype_solver, H5::DataSpace(parent_t::n_dims, shape_h.data()), space, dxpl_id);
+        sspace_mem_h.selectHyperslab(H5S_SELECT_SET, shape_h.data(), offst_mem_h.data());
+
+        aux.write(arr.data(), flttype_solver, sspace_mem_h, space, dxpl_id);
       }
 
       void record_scalar_hlpr(const std::string &name, const std::string &group_name, typename solver_t::real_t data, H5::H5File hdf)
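The rewritten `aux.write` call pairs a hyperslab selection on the file dataspace with one on the per-rank memory dataspace, so data is copied straight out of the halo-padded local array instead of assuming a tightly packed buffer. A self-contained sketch of that HDF5 C++ API pattern (serial, single rank, illustrative file and dataset names; not the library's own code):

```cpp
#include <H5Cpp.h>
#include <vector>

int main()
{
  const hsize_t n = 8, halo = 1;
  const hsize_t mem_dim  = n + 2 * halo; // halo-padded local array
  const hsize_t file_dim = n;            // dataset without halos

  std::vector<float> arr(mem_dim, -1.f); // halo cells stay at -1
  for (hsize_t i = 0; i < n; ++i) arr[halo + i] = static_cast<float>(i);

  H5::H5File file("slab_demo.h5", H5F_ACC_TRUNC);
  H5::DataSpace fspace(1, &file_dim), mspace(1, &mem_dim);
  H5::DataSet dset = file.createDataSet("a", H5::PredType::NATIVE_FLOAT, fspace);

  // select the whole dataset in the file...
  const hsize_t fofst = 0, count = n;
  fspace.selectHyperslab(H5S_SELECT_SET, &count, &fofst);
  // ...and only the interior (halo skipped) in memory
  const hsize_t mofst = halo;
  mspace.selectHyperslab(H5S_SELECT_SET, &count, &mofst);

  // memory dataspace first, file dataspace second, as in DataSet::write
  dset.write(arr.data(), H5::PredType::NATIVE_FLOAT, mspace, fspace);
}
```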