Skip to content

Commit 9fc6d6b

Browse files
committed
Testing (only printf testing for now)
1 parent b4fbfc1 commit 9fc6d6b

File tree

2 files changed

+287
-1
lines changed

2 files changed

+287
-1
lines changed

test/CoreTest.cpp

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
# define OPENPMD_private public
44
# define OPENPMD_protected public
55
#endif
6+
7+
#include "openPMD/ChunkInfo.hpp"
68
#include "openPMD/openPMD.hpp"
79

810
#include <catch2/catch.hpp>
@@ -19,11 +21,78 @@
1921

2022
using namespace openPMD;
2123

24+
namespace test_chunk_assignment
25+
{
26+
using namespace openPMD::chunk_assignment;
27+
struct Params
28+
{
29+
ChunkTable table;
30+
RankMeta metaSource;
31+
RankMeta metaSink;
32+
33+
void
34+
init(
35+
size_t sourceRanks,
36+
size_t sinkRanks,
37+
size_t in_per_host,
38+
size_t out_per_host )
39+
{
40+
for( size_t rank = 0; rank < sourceRanks; ++rank )
41+
{
42+
table.emplace_back(
43+
Offset{ rank, rank }, Extent{ rank, rank }, rank );
44+
table.emplace_back(
45+
Offset{ rank, 100 * rank }, Extent{ rank, 100 * rank }, rank );
46+
metaSource.emplace( rank, std::to_string( rank / in_per_host ) );
47+
}
48+
for( size_t rank = 0; rank < sinkRanks; ++rank )
49+
{
50+
metaSink.emplace( rank, std::to_string( rank / out_per_host ) );
51+
}
52+
}
53+
};
54+
void print( RankMeta const & meta, ChunkTable const & table )
55+
{
56+
for( auto const & chunk : table )
57+
{
58+
std::cout << "[HOST: " << meta.at( chunk.sourceID )
59+
<< ",\tRank: " << chunk.sourceID << ",\tOffset: ";
60+
for( auto offset : chunk.offset )
61+
{
62+
std::cout << offset << ", ";
63+
}
64+
std::cout << "\tExtent: ";
65+
for( auto extent : chunk.extent )
66+
{
67+
std::cout << extent << ", ";
68+
}
69+
std::cout << "]" << std::endl;
70+
}
71+
}
72+
} // namespace test_chunk_assignment
73+
74+
TEST_CASE( "chunk_assignment", "[core]" )
{
    using namespace chunk_assignment;

    // 6 writing ranks spread over hosts "0".."2" (2 per host),
    // 2 reading ranks on hosts "0" and "1" (1 per host).
    test_chunk_assignment::Params setup;
    setup.init( 6, 2, 2, 1 );
    test_chunk_assignment::print( setup.metaSource, setup.table );

    // Primary pass: match chunks by hostname (round robin inside a host);
    // secondary pass: bin-pack whatever the first pass left unassigned.
    ByHostname hostnameStrategy( make_unique< RoundRobin >() );
    FromPartialStrategy combinedStrategy(
        make_unique< ByHostname >( std::move( hostnameStrategy ) ),
        make_unique< BinPacking >() );
    ChunkTable assignment = assignChunks(
        setup.table, setup.metaSource, setup.metaSink, combinedStrategy );
    std::cout << "\nRESULTS:" << std::endl;
    test_chunk_assignment::print( setup.metaSink, assignment );
}
89+
2290
TEST_CASE( "versions_test", "[core]" )
2391
{
2492
auto const apiVersion = getVersion( );
2593
REQUIRE(2u == std::count_if(apiVersion.begin(), apiVersion.end(), []( char const c ){ return c == '.';}));
2694

95+
2796
auto const standard = getStandard( );
2897
REQUIRE(standard == "1.1.0");
2998

test/ParallelIOTest.cpp

Lines changed: 218 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,8 @@
44
#include "openPMD/auxiliary/Environment.hpp"
55
#include "openPMD/auxiliary/Filesystem.hpp"
66
#include "openPMD/openPMD.hpp"
7+
// @todo change includes
8+
#include "openPMD/benchmark/mpi/OneDimensionalBlockSlicer.hpp"
79
#include <catch2/catch.hpp>
810

911
#if openPMD_HAVE_MPI
@@ -1108,4 +1110,219 @@ TEST_CASE( "adios2_ssc", "[parallel][adios2]" )
11081110
{
11091111
adios2_ssc();
11101112
}
1111-
#endif
1113+
1114+
void adios2_chunk_distribution()
1115+
{
1116+
/*
1117+
* This test simulates a multi-node streaming setup in order to test some
1118+
* of our chunk distribution strategies.
1119+
* We don't actually stream (but write a .bp file instead) and also we don't
1120+
* actually run anything on multiple nodes, but we can use this for testing
1121+
* the distribution strategies anyway.
1122+
*/
1123+
int mpi_size{ -1 };
1124+
int mpi_rank{ -1 };
1125+
MPI_Comm_size( MPI_COMM_WORLD, &mpi_size );
1126+
MPI_Comm_rank( MPI_COMM_WORLD, &mpi_rank );
1127+
1128+
/*
1129+
* Mappings: MPI rank -> hostname where the rank is executed.
1130+
* For the writing application as well as for the reading one.
1131+
*/
1132+
chunk_assignment::RankMeta writingRanksHostnames, readingRanksHostnames;
1133+
for( int i = 0; i < mpi_size; ++i )
1134+
{
1135+
/*
1136+
* The mapping is intentionally weird. Nodes "node1", "node3", ...
1137+
* do not have instances of the reading application running on them.
1138+
* Our distribution strategies will need to deal with that situation.
1139+
*/
1140+
// 0, 0, 1, 1, 2, 2, 3, 3 ...
1141+
writingRanksHostnames[ i ] = "node" + std::to_string( i / 2 );
1142+
// 0, 0, 0, 0, 2, 2, 2, 2 ...
1143+
readingRanksHostnames[ i ] = "node" + std::to_string( i / 4 * 2 );
1144+
}
1145+
1146+
std::string filename = "../samples/adios2_chunk_distribution.bp";
1147+
// Simulate a stream: BP4 assigns chunk IDs by subfile (i.e. aggregator).
1148+
std::stringstream parameters;
1149+
parameters << R"END(
1150+
{
1151+
"adios2":
1152+
{
1153+
"engine":
1154+
{
1155+
"type": "bp4",
1156+
"parameters":
1157+
{
1158+
"NumAggregators":)END"
1159+
<< "\"" << std::to_string( mpi_size ) << "\""
1160+
<< R"END(
1161+
}
1162+
}
1163+
}
1164+
}
1165+
)END";
1166+
1167+
auto printAssignment = [ mpi_rank ](
1168+
std::string const & strategyName,
1169+
ChunkTable const & table,
1170+
chunk_assignment::RankMeta const & meta )
1171+
{
1172+
if( mpi_rank != 0 )
1173+
{
1174+
return;
1175+
}
1176+
std::cout << "WITH STRATEGY '" << strategyName << "':\n";
1177+
for( auto const & chunk : table )
1178+
{
1179+
std::cout << "[HOST: " << meta.at( chunk.sourceID )
1180+
<< ",\tRank: " << chunk.sourceID << ",\tOffset: ";
1181+
for( auto offset : chunk.offset )
1182+
{
1183+
std::cout << offset << ", ";
1184+
}
1185+
std::cout << "\tExtent: ";
1186+
for( auto extent : chunk.extent )
1187+
{
1188+
std::cout << extent << ", ";
1189+
}
1190+
std::cout << "]" << std::endl;
1191+
}
1192+
};
1193+
1194+
// Create a dataset.
1195+
{
1196+
Series series(
1197+
filename,
1198+
openPMD::Access::CREATE,
1199+
MPI_COMM_WORLD,
1200+
parameters.str() );
1201+
/*
1202+
* The writing application sets an attribute that tells the reading
1203+
* application about the "MPI rank -> hostname" mapping.
1204+
* Each rank only needs to set its own value.
1205+
* (Some other options like setting all at once or reading from a file
1206+
* exist as well.)
1207+
*/
1208+
series.setMpiRanksMetaInfo( writingRanksHostnames.at( mpi_rank ) );
1209+
1210+
auto E_x = series.iterations[ 0 ].meshes[ "E" ][ "x" ];
1211+
openPMD::Dataset ds(
1212+
openPMD::Datatype::INT, { unsigned( mpi_size ), 10 } );
1213+
E_x.resetDataset( ds );
1214+
std::vector< int > data( 10, 0 );
1215+
std::iota( data.begin(), data.end(), 0 );
1216+
E_x.storeChunk( data, { unsigned( mpi_rank ), 0 }, { 1, 10 } );
1217+
series.flush();
1218+
}
1219+
1220+
{
1221+
Series series( filename, openPMD::Access::READ_ONLY, MPI_COMM_WORLD );
1222+
/*
1223+
* Inquire the writing application's "MPI rank -> hostname" mapping.
1224+
* The reading application needs to know about its own mapping.
1225+
* Having both of these mappings is the basis for an efficient chunk
1226+
* distribution since we can use it to figure out which instances
1227+
* are running on the same nodes.
1228+
*/
1229+
auto rankMetaIn = series.mpiRanksMetaInfo();
1230+
REQUIRE( rankMetaIn == writingRanksHostnames );
1231+
1232+
auto E_x = series.iterations[ 0 ].meshes[ "E" ][ "x" ];
1233+
/*
1234+
* Ask the backend which chunks are available.
1235+
*/
1236+
auto const chunkTable = E_x.availableChunks();
1237+
1238+
printAssignment( "INPUT", chunkTable, rankMetaIn );
1239+
1240+
using namespace chunk_assignment;
1241+
1242+
/*
1243+
* Assign the chunks by distributing them one after the other to reading
1244+
* ranks. Easy, but not particularly efficient.
1245+
*/
1246+
RoundRobin roundRobinStrategy;
1247+
auto roundRobinAssignment = assignChunks(
1248+
chunkTable, rankMetaIn, readingRanksHostnames, roundRobinStrategy );
1249+
printAssignment(
1250+
"ROUND ROBIN", roundRobinAssignment, readingRanksHostnames );
1251+
1252+
/*
1253+
* Assign chunks by hostname.
1254+
* Two difficulties:
1255+
* * A distribution strategy within one node needs to be picked.
1256+
* We pick the BinPacking strategy that tries to assign chunks in a
1257+
* balanced manner. Since our chunks have a small extent along
1258+
* dimension 0, use dimension 1 for slicing.
1259+
* * The assignment is partial since some nodes only have instances of
1260+
* the writing application. Those chunks remain unassigned.
1261+
*/
1262+
ByHostname byHostname(
1263+
std::make_unique< BinPacking >( /* splitAlongDimension = */ 1 ) );
1264+
auto byHostnamePartialAssignment = assignChunks(
1265+
chunkTable, rankMetaIn, readingRanksHostnames, byHostname );
1266+
printAssignment(
1267+
"HOSTNAME, ASSIGNED",
1268+
byHostnamePartialAssignment.assigned,
1269+
readingRanksHostnames );
1270+
printAssignment(
1271+
"HOSTNAME, LEFTOVER",
1272+
byHostnamePartialAssignment.notAssigned,
1273+
rankMetaIn );
1274+
1275+
/*
1276+
* Assign chunks by hostnames, once more.
1277+
* This time, apply a secondary distribution strategy to assign
1278+
* leftovers. We pick BinPacking, once more.
1279+
* Notice that the BinPacking strategy does not (yet) take into account
1280+
* chunks that have been assigned by the first round.
1281+
* Balancing is calculated solely based on the leftover chunks from the
1282+
* first round.
1283+
*/
1284+
FromPartialStrategy fromPartialStrategy(
1285+
std::make_unique< ByHostname >( std::move( byHostname ) ),
1286+
std::make_unique< BinPacking >( /* splitAlongDimension = */ 1 ) );
1287+
auto fromPartialAssignment = assignChunks(
1288+
chunkTable,
1289+
rankMetaIn,
1290+
readingRanksHostnames,
1291+
fromPartialStrategy );
1292+
printAssignment(
1293+
"HOSTNAME WITH SECOND PASS",
1294+
fromPartialAssignment,
1295+
readingRanksHostnames );
1296+
1297+
/*
1298+
* Assign chunks by slicing the n-dimensional physical domain and
1299+
* intersecting those slices with the available chunks from the backend.
1300+
* Notice that this strategy only returns the chunks that the currently
1301+
* running rank is supposed to load, whereas the other strategies return
1302+
* a chunk table containing all chunks that all ranks will load.
1303+
* In principle, a chunk_assignment::Strategy only needs to return the
1304+
* chunks that the current rank should load, but is free to emplace the
1305+
* other chunks for other reading ranks as well.
1306+
* (Reasoning: In some strategies, calculating everything is necessary,
1307+
* in others such as this one, it's an unneeded overhead.)
1308+
*/
1309+
ByCuboidSlice cuboidSliceStrategy(
1310+
std::make_unique< OneDimensionalBlockSlicer >( 1 ),
1311+
E_x.getExtent(),
1312+
mpi_rank,
1313+
mpi_size );
1314+
auto cuboidSliceAssignment = assignChunks(
1315+
chunkTable,
1316+
rankMetaIn,
1317+
readingRanksHostnames,
1318+
cuboidSliceStrategy );
1319+
printAssignment(
1320+
"CUBOID SLICE", cuboidSliceAssignment, readingRanksHostnames );
1321+
}
1322+
}
1323+
1324+
TEST_CASE( "adios2_chunk_distribution", "[parallel][adios2]" )
{
    // Catch2 entry point; the actual test logic lives in the free function.
    adios2_chunk_distribution();
}
1328+
#endif // openPMD_HAVE_ADIOS2 && openPMD_HAVE_MPI

0 commit comments

Comments
 (0)