10 std::vector<std::vector<double> > &parallelTimes,
16 MPI_Comm_size(inComm,&numProcs);
18 parallelTimes.resize(3);
19 parallelTimes[0].resize(nTimes);
20 parallelTimes[1].resize(nTimes);
21 parallelTimes[2].resize(nTimes);
23 MPI_Reduce(processTimes,&(parallelTimes[0][0]),nTimes,MPI_DOUBLE,MPI_MIN,0,inComm);
24 MPI_Reduce(processTimes,&(parallelTimes[1][0]),nTimes,MPI_DOUBLE,MPI_MAX,0,inComm);
25 MPI_Reduce(processTimes,&(parallelTimes[2][0]),nTimes,MPI_DOUBLE,MPI_SUM,0,inComm);
27 for(
unsigned int iTime = 0;iTime < nTimes;iTime++){
28 parallelTimes[2][iTime] /= numProcs;
51 std::ostream &messageStream)
55 if(numDimensions <= 0){
56 messageStream <<
"WARNING: No dimension configured for parallel topology, defaulting to 3.\n";
60 std::ostringstream Ostr;
61 Ostr <<
"Dimension of parallel topology: " << numDimensions << std::endl;
62 messageStream << Ostr.str();
65 std::vector<int> &dimDir(topologyInfo.
dimDir);
68 std::vector<int> &isPeriodic(topologyInfo.
isPeriodic);
69 std::vector<int> &topoCoords(topologyInfo.
topoCoords);
72 dimDir.resize(numDimensions,0);
73 if(isPeriodic.size() != numDimensions)
74 isPeriodic.resize(numDimensions,0);
75 topoCoords.resize(numDimensions,0);
77 int numNodes = parallelCommunicator.
Size();
78 Ostr <<
"Total number of processes in topology: " << numNodes << std::endl;
79 messageStream << Ostr.str();
82 if(!cartDecomp.empty()){
85 messageStream <<
"Using user-specified decomp map.\n";
87 }
else if (!cartDirs.empty()){
89 messageStream <<
"Using user-specified directional guidance for decomp.\n";
90 int numCartDims = numDimensions;
91 std::vector<int>::iterator cartDirIt = cartDirs.begin();
92 std::vector<int>::iterator dimDirIt = dimDir.begin();
94 while(dimDirIt != dimDir.end() && cartDirIt != cartDirs.end()){
96 if(*cartDirIt++ == 0){
105 if(numCartDims < numDimensions){
106 if(numCartDims == 1){
107 dimDirIt = dimDir.begin();
108 while(dimDirIt != dimDir.end()){
110 *dimDirIt = parallelCommunicator.
Size();
113 }
else if(numNodes > 1){
117 int numFac = primeFactors.size();
123 while(numFac > numCartDims){
124 primeFactors[1] *= primeFactors.front();
125 primeFactors.pop_front();
126 std::sort(primeFactors.begin(),primeFactors.end());
129 dimDirIt = dimDir.begin();
130 while(dimDirIt != dimDir.end() && !primeFactors.empty()){
132 if(!primeFactors.empty()){
133 *dimDirIt = primeFactors.front();
134 primeFactors.pop_front();
146 isPeriodic,
true,topoCommunicator)){
147 messageStream <<
"Initialization of Cartesian process topology failed.\n";
152 topologyInfo.
rank = topoCommunicator.
Rank();
182 if(testComm.
Check()){
std::vector< int > cartDecompDirections
std::vector< int > neighborRanks
int CartNeighbors(std::vector< int > &neighborRanks)
void RenewStream(std::ostringstream &outStream)
bool CheckResult(bool &localResult, pcpp::CommunicatorType &testComm)
std::vector< int > topoCoords
std::vector< int > cartDecomposition
int InitializeCartesianTopology(int numNodes, int numDims, std::vector< int > &dimDir, const std::vector< int > &isPeriodic, bool reOrder, CommunicatorObject &cartComm)
std::deque< size_t > PrimeFactors(size_t inNumber)
std::vector< int > dimDir
void ReduceTimers(double *processTimes, unsigned int nTimes, std::vector< std::vector< double > > &parallelTimes, pcpp::CommunicatorType &inCommunicator)
std::vector< int > isPeriodic
MPI_Comm GetCommunicator() const
int Check(comm::Ops op=comm::MAXOP)
Main encapsulation of MPI.
int SetupCartesianTopology(pcpp::CommunicatorType &parallelCommunicator, pcpp::ParallelTopologyInfoType &topologyInfo, pcpp::CommunicatorType &topoCommunicator, std::ostream &messageStream)
Sets up a communicator with Cartesian topology.
std::vector< int > & CartCoordinates()