13 delete [] (char *)_buf;
14 _buf = new char [bsize];
25 delete [] (char *)_buf;
29 std::ostringstream Ostr;
57 std::istringstream Istr(std::string((const char *)_buf,(const char *)_buf+_bsize));
60 delete [] (char *)_buf;
68 std::istringstream Istr(std::string((const char *)inbuf));
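The excerpts above (lines 13-68) show the MobileObject-style buffer discipline: PrepareBuffer() replaces the internal buffer, Pack() serializes the object's state through a std::ostringstream, and UnPack() rebuilds it from a std::istringstream wrapped around either the internal buffer or a caller-supplied one. A minimal, self-contained sketch of that pattern follows; the class name DemoObject and the member _x are illustrative stand-ins, not names from the listing.

#include <cstddef>
#include <cstring>
#include <sstream>
#include <string>

class DemoObject {
  double _x;        // illustrative payload
  char  *_buf;      // serialization buffer
  size_t _bsize;    // buffer size in bytes
public:
  DemoObject() : _x(0.0), _buf(NULL), _bsize(0) {}
  ~DemoObject() { delete [] _buf; }
  int PrepareBuffer(size_t bsize) {              // (re)allocate the transfer buffer
    delete [] _buf;
    _buf = new char [bsize];
    _bsize = bsize;
    return static_cast<int>(bsize);
  }
  int Pack() {                                   // serialize state into the buffer
    std::ostringstream Ostr;
    Ostr << _x;
    std::string s(Ostr.str());
    PrepareBuffer(s.size());
    std::memcpy(_buf, s.c_str(), s.size());
    return static_cast<int>(_bsize);             // report the packed size
  }
  int UnPack(const void *inbuf = NULL) {         // rebuild state; assumes Pack() ran if inbuf is NULL
    std::istringstream Istr(inbuf
        ? std::string(static_cast<const char *>(inbuf))   // external NUL-terminated buffer (cf. line 68)
        : std::string(_buf, _buf + _bsize));               // internal buffer of known size (cf. line 57)
    Istr >> _x;
    return Istr.fail() ? 1 : 0;
  }
  const void *GetBuffer() const { return _buf; }
};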
80 MPI_Initialized(&mpiInitd);
97 _comm = MPI_COMM_WORLD;
109 if(_rc != MPI_SUCCESS)
116 if(_rc != MPI_SUCCESS)
135 int retVal = MPI_Comm_rank(inComm,&newRank);
137 retVal = MPI_Comm_size(inComm,&newSize);
170 return(MPI_Dims_create(numNodes,numDims,&_cart_dims[0]));
177 int topoType = MPI_UNDEFINED;
178 MPI_Topo_test(_comm,&topoType);
179 if(topoType != MPI_CART)
183 neighborRanks.resize(numDims*2,-1);
184 int neighborPlus = MPI_PROC_NULL;
185 int neighborMinus = MPI_PROC_NULL;
186 for(int iDim = 0;((iDim < numDims) && !retVal);iDim++){
187 retVal = MPI_Cart_shift(_comm,iDim,1,&neighborMinus,&neighborPlus);
188 if(neighborMinus != MPI_PROC_NULL)
189 neighborRanks[iDim*2] = neighborMinus;
190 if(neighborPlus != MPI_PROC_NULL)
191 neighborRanks[iDim*2+1] = neighborPlus;
197 char hostName[MPI_MAX_PROCESSOR_NAME];
200 MPI_Get_processor_name(hostName,&hostLen);
202 retVal.assign(hostName,hostLen);
208 const std::vector<int> &isPeriodic, bool reOrder,
214 bool autoDetermineSize = false;
215 if(dimDir.size() != numDims)
216 autoDetermineSize = true;
218 std::vector<int>::iterator dimDirIt = dimDir.begin();
219 while(dimDirIt != dimDir.end())
220 nProcCart *= *dimDirIt++;
222 autoDetermineSize = true;
223 if(autoDetermineSize){
224 dimDir.resize(numDims,0);
231 if(isPeriodic.size() == numDims){
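The topology excerpts (lines 170-231) use MPI_Dims_create to factor the node count into a grid, MPI_Topo_test to verify a Cartesian communicator, and MPI_Cart_shift to collect neighbor ranks. The standalone sketch below strings those calls together with MPI_Cart_create (which the topology setup would need, though that line is not excerpted) for a two-dimensional, non-periodic grid; it illustrates the idiom, not the member functions themselves.

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int nproc = 0, rank = 0;
  MPI_Comm_size(MPI_COMM_WORLD, &nproc);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  const int numDims = 2;
  std::vector<int> dims(numDims, 0), periods(numDims, 0);
  MPI_Dims_create(nproc, numDims, &dims[0]);          // factor nproc into a 2-D grid

  MPI_Comm cartComm;
  MPI_Cart_create(MPI_COMM_WORLD, numDims, &dims[0], &periods[0],
                  1 /* reorder */, &cartComm);

  int topoType = MPI_UNDEFINED;
  MPI_Topo_test(cartComm, &topoType);                 // confirm the communicator is MPI_CART
  std::vector<int> neighborRanks(numDims * 2, -1);
  if (topoType == MPI_CART) {
    for (int iDim = 0; iDim < numDims; iDim++) {
      int minus = MPI_PROC_NULL, plus = MPI_PROC_NULL;
      MPI_Cart_shift(cartComm, iDim, 1, &minus, &plus);
      if (minus != MPI_PROC_NULL) neighborRanks[iDim * 2]     = minus;
      if (plus  != MPI_PROC_NULL) neighborRanks[iDim * 2 + 1] = plus;
    }
  }
  if (rank == 0)
    std::printf("grid: %d x %d\n", dims[0], dims[1]);
  MPI_Comm_free(&cartComm);
  MPI_Finalize();
  return 0;
}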
268 std::vector<MPI_Request> requests;
271 requests.push_back(*ri++);
274 requests.push_back(*ri++);
275 int count = requests.size();
277 _rc = MPI_Waitall(count,&requests[0],&_status[0]);
279 ri = requests.begin();
280 while(ri != requests.end()){
282 if(*ri != MPI_REQUEST_NULL)
283 assert(_status[ri-requests.begin()].MPI_ERROR == 0);
317 MPI_Initialized(&flag);
331 MPI_Initialized(&flag);
332 _comm = MPI_COMM_WORLD;
343 _rc = MPI_Init_thread(narg, args, MPI_THREAD_FUNNELED, &provided);
344 assert(provided == MPI_THREAD_FUNNELED);
348 atexit((void (*)())MPI_Finalize);
350 if(_rc == MPI_SUCCESS)
387 MPI_Initialized(&mpiInitd);
388 MPI_Finalized(&flag);
390 MPI_Comm_free(&_comm);
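Lines 317-390 guard the MPI lifecycle: MPI_Initialized / MPI_Finalized checks before acting, MPI_Init_thread with MPI_THREAD_FUNNELED, registration of finalization via atexit(), and MPI_Comm_free on teardown. A hedged sketch of that initialization idiom follows; unlike the listing, it registers a small wrapper with atexit() instead of casting MPI_Finalize directly, and it accepts any thread level at or above the one requested.

#include <mpi.h>
#include <cassert>
#include <cstdlib>

static void FinalizeAtExit() {           // well-typed wrapper for atexit()
  int finalized = 0;
  MPI_Finalized(&finalized);
  if (!finalized)
    MPI_Finalize();
}

int SafeInit(int *narg, char ***args) {
  int flag = 0;
  MPI_Initialized(&flag);
  if (flag)                              // already initialized elsewhere: nothing to do
    return MPI_SUCCESS;
  int provided = MPI_THREAD_SINGLE;
  int rc = MPI_Init_thread(narg, args, MPI_THREAD_FUNNELED, &provided);
  assert(provided >= MPI_THREAD_FUNNELED);
  if (rc == MPI_SUCCESS)
    std::atexit(FinalizeAtExit);         // finalize automatically at program exit
  return rc;
}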
439 int sizeofobject = 0;
440 if(_rank == root_rank)
441 sizeofobject = sval.size();
442 _rc = MPI_Bcast(&sizeofobject,1,MPI_INT,root_rank,_comm);
443 if(sizeofobject <= 0)
446 char *sbuf = new char [sizeofobject+1];
447 if(_rank == root_rank)
448 std::strncpy(sbuf,sval.c_str(),sizeofobject);
449 _rc = MPI_Bcast(sbuf,sizeofobject,MPI_CHAR,root_rank,_comm);
450 if(_rank != root_rank){
451 sbuf[sizeofobject] = '\0';
452 sval.assign(std::string(sbuf));
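Lines 439-452 broadcast a std::string in the usual two steps: the root first broadcasts the length so every rank can size a buffer, then the characters follow and are NUL-terminated on the receivers. A standalone sketch of the same idiom follows; BroadcastString is a free illustrative helper, not the BroadCast member itself, and it assumes MPI is already initialized.

#include <mpi.h>
#include <cstring>
#include <string>

int BroadcastString(std::string &sval, int root_rank, MPI_Comm comm) {
  int rank = 0;
  MPI_Comm_rank(comm, &rank);
  int len = (rank == root_rank) ? static_cast<int>(sval.size()) : 0;
  int rc = MPI_Bcast(&len, 1, MPI_INT, root_rank, comm);    // step 1: the size
  if (rc != MPI_SUCCESS || len <= 0)
    return rc;
  char *sbuf = new char [len + 1];
  if (rank == root_rank)
    std::strncpy(sbuf, sval.c_str(), len);
  rc = MPI_Bcast(sbuf, len, MPI_CHAR, root_rank, comm);     // step 2: the payload
  if (rank != root_rank) {
    sbuf[len] = '\0';
    sval.assign(sbuf, len);
  }
  delete [] sbuf;
  return rc;
}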
461 unsigned int remote_rank, int tag)
469 _rc = MPI_Isend(buf,sendsize,MPI_CHAR,remote_rank,
476 unsigned int remote_rank, int tag)
478 _rc = MPI_Send(buf,sendsize,MPI_CHAR,remote_rank,
485 unsigned int remote_rank, int tag)
493 _rc = MPI_Recv_init(recvbuf,recvsize,MPI_CHAR,remote_rank,
501 unsigned int remote_rank, int tag)
509 _rc = MPI_Irecv(recvbuf,recvsize,MPI_CHAR,remote_rank,
516 unsigned int remote_rank, int tag)
521 _rc = MPI_Recv(recvbuf,recvsize,MPI_CHAR,remote_rank,
528 unsigned int remote_rank, int tag)
536 _rc = MPI_Send_init(sendbuf,sendsize,MPI_CHAR,remote_rank,
544 std::vector<int> allsizes(_nproc,0);
546 std::vector<int>::iterator asi = allsizes.begin();
547 while(asi != allsizes.end()){
548 *asi = *asi*datasize;
551 std::vector<int> displacements(_nproc,0);
552 for(int i = 1;i < _nproc;i++)
553 displacements[i] = displacements[i-1]+allsizes[i-1];
554 _rc = MPI_Allgatherv(sendbuf,allsizes[_rank],MPI_CHAR,
555 recvbuf,&allsizes[0],&displacements[0],
563 int sizeofobject = 0;
564 if(_rank == root_rank){
565 sizeofobject = mo->Pack();
567 _rc = MPI_Bcast(&sizeofobject,1,MPI_INT,root_rank,_comm);
568 if(sizeofobject <= 0)
571 if(_rank != root_rank)
574 if(_rank != root_rank){
585 local_size = sPtr->Pack();
586 std::vector<int> allsizes;
587 std::vector<int> disps;
589 allsizes.resize(_nproc,0);
591 allsizes[_rank] = local_size;
593 this->Gather(local_size,allsizes,root);
597 for(int i = 0; i < _nproc;i++){
598 total_size += allsizes[i];
600 disps[i] = disps[i-1]+allsizes[i-1];
604 char *recvbuffer = NULL;
606 recvbuffer = new char [total_size];
608 _rc = MPI_Gatherv((void *)(sPtr->GetBuffer()),local_size,MPI_CHAR,
609 (void *)(recvbuffer),&allsizes[0],&disps[0],MPI_CHAR,
616 for(int i = 0;i < _nproc;i++){
620 rVec[i]->UnPack(&recvbuffer[disps[i]]);
622 delete [] recvbuffer;
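The gather excerpt above (lines 585-622), like the MobileObject gathers that follow, uses one recipe: gather the per-rank packed sizes, turn them into a displacement array by prefix summation, then move the packed bytes with MPI_Gatherv or MPI_Allgatherv. The sketch below isolates that size/displacement bookkeeping for a single byte string per rank; GatherBytes is an illustrative helper, not part of the library.

#include <mpi.h>
#include <string>
#include <vector>

// Gather one variable-length byte string from every rank onto 'root'.
int GatherBytes(const std::string &local, std::string &all, int root, MPI_Comm comm) {
  int rank = 0, nproc = 0;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &nproc);
  int local_size = static_cast<int>(local.size());
  std::vector<int> allsizes(nproc, 0), disps(nproc, 0);
  MPI_Gather(&local_size, 1, MPI_INT, &allsizes[0], 1, MPI_INT, root, comm);
  int total_size = 0;
  for (int i = 0; i < nproc; i++) {            // displacements are a prefix sum of the sizes
    total_size += allsizes[i];
    if (i > 0)
      disps[i] = disps[i - 1] + allsizes[i - 1];
  }
  // Only the root needs the receive buffer; keep it at least one byte so &recvbuffer[0] is valid.
  std::vector<char> recvbuffer((rank == root && total_size > 0) ? total_size : 1);
  int rc = MPI_Gatherv(const_cast<char *>(local.data()), local_size, MPI_CHAR,
                       &recvbuffer[0], &allsizes[0], &disps[0], MPI_CHAR,
                       root, comm);
  if (rank == root)
    all.assign(&recvbuffer[0], total_size);
  return rc;
}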
632 int nobjs = mos.size();
636 if((_rc = MPI_Bcast(&nobjs,1,MPI_INT,root_rank,_comm)))
638 std::vector<int> sizeofobject(nobjs,0);
640 if(_rank == root_rank){
641 std::vector<int>::iterator si = sizeofobject.begin();
642 std::vector<MobileObject *>::iterator moi = mos.begin();
643 while(moi != mos.end()){
644 sizeofobject[si - sizeofobject.begin()] = (*moi)->Pack();
649 if((_rc = MPI_Bcast(&sizeofobject[0],nobjs,MPI_INT,root_rank,_comm)))
651 std::vector<int>::iterator si = sizeofobject.begin();
652 std::vector<MobileObject *>::iterator moi = mos.begin();
653 while(si != sizeofobject.end()){
654 if(_rank != root_rank){
655 int bsize = (*moi++)->PrepareBuffer(*si);
656 assert(bsize == *si);
660 assert(total_size > 0);
663 char *bufferspace = new char [total_size];
664 if(_rank == root_rank){
666 char *cur_pos = bufferspace;
667 si = sizeofobject.begin();
669 while(moi != mos.end()){
670 std::memcpy(cur_pos,(*moi)->GetBuffer(),*si);
675 if((_rc = MPI_Bcast(bufferspace,total_size,MPI_CHAR,root_rank,_comm))){
676 delete [] bufferspace;
681 if(_rank == root_rank){
684 while(moi != mos.end())
685 (*moi++)->DestroyBuffer();
688 char *cur_pos = bufferspace;
689 si = sizeofobject.begin();
691 while(moi != mos.end()){
692 assert((*moi)->GetBuffer() != NULL);
693 std::memcpy((*moi)->GetBuffer(),cur_pos,*si);
694 _rc += (*moi)->UnPack();
700 delete [] bufferspace;
708 local_size = sPtr->Pack();
709 std::vector<int> allsizes(_nproc,0);
710 std::vector<int> disps(_nproc,0);
711 allsizes[_rank] = local_size;
714 for(int i = 0; i < _nproc;i++){
715 total_size += allsizes[i];
717 disps[i] = disps[i-1]+allsizes[i-1];
720 char *recvbuffer = new char [total_size];
721 _rc = MPI_Allgatherv((void *)(sPtr->GetBuffer()),local_size,MPI_CHAR,
722 (void *)(recvbuffer),&allsizes[0],&disps[0],MPI_CHAR,_comm);
724 for(int i = 0;i < _nproc;i++){
728 rVec[i]->UnPack(&recvbuffer[disps[i]]);
730 delete [] recvbuffer;
736 std::vector<int> &nsend_all, int root)
738 int sndIcnt = sVec.size();
740 std::vector<int> local_sizes(sndIcnt,0);
741 for(int i = 0;i < sndIcnt;i++){
742 local_sizes[i] = sVec[i]->Pack();
743 local_size += local_sizes[i];
745 std::vector<int> Allsizes;
746 this->Gatherv(local_sizes,Allsizes,nsend_all,root);
748 std::vector<int> allsizes(_nproc,0);
749 std::vector<int> disps(_nproc,0);
751 for(int i = 0; i < _nproc; i++){
752 for(int j = 0; j < nsend_all[i]; j++){
753 allsizes[i] += Allsizes[sindex];
755 total_size += allsizes[i];
757 disps[i] = disps[i-1]+allsizes[i-1];
759 char *sendbuffer = new char [local_size];
760 char *recvbuffer = NULL;
762 recvbuffer = new char [total_size];
764 char *cur_pos = sendbuffer;
765 for(int i = 0;i < sndIcnt;i++){
766 std::memcpy(cur_pos,sVec[i]->GetBuffer(),sVec[i]->BufSize());
767 cur_pos += sVec[i]->BufSize();
769 _rc = MPI_Gatherv((void *)sendbuffer,local_size,MPI_CHAR,
770 (void *)recvbuffer,&allsizes[0],&disps[0],MPI_CHAR,
772 cur_pos = recvbuffer;
777 for(int i = 0;i < _nproc;i++){
778 for(int j = 0;j < nsend_all[i];j++){
780 rVec[rind++]->UnPack(sendbuffer);
781 sendbuffer += local_sizes[j];
782 cur_pos += local_sizes[j];
786 rVec[rind++]->UnPack(cur_pos);
787 cur_pos += Allsizes[sind++];
791 delete [] recvbuffer;
797 std::vector<int> &nsend_all)
799 int sndIcnt = sVec.size();
801 std::vector<int> local_sizes(sndIcnt,0);
802 for(int i = 0;i < sndIcnt;i++){
803 local_sizes[i] = sVec[i]->Pack();
804 local_size += local_sizes[i];
806 std::vector<int> Allsizes;
807 this->AllGatherv(local_sizes,Allsizes,nsend_all);
809 std::vector<int> allsizes(_nproc,0);
810 std::vector<int> disps(_nproc,0);
812 for(int i = 0; i < _nproc; i++){
813 for(int j = 0; j < nsend_all[i]; j++){
814 allsizes[i] += Allsizes[sindex];
816 total_size += allsizes[i];
818 disps[i] = disps[i-1]+allsizes[i-1];
820 char *sendbuffer = new char [local_size];
821 char *recvbuffer = new char [total_size];
822 char *cur_pos = sendbuffer;
823 for(int i = 0;i < sndIcnt;i++){
824 std::memcpy(cur_pos,sVec[i]->GetBuffer(),sVec[i]->BufSize());
825 cur_pos += sVec[i]->BufSize();
827 std::vector<MobileObject *>::iterator svi = sVec.begin();
828 while(svi != sVec.end()){
829 (*svi)->DestroyBuffer();
832 _rc = MPI_Allgatherv((void *)sendbuffer,local_size,MPI_CHAR,
833 (void *)recvbuffer,&allsizes[0],&disps[0],MPI_CHAR,_comm);
834 cur_pos = recvbuffer;
838 for(int i = 0;i < _nproc;i++){
839 for(int j = 0;j < nsend_all[i];j++){
841 rVec[rind++]->UnPack(sendbuffer);
842 sendbuffer += local_sizes[j];
843 cur_pos += local_sizes[j];
847 rVec[rind++]->UnPack(cur_pos);
848 cur_pos += Allsizes[sind++];
852 delete [] recvbuffer;
868 return(MPI_UNSIGNED_CHAR);
873 return(MPI_LONG_LONG_INT);
875 return(MPI_UNSIGNED);
878 return(MPI_DATATYPE_NULL);
882 return(MPI_DATATYPE_NULL);
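Lines 868-882 appear to be the tail of the ResolveDataType() switch, mapping the library's comm::DataTypes values onto MPI datatypes and falling back to MPI_DATATYPE_NULL for anything unrecognized. A self-contained sketch of that switch pattern follows; the DemoTypes enumerators are illustrative and are not the actual comm::DataTypes values.

#include <mpi.h>

enum DemoTypes { DTCHAR, DTUCHAR, DTINT, DTUINT, DTLONGLONG, DTDOUBLE };

MPI_Datatype ResolveDemoType(DemoTypes dt) {
  switch (dt) {
    case DTCHAR:     return MPI_CHAR;
    case DTUCHAR:    return MPI_UNSIGNED_CHAR;
    case DTINT:      return MPI_INT;
    case DTUINT:     return MPI_UNSIGNED;
    case DTLONGLONG: return MPI_LONG_LONG_INT;
    case DTDOUBLE:   return MPI_DOUBLE;
    default:         return MPI_DATATYPE_NULL;   // unsupported type
  }
}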
Referenced declarations (brief descriptions from the header, which defines MPI-specific parallel global and program classes):

CommunicatorObject -- main encapsulation of MPI.
  Communicator management:
    int Initialize(CommunicatorObject &incomm)
    int RenewCommunicator(MPI_Comm &inComm)
    MPI_Comm GetCommunicator() const
    int Split(int color, int key, CommunicatorObject &newcomm)
    int Check(comm::Ops op=comm::MAXOP)
  Type and operation resolution:
    MPI_Datatype ResolveDataType(const comm::DataTypes &dt)
    MPI_Op ResolveOp(const comm::Ops &op)
    DataTypes: supported data types.
    Ops: operations for collectives.
  Cartesian topology:
    int ComputeCartesianDims(int numNodes, int numDims)
    int InitializeCartesianTopology(int numNodes, int numDims, std::vector< int > &dimDir, const std::vector< int > &isPeriodic, bool reOrder, CommunicatorObject &cartComm)
    int CartNeighbors(std::vector< int > &neighborRanks)
  Collectives:
    int BroadCast(std::string &sval, int root_rank=0)
    int Gather(DataType &sendval, std::vector< DataType > &recvvec, int root=0)
    int Gatherv(std::vector< DataType > &sendvec, std::vector< DataType > &recvvec, std::vector< int > &nsend_all, int nsend=0, int root=0)
    int AllGather(std::vector< DataType > &sendvec, std::vector< DataType > &recvvec, int sndcnt=0, int recvcnt=0)
    int AllGatherv(std::vector< DataType > &sendvec, std::vector< DataType > &recvvec, std::vector< int > &nsend_all)
    int _AllGatherv(void *sendbuf, int mysendcnt, int datasize, void *recvbuf)
  MobileObject collectives:
    int _BroadCastMOV(std::vector< MobileObject *> &mos, int root_rank=0)
    int _GatherMO(MobileObject *sPtr, std::vector< MobileObject *> &rVec, int sndcnt, int root=0)
    int _GatherMOV(std::vector< MobileObject *> &sVec, std::vector< MobileObject *> &rVec, std::vector< int > &nsend_all, int root=0)
    int _AllGatherMO(MobileObject *sPtr, std::vector< MobileObject *> &rVec, int sndcnt=1)
    int _AllGatherMOV(std::vector< MobileObject *> &sVec, std::vector< MobileObject *> &rVec, std::vector< int > &nsend_all)
  Point-to-point helpers and request management:
    int StartSend(unsigned int rid)
    int StartRecv(unsigned int rid)
    int _Send(void *buf, int sendsize, unsigned int remote_rank, int tag=0)
    int _ASend(void *buf, int sendsize, unsigned int remote_rank, int tag=0)
    int _SetSend(void *buf, int sendsize, unsigned int remote_rank, int tag=0)
    int _Recv(void *buf, int recvsize, unsigned int remote_rank, int tag=0)
    int _ARecv(void *buf, int recvsize, unsigned int remote_rank, int tag=0)
    int _SetRecv(void *buf, int recvsize, unsigned int remote_rank, int tag=0)
  Data members:
    MPI_Datatype IntegerTypeID
    std::vector< int > _cart_dims, _cart_periodic, _cart_coords
    std::vector< MPI_Request > _send_requests, _recv_requests
    std::vector< int > _send_tags, _recv_tags
    std::vector< MPI_Status > _status

MobileObject -- provides communication for complex objects.
    virtual int Pack(void **inbuf=NULL)
    virtual int UnPack(const void *outbuf=NULL)
    virtual int PrepareBuffer(size_t bsize)
    virtual void DestroyBuffer()
    const void * GetBuffer() const
    int String2Buf(const std::string &instr, void **buf)