Commit 24834d5b by Francois Gygi

Modified Context to accept an MPI_Comm in the constructor.

Removed pimpl idiom in Context implementation.


git-svn-id: http://qboxcode.org/svn/qb/trunk@1813 cba15fb0-1239-40c8-b417-11db7ca47a34
parent 2f11cd34
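A minimal usage sketch of the new interface (illustrative only, not part of the commit; it assumes a build with USE_MPI and, for the 2x2 grid, at least four MPI processes):

#include <iostream>
#include <mpi.h>
#include "Context.h"

int main(int argc, char** argv)
{
  MPI_Init(&argc,&argv);
  {
    // single-row Context spanning all processes of the communicator
    Context ctxt(MPI_COMM_WORLD);

    // nprow x npcol process grid built on an explicit communicator
    Context grid(MPI_COMM_WORLD,2,2);

    if ( ctxt.onpe0() )
      std::cout << grid;
  } // Context objects are destroyed before MPI_Finalize
  MPI_Finalize();
  return 0;
}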
@@ -21,18 +21,116 @@
#include <iosfwd>
#include <string>
#if USE_MPI
#include <vector>
#include <cassert>
#include "blacs.h"
#include <mpi.h>
#else
typedef int MPI_Comm;
#endif
struct ContextRep
{
private:
int ictxt_;
int myrow_;
int mycol_;
int nprow_;
int npcol_;
int size_;
int myproc_;
int mype_;
bool onpe0_;
bool active_;
std::vector<int> pmap_;
MPI_Comm comm_;
// keep assignment and copy constructors private
ContextRep& operator=(const ContextRep& c);
ContextRep(const ContextRep& c);
public:
int ictxt() const { return ictxt_; }
int myrow() const { return myrow_; }
int mycol() const { return mycol_; }
int nprow() const { return nprow_; }
int npcol() const { return npcol_; }
// number of processes in the context
// returns -1 if current process is not part of this context
int size() const { return size_; }
// position of current process in row-major order
// returns -1 if current process is not part of this context
int myproc() const { return myproc_; }
int mype() const { return mype_; }
int pmap(int irow, int icol) const { return pmap_[irow+nprow_*icol]; }
bool onpe0(void) const { return onpe0_; }
bool active(void) const { return active_; }
void abort(int ierr) const { Cblacs_abort(ictxt_,ierr); }
void barrier(void) const { Cblacs_barrier(ictxt_,"A"); }
void barrier(char scope) const { Cblacs_barrier(ictxt_,&scope); }
void dsend(int m, int n, double* a, int lda, int rdest, int cdest) const;
void drecv(int m, int n, double* a, int lda, int rsrc, int csrc) const;
void dsum(char scope, char topology, int m, int n, double* a, int lda,
int rdest, int cdest) const;
void dmax(char scope, char topology, int m, int n, double* a, int lda,
int rdest, int cdest) const;
void dmax(char scope, char topology, int m, int n, double* a, int lda,
int* ra, int* ca, int rcflag, int rdest, int cdest) const;
void dmin(char scope, char topology, int m, int n, double* a, int lda,
int* ra, int* ca, int rcflag, int rdest, int cdest) const;
void dmin(char scope, char topology, int m, int n, double* a, int lda,
int rdest, int cdest) const;
void dbcast_send(char scope, char topology,
int m, int n, double* a,int lda) const;
void dbcast_recv(char scope, char topology, int m, int n, double* a, int lda,
int rsrc, int csrc) const;
void isend(int m, int n, int* a, int lda, int rdest, int cdest) const;
void irecv(int m, int n, int* a, int lda, int rsrc, int csrc) const;
void isum(char scope, char topology, int m, int n, int* a, int lda,
int rdest, int cdest) const;
void imax(char scope, char topology, int m, int n, int* a, int lda,
int* ra, int* ca, int rcflag, int rdest, int cdest) const;
void imax(char scope, char topology, int m, int n, int* a, int lda,
int rdest, int cdest) const;
void imin(char scope, char topology, int m, int n, int* a, int lda,
int* ra, int* ca, int rcflag, int rdest, int cdest) const;
void imin(char scope, char topology, int m, int n, int* a, int lda,
int rdest, int cdest) const;
void ibcast_send(char scope, char topology,
int m, int n, int* a,int lda) const;
void ibcast_recv(char scope, char topology, int m, int n, int* a, int lda,
int rsrc, int csrc) const;
void string_send(std::string& s, int rdest, int cdest) const;
void string_recv(std::string& s, int rsrc, int csrc) const;
void string_bcast(std::string& s, int isrc) const;
bool operator==(const ContextRep& c) const
{ return (ictxt_==c.ictxt());}
MPI_Comm comm(void) const { return comm_; }
// Constructors
// construct a single-row ContextRep
explicit ContextRep(MPI_Comm comm);
// global ContextRep of size nprow * npcol with column major order
explicit ContextRep(MPI_Comm comm, int nprow, int npcol);
~ContextRep();
void print(std::ostream& os) const;
};
class Context
{
private:
struct ContextImpl* pimpl_;
ContextRep* rep;
int* pcount;
public:
@@ -54,6 +152,7 @@ class Context
bool onpe0(void) const;
bool active(void) const;
operator bool() const { return active(); }
void abort(int ierr) const;
void barrier(void) const;
void barrier(char scope) const;
@@ -141,24 +240,48 @@ class Context
// Constructors
// default global context: construct a single-row global Context
explicit Context();
// global Context of size nprow * npcol with column major order
explicit Context(int nprow, int npcol);
// single-row Context
explicit Context(MPI_Comm comm) : rep(new ContextRep(comm)),
pcount(new int(1)) {}
// construct a Context of size nprow*npcol from the processes
// in context ctxt starting at process (irstart,icstart)
explicit Context(const Context &ctxt, int nprow, int npcol,
int irstart, int icstart);
// nprow * npcol Context
explicit Context(MPI_Comm comm, int nprow, int npcol):
rep(new ContextRep(comm,nprow,npcol)), pcount(new int(1)) {}
~Context();
// Context(ContextRep* pp) : rep(pp), pcount(new int(1)) {}
Context(const Context& ctxt);
Context& operator=(const Context& rhs);
Context(const Context& c) : rep(c.rep), pcount(c.pcount) { (*pcount)++; }
void print(std::ostream& os) const;
Context& operator=(const Context& c)
{
if ( rep == c.rep ) return *this;
if ( --(*pcount) == 0 )
{
delete rep;
delete pcount;
}
rep = c.rep;
pcount = c.pcount;
(*pcount)++;
return *this;
}
~Context(void)
{
if ( pcount == 0 )
{
std::cerr << "~Context: pcount = 0\n";
}
if ( --(*pcount) == 0 )
{
delete rep;
delete pcount;
}
}
};
std::ostream& operator << ( std::ostream& os, const Context& ctxt );
#endif
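With the pimpl member removed, copies of a Context now share a single reference-counted ContextRep, so the underlying BLACS grid and MPI communicator are released only when the last copy is destroyed. An illustrative sketch of the copy semantics (not part of the diff; assumes MPI_Init has already been called):

#include <mpi.h>
#include "Context.h"

void copy_semantics_example(void)
{
  Context base(MPI_COMM_WORLD);  // allocates a ContextRep, pcount == 1
  Context alias(base);           // copy shares the same ContextRep, pcount == 2
  MPI_Comm comm = alias.comm();  // same communicator as base.comm()
  (void) comm;
} // both copies destroyed here; the last one deletes the ContextRep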
@@ -3382,7 +3382,7 @@ void DoubleMatrix::print(ostream& os) const
{
// Copy blocks of <blocksize> columns and print them on process (0,0)
if ( m_ == 0 || n_ == 0 ) return;
Context ctxtl(1,1);
Context ctxtl(MPI_COMM_WORLD,1,1);
const int blockmemsize = 32768; // maximum memory size of a block in bytes
// compute maximum block size: must be at least 1
int maxbs = max(1, (int) ((blockmemsize/sizeof(double))/m_));
@@ -3414,7 +3414,7 @@ void ComplexMatrix::print(ostream& os) const
{
// Copy blocks of <blocksize> columns and print them on process (0,0)
if ( m_ == 0 || n_ == 0 ) return;
Context ctxtl(1,1);
Context ctxtl(MPI_COMM_WORLD,1,1);
const int blockmemsize = 32768; // maximum memory size of a block in bytes
// compute maximum block size: must be at least 1
int maxbs = max(1, (int) ((blockmemsize/sizeof(complex<double>))/m_));
@@ -44,7 +44,7 @@ class Sample
Control ctrl;
UserInterface *ui;
Sample(const Context& ctxt, UserInterface *ui_) : ctxt_(ctxt), ui(ui_),
Sample(const Context& ctxt, UserInterface *ui_ = 0) : ctxt_(ctxt), ui(ui_),
atoms(ctxt), constraints(ctxt),
extforces(ctxt), wf(ctxt), wfv(0) {}
~Sample(void) { delete wfv; }
@@ -321,7 +321,7 @@ void Wavefunction::create_contexts(void)
// npr now divides size
int npc = size/npr;
spincontext_ = new Context(npr,npc);
spincontext_ = new Context(ctxt_.comm(),npr,npc);
kpcontext_ = new Context(*spincontext_);
sdcontext_ = new Context(*kpcontext_);
}
@@ -70,7 +70,7 @@ int XMLGFPreprocessor::process(const char* const uri,
const Context& ctxt = gfdata.context();
// define a global single row context for segment manipulations
Context rctxt;
Context rctxt(MPI_COMM_WORLD);
#if DEBUG
if ( rctxt.onpe0() )
{
@@ -143,7 +143,7 @@ int main(int argc, char **argv, char **envp)
#endif
{
Context ctxt;
Context ctxt(MPI_COMM_WORLD);
if ( ctxt.onpe0() )
{
@@ -21,18 +21,12 @@
#include <vector>
#include <cstdlib>
using namespace std;
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "Context.h"
int main(int argc, char **argv)
{
int mype;
int npes;
#ifdef USE_MPI
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD, &npes);
@@ -43,7 +37,7 @@ int main(int argc, char **argv)
{ // start Context scope
Context ctxt;
Context ctxt(MPI_COMM_WORLD);
for ( int i = 0; i < npes; i++ )
{
MPI_Barrier(MPI_COMM_WORLD);
@@ -51,87 +45,29 @@
cout << mype << ":" << ctxt.mype() << ":" << ctxt.myproc()
<< " base: " << ctxt;
}
vector<Context*> c;
c.push_back(new Context(nr,nc));
#if 0
if ( nr >= 2 && nc >= 2 )
c.push_back(new Context(*c[0],2,2,1,1));
for ( int icol = 0; icol < c[0]->npcol(); icol++ )
{
ctxt.barrier();
c.push_back(new Context(*c[0],c[0]->nprow(),1,0,icol));
}
#endif
c.push_back(new Context(MPI_COMM_WORLD,nr,nc));
cout << ctxt.mype() << ": " << *c[0];
for ( int i = 0; i < c.size(); i++ )
{
Context* pc = c[i];
for ( int i = 0; i < npes; i++ )
{
MPI_Barrier(MPI_COMM_WORLD);
if ( i == mype )
cout << mype << ":" << pc->mype() << ":" << pc->myproc()
<< " at (" << pc->myrow() << "," << pc->mycol() << ")"
<< " in c" << i << ": " << *pc;
}
}
#if 0
MPI_Comm comm = c[1]->comm();
int mype_in_c1,size_of_c1;
MPI_Comm_rank(comm,&mype_in_c1);
MPI_Comm_size(comm,&size_of_c1);
cout << mype << ": mype_in_c1: " << mype_in_c1
<< " size_of_c1=" << size_of_c1
<< " comm[c1]=" << comm << endl;
if ( *c[0] )
cout << ctxt.mype() << ": c[0] is active" << endl;
// test dgsum2d function
double a = c[1]->mype();
cout << c[1]->mype() << ": a = " << a << endl;
c[1]->dsum('R',1,1,&a,1);
cout << c[1]->mype() << ": a_sum_row = " << a << endl;
c[1]->dsum('C',1,1,&a,1);
cout << c[1]->mype() << ": a_sum_all = " << a << endl;
// add along rows, then along columns
double a = c[0]->mype();
cout << c[0]->mype() << ": a = " << a << endl;
c[0]->dsum('R',1,1,&a,1);
cout << c[0]->mype() << ": a_sum_row = " << a << endl;
c[0]->dsum('C',1,1,&a,1);
cout << c[0]->mype() << ": a_sum_all = " << a << endl;
#endif
for ( int i = 0; i < c.size(); i++ )
{
delete c[i];
}
// // test reference counting
// if ( npes%2 == 0 && npes >= 4 )
// {
// Context *c1 = new Context(npes/2,2);
// cout << "c1: " << *c1 << endl;
// Context *c2;
// if ( c1->active() )
// c2 = new Context(*c1,npes/2,npes/2,1);
// else
// c2 = 0;
// // this line causes crash: Context *c2 = new Context(*c1,1,1,1);
// delete c1;
// if ( c2 != 0 ) cout << c2->mype() << " c2: " << *c2;
// delete c2;
// }
#if 0
}
#endif
} // end Context scope
MPI_Finalize();
#else
mype=0;
npes=1;
{
BlacsContext c1(1,1);
cout << " c1.ictxt = " << c1.ictxt() << endl;
}
#endif
}
@@ -51,14 +51,9 @@ int main(int argc, char **argv)
{
int mype;
int npes;
#ifdef USE_MPI
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD, &npes);
MPI_Comm_rank(MPI_COMM_WORLD, &mype);
#else
npes=1;
mype=0;
#endif
char* infilename = argv[1];
ifstream infile(infilename);
@@ -72,11 +67,7 @@ int main(int argc, char **argv)
else
{
cerr << " invalid argv[2]" << endl;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD,2);
#else
exit(2);
#endif
}
}
Timer tm;
@@ -96,7 +87,6 @@ int main(int argc, char **argv)
infile >> m_c >> n_c >> mb_c >> nb_c;
cout<<"m_c="<<m_c<<", n_c="<<n_c<<endl;
}
#ifdef USE_MPI
MPI_Bcast(&nprow, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&npcol, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&m_a, 1, MPI_INT, 0, MPI_COMM_WORLD);
@@ -113,12 +103,11 @@ int main(int argc, char **argv)
MPI_Bcast(&nb_c, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&ta, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
MPI_Bcast(&tb, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
#endif
{
if ( ta == 'N' ) ta = 'n';
if ( tb == 'N' ) tb = 'n';
Context ctxt(nprow,npcol);
Context ctxt(MPI_COMM_WORLD,nprow,npcol);
if ( mype == 0 )
{
@@ -425,7 +414,5 @@ int main(int argc, char **argv)
#endif
}
#ifdef USE_MPI
MPI_Finalize();
#endif
}
@@ -15,8 +15,6 @@
// testSample.C
//
////////////////////////////////////////////////////////////////////////////////
// $Id: testSample.C,v 1.6 2009-11-30 02:23:26 fgygi Exp $
#include <iostream>
using namespace std;
@@ -28,20 +26,16 @@ using namespace std;
int main(int argc, char** argv)
{
#if USE_MPI
MPI_Init(&argc,&argv);
#endif
// extra scope to ensure that BlacsContext objects get destructed before
// the MPI_Finalize call
{
Context ctxt;
Context ctxt(MPI_COMM_WORLD);
#if USE_MPI
char processor_name[MPI_MAX_PROCESSOR_NAME];
int namelen;
PMPI_Get_processor_name(processor_name,&namelen);
cout << " Process " << ctxt.mype() << " on " << processor_name << endl;
#endif
Sample s(ctxt);
@@ -57,8 +51,6 @@ int main(int argc, char** argv)
s.wf.gram();
cout << " ortho_error: " << s.wf.sd(0,0)->ortho_error() << endl;
}
#if USE_MPI
MPI_Finalize();
#endif
return 0;
}