freepooma-devel

Re: [PATCH] Initialize MPI


From: Jeffrey D. Oldham
Subject: Re: [PATCH] Initialize MPI
Date: Mon, 05 Jan 2004 13:32:06 -0800
User-agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.4) Gecko/20030624

Richard Guenther wrote:
> Hi!
>
> This patch adds MPI initialization.
>
> Ok?

Yes.

> Richard.


2004Jan02  Richard Guenther <address@hidden>

        * src/Pooma/Pooma.cmpl.cpp: Add initialization and
        finalization sequences for MPI. Call Pooma::blockAndEvaluate()
        at finalization.
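
For context, here is roughly how an application drives the sequence being
patched (a minimal sketch, assuming the usual Pooma::initialize()/
Pooma::finalize() entry points and header name; not part of the patch):

  #include "Pooma/Pooma.h"

  int main(int argc, char** argv)
  {
    // With this patch, initialization also calls MPI_Init() when POOMA_MPI
    // is defined.
    Pooma::initialize(argc, argv);

    // ... build and evaluate arrays across contexts ...

    // With this patch, finalization first calls Pooma::blockAndEvaluate()
    // and then MPI_Finalize() when POOMA_MPI is defined.
    Pooma::finalize();
    return 0;
  }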

--- /home/richard/src/pooma/cvs/r2/src/Pooma/Pooma.cmpl.cpp     2003-12-25 12:26:04.000000000 +0100
+++ Pooma/Pooma.cmpl.cpp        2004-01-02 00:40:15.000000000 +0100
@@ -287,10 +287,10 @@
   // we can do this in the other initialize routine by querying for
   // the Cheetah options from the Options object.

-#if POOMA_CHEETAH
-
+#if POOMA_MPI
+  MPI_Init(&argc, &argv);
+#elif POOMA_CHEETAH
   controller_g = new Cheetah::Controller(argc, argv);
-
 #endif

   // Just create an Options object for this argc, argv set, and give that
@@ -349,12 +349,20 @@

   // Set myContext_s and numContexts_s to the context numbers.

-#if POOMA_CHEETAH
+#if POOMA_MESSAGING

+#if POOMA_MPI
+  MPI_Comm_rank(MPI_COMM_WORLD, &myContext_g);
+  MPI_Comm_size(MPI_COMM_WORLD, &numContexts_g);
+  // ugh...
+  for (int i=0; i<sizeof(Smarts::SystemContext::requests_m)/sizeof(MPI_Request); ++i)
+    Smarts::SystemContext::free_requests_m.insert(i);
+#elif POOMA_CHEETAH
   PAssert(controller_g != 0);

   myContext_g   = controller_g->mycontext();
   numContexts_g = controller_g->ncontexts();
+#endif

   initializeCheetahHelpers(numContexts_g);

@@ -376,14 +384,14 @@
   warnMessages(opts.printWarnings());
   errorMessages(opts.printErrors());

-#if POOMA_CHEETAH
-
   // This barrier is here so that Pooma is initialized on all contexts
   // before we continue.  (Another context could invoke a remote member
   // function on us before we're initialized... which would be bad.)

+#if POOMA_MPI
+  MPI_Barrier(MPI_COMM_WORLD);
+#elif POOMA_CHEETAH
   controller_g->barrier();
-
 #endif

   // Initialize the Inform streams with info on how many contexts we
@@ -416,6 +424,8 @@

 bool finalize(bool quitRTS, bool quitArch)
 {
+  Pooma::blockAndEvaluate();
+
   if (initialized_s)
   {
     // Wait for threads to finish.
@@ -426,7 +436,7 @@

     cleanup_s();

-#if POOMA_CHEETAH
+#if POOMA_MESSAGING
     // Clean up the Cheetah helpers.

     finalizeCheetahHelpers();
@@ -436,15 +446,19 @@

     if (quitRTS)
     {
-#if POOMA_CHEETAH
+#if POOMA_MESSAGING

       // Deleting the controller shuts down the cross-context communication
       // if this is the last thing using this controller.  If something
       // else is using this, Cheetah will not shut down until that item
       // is destroyed or stops using the controller.

+#if POOMA_MPI
+      MPI_Finalize();
+#elif POOMA_CHEETAH
       if (controller_g != 0)
        delete controller_g;
+#endif

 #endif
     }
@@ -784,18 +799,18 @@
     SystemContext_t::runSomething();
   }

-#elif POOMA_REORDER_ITERATES
+# elif POOMA_REORDER_ITERATES

   CTAssert(NO_SUPPORT_FOR_THREADS_WITH_MESSAGING);

-#else // we're using the serial scheduler, so we only need to get messages
+# else // we're using the serial scheduler, so we only need to get messages

   while (Pooma::incomingMessages())
   {
     controller_g->poll();
   }

-#endif // schedulers
+# endif // schedulers

 #else // !POOMA_CHEETAH
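
The MPI calls added above follow the standard MPI lifecycle.  For readers
less familiar with it, here is a standalone sketch of the same pattern
outside Pooma (plain MPI only; nothing Pooma-specific is assumed):

  #include <mpi.h>
  #include <cstdio>

  int main(int argc, char** argv)
  {
    // Start the MPI runtime (the patch does this inside Pooma's
    // initialization).
    MPI_Init(&argc, &argv);

    // Query this context's rank and the total number of contexts,
    // mirroring the myContext_g / numContexts_g assignments in the patch.
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Barrier so every context finishes setup before any one proceeds,
    // as the patch does at the end of initialization.
    MPI_Barrier(MPI_COMM_WORLD);

    std::printf("context %d of %d initialized\n", rank, size);

    // Shut the runtime down (the patch does this in finalize()).
    MPI_Finalize();
    return 0;
  }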



--
Jeffrey D. Oldham
address@hidden
