changeset 261:dafae55597ce Dev_ML

Getting closer -- added PRServ as built-in langlet (but still just a copy) -- about to rework a lot of the Master code.. possibly eliminate core controller
author Sean Halle <seanhalle@yahoo.com>
date Tue, 23 Oct 2012 23:46:17 -0700
parents 999f2966a3e5
children a5fa1e087c7e
files AnimationMaster.c CoreController.c Defines/MEAS__macros_to_be_moved_to_langs.h Defines/PR_defs__HW_constants.h HW_Dependent_Primitives/PR__HW_measurement.c HW_Dependent_Primitives/PR__primitives_asm.s PR.h PR__PI.c PR__SS.c PR__WL.c PR__int.c PR__startup_and_shutdown.c PR_req_handlers.c Services_Offered_by_PR/Measurement_and_Stats/MEAS__macros.h Services_Offered_by_PR/Measurement_and_Stats/probes.c Services_Offered_by_PR/Measurement_and_Stats/probes.h Services_Offered_by_PR/Memory_Handling/vmalloc.c Services_Offered_by_PR/Memory_Handling/vmalloc.h Services_Offered_by_PR/Services_Language/PRServ.c Services_Offered_by_PR/Services_Language/PRServ.h Services_Offered_by_PR/Services_Language/PRServ_PluginFns.c Services_Offered_by_PR/Services_Language/PRServ_Request_Handlers.c Services_Offered_by_PR/Services_Language/PRServ_Request_Handlers.h Services_Offered_by_PR/Services_Language/PRServ_SS.c
diffstat 24 files changed, 4429 insertions(+), 1235 deletions(-) [+]
line diff
     1.1 --- a/AnimationMaster.c	Wed Sep 19 23:12:44 2012 -0700
     1.2 +++ b/AnimationMaster.c	Tue Oct 23 23:46:17 2012 -0700
     1.3 @@ -10,7 +10,10 @@
     1.4  #include <stddef.h>
     1.5  
     1.6  #include "PR.h"
     1.7 +#include "VSs_impl/VSs.h"
     1.8  
     1.9 +inline void
    1.10 +replaceWithNewSlotSlv( SlaveVP *requestingSlv, PRProcessEnv *processEnv );
    1.11  
    1.12  
    1.13  /*The animationMaster embodies most of the animator of the language.  The
    1.14 @@ -37,7 +40,7 @@
    1.15   *
    1.16   */
    1.17  
    1.18 -
    1.19 +        
    1.20  //=====================  The versions of the Animation Master  =================
    1.21  //
    1.22  //==============================================================================
    1.23 @@ -105,11 +108,11 @@
    1.24   *There is a separate masterVP for each core, but a single semantic
    1.25   * environment shared by all cores.  Each core also has its own scheduling
    1.26   * slots, which are used to communicate slaves between animationMaster and
    1.27 - * coreController.  There is only one global variable, _PRMasterEnv, which
    1.28 + * coreController.  There is only one global variable, _PRTopEnv, which
    1.29   * holds the semantic env and other things shared by the different
    1.30   * masterVPs.  The request handler and Assigner are registered with
    1.31   * the animationMaster by the language's init function, and a pointer to
    1.32 - * each is in the _PRMasterEnv. (There are also some pthread related global
    1.33 + * each is in the _PRTopEnv. (There are also some pthread related global
    1.34   * vars, but they're only used during init of PR).
    1.35   *PR gains control over the cores by essentially "turning off" the OS's
    1.36   * scheduler, using pthread pin-to-core commands.
    1.37 @@ -122,7 +125,7 @@
    1.38   * based application.
    1.39   *The masterVPs share a single system-wide master-lock, so only one
    1.40   * masterVP may be animated at a time.
    1.41 - *The core controllers access _PRMasterEnv to get the masterVP, and when
    1.42 + *The core controllers access _PRTopEnv to get the masterVP, and when
    1.43   * they start, the slots are all empty, so they run their associated core's
    1.44   * masterVP.  The first of those to get the master lock sees the seed slave
    1.45   * in the shared semantic environment, so when it runs the Assigner, that
    1.46 @@ -160,7 +163,7 @@
    1.47     int32           thisCoresIdx;
    1.48    
    1.49     //======================== Initializations ========================
    1.50 -   masterEnv        = (MasterEnv*)_VMSMasterEnv;
    1.51 +   masterEnv        = (MasterEnv*)_PRTopEnv;
    1.52     
    1.53     thisCoresIdx     = masterVP->coreAnimatedBy;
    1.54     animSlots       = masterEnv->allAnimSlots[thisCoresIdx];
    1.55 @@ -196,12 +199,12 @@
    1.56              SlaveVP *currSlave = currSlot->slaveAssignedToSlot;
    1.57              
    1.58  	justAddedReqHdlrChg();
    1.59 -			//handle the request, either by VMS or by the language
    1.60 +			//handle the request, either by PR or by the language
    1.61              if( currSlave->requests->reqType != LangReq )
    1.62 -             {    //The request is a standard VMS one, not one defined by the
    1.63 -                  // language, so VMS handles it, then queues slave to be assigned
    1.64 -               handleReqInVMS( currSlave );
    1.65 -               writePrivQ( currSlave, VMSReadyQ ); //Q slave to be assigned below
    1.66 +             {    //The request is a standard PR one, not one defined by the
    1.67 +                  // language, so PR handles it, then queues slave to be assigned
    1.68 +               handleReqInPR( currSlave );
    1.69 +               writePrivQ( currSlave, PRReadyQ ); //Q slave to be assigned below
    1.70               }
    1.71              else
    1.72               {       MEAS__startReqHdlr;
    1.73 @@ -272,7 +275,7 @@
    1.74     //#endif
    1.75     
    1.76     //======================== Initializations ========================
    1.77 -   masterEnv        = (MasterEnv*)_PRMasterEnv;
    1.78 +   masterEnv        = (MasterEnv*)_PRTopEnv;
    1.79     
    1.80     thisCoresIdx     = masterVP->coreAnimatedBy;
    1.81     animSlots        = masterEnv->allAnimSlots[thisCoresIdx];
    1.82 @@ -498,7 +501,7 @@
    1.83     //#endif
    1.84     
    1.85     //======================== Initializations ========================
    1.86 -   masterEnv        = (MasterEnv*)_PRMasterEnv;
    1.87 +   masterEnv        = (MasterEnv*)_PRTopEnv;
    1.88     
    1.89     thisCoresIdx     = masterVP->coreAnimatedBy;
    1.90     animSlots        = masterEnv->allAnimSlots[thisCoresIdx];
    1.91 @@ -614,37 +617,22 @@
    1.92  //#ifdef MODE__MULTI_PROCESS
    1.93  void animationMaster( void *initData, SlaveVP *masterVP )
    1.94   { 
    1.95 +   int32           slotIdx;
    1.96 +//   int32           numSlotsFilled;
    1.97 +   AnimSlot       *currSlot;
    1.98        //Used while scanning and filling animation slots
    1.99 -   int32           slotIdx, numSlotsFilled;
   1.100 -   AnimSlot       *currSlot, **animSlots;
   1.101 -   SlaveVP        *assignedSlaveVP;  //the slave chosen by the assigner
   1.102 +   AnimSlot      **animSlots;
   1.103     
   1.104        //Local copies, for performance
   1.105     MasterEnv      *masterEnv;
   1.106 -   SlaveAssigner   slaveAssigner;
   1.107 -   RequestHandler  requestHandler;
   1.108 -   PRSemEnv       *semanticEnv;
   1.109     int32           thisCoresIdx;
   1.110 -
   1.111 -   SlaveVP        *slave;
   1.112 -   PRProcess      *process;
   1.113 -   PRConstrEnvHolder *constrEnvHolder;
   1.114 -   int32           langMagicNumber;
   1.115     
   1.116     //======================== Initializations ========================
   1.117 -   masterEnv        = (MasterEnv*)_PRMasterEnv;
   1.118 +   masterEnv        = (MasterEnv*)_PRTopEnv;
   1.119     
   1.120     thisCoresIdx     = masterVP->coreAnimatedBy;
   1.121     animSlots        = masterEnv->allAnimSlots[thisCoresIdx];
   1.122 -
   1.123 -   requestHandler   = masterEnv->requestHandler;
   1.124 -   slaveAssigner    = masterEnv->slaveAssigner;
   1.125 -   semanticEnv      = masterEnv->semanticEnv;
   1.126 -   
   1.127 -      //initialize, for non-multi-lang, non multi-proc case
   1.128 -      // default handler gets put into master env by a registration call by lang
   1.129 -   endTaskHandler   = masterEnv->defaultTaskHandler;
   1.130 -   
   1.131 +      
   1.132        HOLISTIC__Insert_Master_Global_Vars;
   1.133     
   1.134     //======================== animationMaster ========================
   1.135 @@ -653,15 +641,36 @@
   1.136     //Having two cases makes this logic complex.. can be finishing either, and 
   1.137     // then the next available work may be either.. so really have two distinct
   1.138     // loops that are inter-twined.. 
   1.139 -   while(1){
   1.140 -       
   1.141 -      MEAS__Capture_Pre_Master_Point
   1.142 +   while(1)
   1.143 +    {  
   1.144 +            MEAS__Capture_Pre_Master_Point
   1.145 +      
   1.146 +      for( slotIdx = 0; slotIdx < NUM_ANIM_SLOTS; slotIdx++)
   1.147 +       {
   1.148 +         currSlot = animSlots[ slotIdx ];
   1.149  
   1.150 -      //Scan the animation slots
   1.151 -   numSlotsFilled = 0;
   1.152 -   for( slotIdx = 0; slotIdx < NUM_ANIM_SLOTS; slotIdx++)
   1.153 -    {
   1.154 -      currSlot = animSlots[ slotIdx ];
   1.155 +         masterFunction_multiLang( currSlot );
   1.156 +       }
   1.157 +            
   1.158 +            MEAS__Capture_Post_Master_Point;
   1.159 +    
   1.160 +      masterSwitchToCoreCtlr( masterVP ); //returns when ctlr switches back to master
   1.161 +      flushRegisters();
   1.162 +    } 
   1.163 + }
   1.164 +#endif  //MODE__MULTI_LANG
   1.165 +#endif  //MODE__MULTI_PROCESS
   1.166 +
   1.167 +inline
   1.168 +void
   1.169 +masterFunction_multiLang( AnimSlot  *currSlot )
   1.170 + {    //Scan the animation slots
   1.171 +   int32           magicNumber;
   1.172 +   SlaveVP        *slave;
   1.173 +   SlaveVP        *assignedSlaveVP;
   1.174 +   PRSemEnv       *semanticEnv;
   1.175 +   PRReqst        *req;
   1.176 +   RequestHandler  requestHandler;
   1.177  
   1.178           //Check if newly-done slave in slot, which will need request handled
   1.179        if( currSlot->workIsDone )
   1.180 @@ -674,34 +683,71 @@
   1.181              //process the request made by the slave (held inside slave struc)
   1.182           slave = currSlot->slaveAssignedToSlot;
   1.183           
   1.184 -            //check if the completed work was a task..
   1.185 -         if( slave->taskMetaInfo->isATask )
   1.186 -          {
   1.187 -             if( slave->reqst->type == TaskEnd ) 
   1.188 -              {    //do task end handler, which is registered separately
   1.189 -                   //note, end hdlr may use semantic data from reqst..
   1.190 -                   //get end-task handler
   1.191 -                //taskEndHandler = lookup( slave->reqst->langMagicNumber, processEnv );
   1.192 -                taskEndHandler = slave->taskMetaInfo->endTaskHandler;
   1.193 -                
   1.194 -                (*taskEndHandler)( slave, semanticEnv );
   1.195 -                
   1.196 -                goto AssignWork;
   1.197 -              }
   1.198 -             else  //is a task, and just suspended
   1.199 -              {    //turn slot slave into free task slave & make replacement
   1.200 -                if( slave->typeOfVP == TaskSlotSlv ) changeSlvType();
   1.201 -                
   1.202 -                //goto normal slave request handling
   1.203 -                goto SlaveReqHandling; 
   1.204 -              }
   1.205 +            //check if the slave was doing a task..
   1.206 +         //Action depends both on the request type, and whether it's on
   1.207 +         // a generic slave vs a suspended task
   1.208 +         if( slave->metaTask->taskType == AtomicTask ||
   1.209 +             slave->metaTask->taskType == SuspendedTask )
   1.210 +          { 
   1.211 +            switch( slave->request->reqType )
   1.212 +             { case TaskEnd: 
   1.213 +                { PRHandle_EndTask( slave ); //if free task slave, update count, put into recycle Q -- do handler before lang's handler
   1.214 +
   1.215 +                     //do task end handler, which is registered separately
   1.216 +                     //note, end hdlr may use semantic data from reqst..
   1.217 +                     //get end-task handler
   1.218 +
   1.219 +                  RequestHandler
   1.220 +                  taskEndHandler = slave->metaTask->reqHandler;
   1.221 +                  semanticEnv = PR_int__give_sem_env_for_slave( slave, 
   1.222 +                                              slave->request->langMagicNumber );
   1.223 +                  (*taskEndHandler)( slave, semanticEnv );
   1.224 +
   1.225 +                  goto AssignWork;
   1.226 +                }
   1.227 +               case TaskCreate:
   1.228 +                { PRHandle_CreateTask( slave );
   1.229 +                  justCopied_check;
   1.230 +                  RequestHandler
   1.231 +                  taskCreateHandler = slave->metaTask->reqHandler;
   1.232 +                  semanticEnv = PR_int__give_sem_env_for_slave( slave, 
   1.233 +                                              slave->request->langMagicNumber );
   1.234 +                  (*taskCreateHandler)( slave, semanticEnv );
   1.235 +
   1.236 +                  want_to_resume_creating_slave;
   1.237 +                  goto AssignWork;
   1.238 +                }
   1.239 +               default:  
   1.240 +                {    //is a task, and just suspended, so tied to a free task slave
   1.241 +                     //First turn slot slave into free task slave & make replacement
   1.242 +                  if( slave->typeOfVP == TaskSlotSlv )
   1.243 +                     replaceWithNewSlotSlv( slave, slave->processSlaveIsIn->processEnv );
   1.244 +
   1.245 +                  //goto normal slave request handling
   1.246 +                  goto SlaveReqHandling; 
   1.247 +                }
   1.248 +             }
   1.249            }
   1.250           else //is a slave that suspended
   1.251            {
   1.252               
   1.253            SlaveReqHandling:
   1.254 -            (*requestHandler)( slave, semanticEnv ); //(note: indirect Fn call more efficient when use fewer params, instead re-fetch from slave)
   1.255 -         
   1.256 +               //Q: put the switch in inline call, to clean up code?
   1.257 +            req = slave->request;
   1.258 +            switch( req->reqType )
   1.259 +             { case SlvCreate:    PRHandle_CreateSlave( slave );    break;
   1.260 +               case SlvDissipate: PRHandle_Dissipate( slave ); break;
   1.261 +               case Service:      PR_int__handle_PRServiceReq( slave );  break; //resume into PR's own semantic env
   1.262 +               case Hardware: //for future expansion
   1.263 +               case IO:       //for future expansion
   1.264 +               case OSCall:   //for future expansion
   1.265 +               case Language: //normal sem request
   1.266 +                  magicNumber = slave->request->langMagicNumber;
   1.267 +                  semanticEnv = PR_PI__give_sem_env_for( slave, magicNumber );
   1.268 +                  requestHandler = semanticEnv->requestHdlr;
   1.269 +                  (*requestHandler)( slave, semanticEnv ); //(note: indirect Fn call more efficient when use fewer params, instead re-fetch from slave)
   1.270 +             }
   1.271 +            
   1.272                 HOLISTIC__Record_AppResponder_end;
   1.273                 MEAS__endReqHdlr;
   1.274                 
   1.275 @@ -709,14 +755,14 @@
   1.276            }
   1.277         } //if has suspended slave that needs handling
   1.278        
   1.279 -         //if slot empty, hand to Assigner to fill with a slave
   1.280 +         //End up here when the slot did not have ended work in it (no req)
   1.281 +         //So, here, if slot empty, look for work to fill the slot
   1.282        if( currSlot->needsSlaveAssigned )
   1.283 -       {    //Scan sem environs, looking for one with ready work.
   1.284 -            // call the Assigner for that sem Env, to give slot a new slave
   1.285 -               HOLISTIC__Record_Assigner_start;
   1.286 +       {       HOLISTIC__Record_Assigner_start;
   1.287                 
   1.288         AssignWork:
   1.289 -     
   1.290 +            //Scan sem environs, looking for semEnv with ready work.
   1.291 +            // call the Assigner for that sem Env, to get a slave for the slot
   1.292           assignedSlaveVP = assignWork( semanticEnv, currSlot );
   1.293         
   1.294              //put the chosen slave into slot, and adjust flags and state
   1.295 @@ -724,185 +770,245 @@
   1.296            { currSlot->slaveAssignedToSlot = assignedSlaveVP;
   1.297              assignedSlaveVP->animSlotAssignedTo = currSlot;
   1.298              currSlot->needsSlaveAssigned  = FALSE;
   1.299 -            numSlotsFilled               += 1;
   1.300            }
   1.301           else
   1.302 -          {
   1.303 -            currSlot->needsSlaveAssigned  = TRUE; //local write
   1.304 +          { currSlot->needsSlaveAssigned  = TRUE; //local write
   1.305            }
   1.306                 HOLISTIC__Record_Assigner_end;
   1.307         }//if slot needs slave assigned
   1.308 -    }//for( slotIdx..
   1.309 + }
   1.310  
   1.311 -         MEAS__Capture_Post_Master_Point;
   1.312 +//==========================================================================
   1.313 +/*When a task in a slot slave suspends, the slot slave has to be changed to
   1.314 + * a free task slave, then the slot slave replaced.  The replacement can be
   1.315 + * either a recycled free task slave that finished it's task and has been
   1.316 + * idle in the recycle queue, or else create a new slave to be the slot slave.
   1.317 + *The master only calls this with a slot slave that needs to be replaced.
   1.318 + */
   1.319 +inline void
   1.320 +replaceWithNewSlotSlv( SlaveVP *requestingSlv, PRProcessEnv *processEnv )
   1.321 + { SlaveVP *newSlotSlv;
   1.322 +   VSsSemData *semData;
   1.323 +
   1.324 +   fixMe__still_VSs_stuff_in_here;
   1.325 +      //get a new slave to be the slot slave
   1.326 +   newSlotSlv     = readPrivQ( processEnv->freeTaskSlvRecycleQ );
   1.327 +   if( newSlotSlv == NULL )
   1.328 +    { newSlotSlv  = PR_int__create_slaveVP( &idle_fn, NULL, processEnv, 0);
   1.329 +         //just made a new free task slave, so count it
   1.330 +      processEnv->numLiveFreeTaskSlvs += 1;
   1.331 +    }
   1.332     
   1.333 -   masterSwitchToCoreCtlr( masterVP ); //returns when ctlr switches back to master
   1.334 -   flushRegisters();
   1.335 -   }//while(1) 
   1.336 +      //set slave values to make it the slot slave
   1.337 +   newSlotSlv->metaTask              = NULL;
   1.338 +   newSlotSlv->typeOfVP              = TaskSlotSlv;
   1.339 +   newSlotSlv->needsTaskAssigned     = TRUE;
   1.340 +   
   1.341 +      //a slot slave is pinned to a particular slot on a particular core
   1.342 +      //Note, this happens before the request is seen by handler, so nothing
   1.343 +      // has had a chance to change the coreAnimatedBy or anything else..
   1.344 +   newSlotSlv->animSlotAssignedTo = requestingSlv->animSlotAssignedTo;
   1.345 +   newSlotSlv->coreAnimatedBy     = requestingSlv->coreAnimatedBy;
   1.346 +    
   1.347 +      //put it into the slot slave matrix
   1.348 +   int32 slotNum = requestingSlv->animSlotAssignedTo->slotIdx;
   1.349 +   int32 coreNum = requestingSlv->coreAnimatedBy;
   1.350 +   processEnv->slotTaskSlvs[coreNum][slotNum] = newSlotSlv;
   1.351 +
   1.352 +      //Fix up requester, to be an extra slave now (but not an ended one)
   1.353 +      // because it's active, doesn't go into freeTaskSlvRecycleQ
   1.354 +   requestingSlv->typeOfVP = FreeTaskSlv;
   1.355   }
   1.356 -#endif  //MODE__MULTI_LANG
   1.357 -#endif  //MODE__MULTI_PROCESS
   1.358  
   1.359  
   1.360 -/*This does three things:
   1.361 - * 1) ask for a slave ready to resume
   1.362 - * 2) if none, then ask for a task, and assign to the slot slave
   1.363 - * 3) if none, then prune former task slaves waiting to be recycled.
   1.364 - *
   1.365 -   //Have two separate assigners in each semantic env,
   1.366 -   // which keeps its own work in its own structures.. the master, here, 
   1.367 -   // searches through the semantic environs, takes the first that has work
   1.368 -   // available, and whatever it returns is assigned to the slot..
   1.369 -   //However, also have an override assigner.. because static analysis tools know
   1.370 -   // which languages are grouped together.. and the override enables them to
   1.371 -   // generate a custom assigner that uses info from all the languages in a 
   1.372 -   // unified way..  Don't really expect this to happen, but making it possible.
   1.373 +
   1.374 +/*This does:
   1.375 + * 1) searches the semantic environments for one with work ready
   1.376 + *    if finds one, asks its assigner to return work
   1.377 + * 2) checks what kind of work: new task, resuming task, resuming slave
   1.378 + *    if new task, gets the slot slave and assigns task to it and returns slave
   1.379 + *    else, gets the slave attached to the metaTask and returns that.
   1.380 + * 3) if no work found, then prune former task slaves waiting to be recycled.
   1.381 + *    If no work and no slaves to prune, check for shutdown conditions.
   1.382 + * 
   1.383 + * Semantic env keeps its own work in its own structures, and has its own
   1.384 + *  assigner.  It chooses which work to hand back.
   1.385 + * However, include a switch that switches-in an override assigner, which
   1.386 + *  sees all the work in all the semantic env's.  This is most likely  
   1.387 + *  generated by static tools and included in the executable.  That means it
   1.388 + *  has to be called via a registered pointer from here.  The idea is that
   1.389 + *  the static tools know which languages are grouped together.. and the
   1.390 + *  override enables them to generate a custom assigner that uses info from
   1.391 + *  all the languages in a unified way..  Don't really expect this to happen,
   1.392 + *  but am making it possible.
   1.393   */
   1.394  inline SlaveVP *
   1.395 -assignWork( PRProcessEnv *processEnv, AnimSlot *slot )
   1.396 - { SlaveVP     *returnSlv;
   1.397 -   //VSsSemEnv   *semEnv;
   1.398 -   //VSsSemData  *semData;
   1.399 -   int32        coreNum, slotNum;
   1.400 -   PRTaskMetaInfo *newTaskStub;
   1.401 -   SlaveVP     *freeTaskSlv;
   1.402 +assignWork( PRProcess *process, AnimSlot *slot )
   1.403 + { SlaveVP        *returnSlv;
   1.404 +   //VSsSemEnv      *semEnv;
   1.405 +   //VSsSemData     *semData;
   1.406 +   int32           coreNum, slotNum;
   1.407 +   PRMetaTask     *newMetaTask, *assignedMetaTask;
   1.408 +   SlaveVP        *freeTaskSlv;
   1.409  
   1.410 +   coreNum = slot->coreSlotIsOn;
   1.411     
   1.412 -      //master has to handle slot slaves.. so either assigner returns
   1.413 -      // taskMetaInfo or else two assigners, one for slaves, other for tasks..     
   1.414 -   semEnvs = processEnv->semEnvs;
   1.415 -   numEnvs = processEnv->numSemEnvs;
   1.416 -   for( envIdx = 0; envIdx < numEnvs; envIdx++ )
   1.417 +   if( _PRTopEnv->overrideAssigner != NULL )
   1.418 +    { assignedMetaTask = (*_PRTopEnv->overrideAssigner)( process, slot );
   1.419 +      if( assignedMetaTask != NULL )
   1.420 +       {
   1.421 +            //have work, so reset Done flag (caused by work generated on other core)
   1.422 +         if( process->coreIsDone[coreNum] == TRUE ) //reads are higher perf
   1.423 +            process->coreIsDone[coreNum] = FALSE;   //don't just write always
   1.424 +         
   1.425 +         switch( assignedMetaTask->taskType )
   1.426 +          { case GenericSlave: goto AssignSlave;
   1.427 +            case ResumedTask:  goto AssignSlave;
   1.428 +            case NewTask:      goto AssignNewTask;
   1.429 +            case default:      PR_int__throw_exception( "unknown task type ret by assigner" );
   1.430 +          }
   1.431 +       }
   1.432 +      else
   1.433 +         goto NoWork;
   1.434 +    }
   1.435 +   
   1.436 +      //If here, then no override assigner, so search semantic envs for work
   1.437 +   int32 envIdx, numEnvs; PRSemEnv **semEnvs, *semEnv; SlaveAssigner assigner;
   1.438 +   semEnvs = process->semEnvs;
   1.439 +   numEnvs = process->numSemEnvs;
   1.440 +   for( envIdx = 0; envIdx < numEnvs; envIdx++ ) //keep semEnvs in hash AND array
   1.441      { semEnv = semEnvs[envIdx];
   1.442        if( semEnv->hasWork )
   1.443         { assigner = semEnv->assigner; 
   1.444 -         retTaskMetaInfo = (*assigner)( semEnv, slot );
   1.445 +         assignedMetaTask = (*assigner)( semEnv, slot );
   1.446           
   1.447 -         return retTaskMetaInfo; //quit, have work
   1.448 +            //have work, so reset Done flag (caused by work generated on other core)
   1.449 +         if( process->coreIsDone[coreNum] == TRUE ) //reads are higher perf
   1.450 +            process->coreIsDone[coreNum] = FALSE;   //don't just write always
   1.451 +         
   1.452 +         switch( assignedMetaTask->taskType )
   1.453 +          { case GenericSlave: goto AssignSlave;
   1.454 +            case ResumedTask:  goto AssignSlave;
   1.455 +            case NewTask:      goto AssignNewTask;
   1.456 +            case default:      PR_int__throw_exception( "unknown task type ret by assigner" );
   1.457 +          }
   1.458         }
   1.459      }
   1.460     
   1.461 -   coreNum = slot->coreSlotIsOn;
   1.462 -   slotNum = slot->slotIdx;
   1.463 - 
   1.464 -      //first try to get a ready slave
   1.465 -   returnSlv = getReadySlave();
   1.466 + NoWork:
   1.467 +      //No work, if reach here..
   1.468 +      //no task, so prune the recycle pool of free task slaves
   1.469 +   freeTaskSlv = readPrivQ( process->freeTaskSlvRecycleQ );
   1.470 +   if( freeTaskSlv != NULL )
   1.471 +    {    //delete, so as to bound the num extras, and deliver shutdown cond
   1.472 +      deleteExtraneousFreeTaskSlv( freeTaskSlv, process );
   1.473 +         //then return NULL
   1.474 +      returnSlv = NULL;
   1.475 +         
   1.476 +      goto ReturnTheSlv;
   1.477 +    }
   1.478 +   else
   1.479 +    { //candidate for shutdown.. all extras dissipated, and no tasks
   1.480 +      // and no ready to resume slaves, so no way to generate
   1.481 +      // more work (on this core -- other core might have work still)
   1.482 +      if( process->numLiveFreeTaskSlvs == 0 && 
   1.483 +          process->numLiveGenericSlvs == 0 )
   1.484 +       { //This core sees no way to generate more tasks, so say it
   1.485 +         if( process->coreIsDone[coreNum] == FALSE )
   1.486 +          { process->numCoresDone += 1;
   1.487 +            process->coreIsDone[coreNum] = TRUE;
   1.488 +            #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
   1.489 +            process->shutdownInitiated = TRUE;
   1.490 +            
   1.491 +            #else
   1.492 +            if( process->numCoresDone == NUM_CORES )
   1.493 +             { //means no cores have work, and none can generate more
   1.494 +               process->shutdownInitiated = TRUE;
   1.495 +             }
   1.496 +            #endif
   1.497 +          }
   1.498 +       }
   1.499 +         //check if shutdown has been initiated by this or other core
   1.500 +      if( process->shutdownInitiated )
   1.501 +       { returnSlv = PR_SS__create_shutdown_slave();
   1.502 +       }
   1.503 +      else
   1.504 +         returnSlv = NULL;
   1.505  
   1.506 -   if( returnSlv != NULL )
   1.507 -    { returnSlv->coreAnimatedBy   = coreNum;
   1.508 -    
   1.509 -         //have work, so reset Done flag (when work generated on other core)
   1.510 -      if( processEnv->coreIsDone[coreNum] == TRUE ) //reads are higher perf
   1.511 -         processEnv->coreIsDone[coreNum] = FALSE;   //don't just write always
   1.512 +      goto ReturnTheSlv;
   1.513 +    } //if( freeTaskSlv != NULL )
   1.514 +
   1.515 +
   1.516 + AssignSlave:
   1.517 +    {    //get slave pointed to by meta task.
   1.518 +      returnSlv = assignedMetaTask->slaveAssignedTo;
   1.519 +
   1.520 +      returnSlv->coreAnimatedBy   = coreNum;
   1.521      
   1.522        goto ReturnTheSlv;
   1.523      }
   1.524 -   
   1.525 -      //were no slaves, so try to get a ready task.. 
   1.526 -   newTaskStub = getTaskStub();
   1.527 -   
   1.528 -   if( newTaskStub != NULL )
   1.529 + 
   1.530 + AssignNewTask:
   1.531      { 
   1.532           //get the slot slave to assign the task to..
   1.533 -      returnSlv = processEnv->slotTaskSlvs[coreNum][slotNum];
   1.534 +      coreNum = slot->coreSlotIsOn;
   1.535 +      slotNum = slot->slotIdx;
   1.536 +      returnSlv = process->slotTaskSlvs[coreNum][slotNum];
   1.537  
   1.538           //point slave to task's function, and mark slave as having task
   1.539        PR_int__reset_slaveVP_to_TopLvlFn( returnSlv, 
   1.540 -                          newTaskStub->taskType->fn, newTaskStub->args );
   1.541 -      returnSlv->taskStub          = newTaskStub;
   1.542 -      newTaskStub->slaveAssignedTo = returnSlv;
   1.543 +                       assignedMetaTask->topLevelFn, assignedMetaTask->initData );
   1.544 +      returnSlv->metaTask          = assignedMetaTask;
   1.545 +      assignedMetaTask->slaveAssignedTo = returnSlv;
   1.546        returnSlv->needsTaskAssigned = FALSE;  //slot slave is a "Task" slave type
   1.547        
   1.548           //have work, so reset Done flag, if was set
   1.549 -      if( processEnv->coreIsDone[coreNum] == TRUE ) //reads are higher perf
   1.550 -         processEnv->coreIsDone[coreNum] = FALSE;   //don't just write always
   1.551 +      if( process->coreIsDone[coreNum] == TRUE ) //reads are higher perf
   1.552 +         process->coreIsDone[coreNum] = FALSE;   //don't just write always
   1.553        
   1.554        goto ReturnTheSlv;
   1.555      }
   1.556 -   else
   1.557 -    {    //no task, so prune the recycle pool of free task slaves
   1.558 -      freeTaskSlv = readPrivQ( processEnv->freeTaskSlvRecycleQ );
   1.559 -      if( freeTaskSlv != NULL )
   1.560 -       {    //delete to bound the num extras, and deliver shutdown cond
   1.561 -         handleDissipate( freeTaskSlv, processEnv );
   1.562 -            //then return NULL
   1.563 -         returnSlv = NULL;
   1.564 -         
   1.565 -         goto ReturnTheSlv;
   1.566 -       }
   1.567 -      else
   1.568 -       { //candidate for shutdown.. if all extras dissipated, and no tasks
   1.569 -         // and no ready to resume slaves, then no way to generate
   1.570 -         // more tasks (on this core -- other core might have task still)
   1.571 -         if( processEnv->numLiveExtraTaskSlvs == 0 && 
   1.572 -             processEnv->numLiveThreadSlvs == 0 )
   1.573 -          { //This core sees no way to generate more tasks, so say it
   1.574 -            if( processEnv->coreIsDone[coreNum] == FALSE )
   1.575 -             { processEnv->numCoresDone += 1;
   1.576 -               processEnv->coreIsDone[coreNum] = TRUE;
   1.577 -               #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
   1.578 -               processEnv->shutdownInitiated = TRUE;
   1.579 -               
   1.580 -               #else
   1.581 -               if( processEnv->numCoresDone == NUM_CORES )
   1.582 -                { //means no cores have work, and none can generate more
   1.583 -                  processEnv->shutdownInitiated = TRUE;
   1.584 -                }
   1.585 -               #endif
   1.586 -             }
   1.587 -          }
   1.588 -            //check if shutdown has been initiated by this or other core
   1.589 -         if(processEnv->shutdownInitiated) 
   1.590 -          { returnSlv = PR_SS__create_shutdown_slave();
   1.591 -          }
   1.592 -         else
   1.593 -            returnSlv = NULL;
   1.594 -
   1.595 -         goto ReturnTheSlv; //don't need, but completes pattern
   1.596 -       } //if( freeTaskSlv != NULL )
   1.597 -    } //if( newTaskStub == NULL )
   1.598 -   //outcome: 1)slave was just pointed to task, 2)no tasks, so slave NULL
   1.599   
   1.600  
   1.601   ReturnTheSlv:  //All paths goto here.. to provide single point for holistic..
   1.602  
   1.603     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   1.604     if( returnSlv == NULL )
   1.605 -    { returnSlv = processEnv->idleSlv[coreNum][slotNum]; 
   1.606 +    { returnSlv = process->idleSlv[coreNum][slotNum]; 
   1.607      
   1.608           //things that would normally happen in resume(), but idle VPs
   1.609           // never go there
   1.610 -      returnSlv->assignCount++; //gives each idle unit a unique ID
   1.611 +      returnSlv->numTimesAssignedToASlot++; //gives each idle unit a unique ID
   1.612        Unit newU;
   1.613        newU.vp = returnSlv->slaveID;
   1.614 -      newU.task = returnSlv->assignCount;
   1.615 -      addToListOfArrays(Unit,newU,processEnv->unitList);
   1.616 +      newU.task = returnSlv->numTimesAssignedToASlot;
   1.617 +      addToListOfArrays(Unit,newU,process->unitList);
   1.618  
   1.619 -      if (returnSlv->assignCount > 1) //make a dependency from prev idle unit
   1.620 +      if (returnSlv->numTimesAssignedToASlot > 1) //make a dependency from prev idle unit
   1.621         { Dependency newD;             // to this one
   1.622           newD.from_vp = returnSlv->slaveID;
   1.623 -         newD.from_task = returnSlv->assignCount - 1;
   1.624 +         newD.from_task = returnSlv->numTimesAssignedToASlot - 1;
   1.625           newD.to_vp = returnSlv->slaveID;
   1.626 -         newD.to_task = returnSlv->assignCount;
   1.627 -         addToListOfArrays(Dependency, newD ,processEnv->ctlDependenciesList);  
   1.628 +         newD.to_task = returnSlv->numTimesAssignedToASlot;
   1.629 +         addToListOfArrays(Dependency, newD ,process->ctlDependenciesList);  
   1.630         }
   1.631      }
   1.632     else //have a slave will be assigned to the slot
   1.633      { //assignSlv->numTimesAssigned++;
   1.634           //get previous occupant of the slot
   1.635        Unit prev_in_slot = 
   1.636 -         processEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum];
   1.637 +         process->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum];
   1.638        if(prev_in_slot.vp != 0) //if not first slave in slot, make dependency
   1.639         { Dependency newD;      // is a hardware dependency
   1.640           newD.from_vp = prev_in_slot.vp;
   1.641           newD.from_task = prev_in_slot.task;
   1.642           newD.to_vp = returnSlv->slaveID;
   1.643 -         newD.to_task = returnSlv->assignCount;
   1.644 -         addToListOfArrays(Dependency,newD,processEnv->hwArcs);   
   1.645 +         newD.to_task = returnSlv->numTimesAssignedToASlot;
   1.646 +         addToListOfArrays(Dependency,newD,process->hwArcs);   
   1.647         }
   1.648        prev_in_slot.vp = returnSlv->slaveID; //make new slave the new previous
   1.649 -      prev_in_slot.task = returnSlv->assignCount;
   1.650 -      processEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum] =
   1.651 +      prev_in_slot.task = returnSlv->numTimesAssignedToASlot;
   1.652 +      process->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum] =
   1.653           prev_in_slot;        
   1.654      }
   1.655     #endif
   1.656 @@ -910,92 +1016,150 @@
   1.657     return( returnSlv );
   1.658   }
   1.659  
   1.660 -      
   1.661 -//=================================================================
   1.662 -         //#else  //is MODE__MULTI_LANG
   1.663 -            //For multi-lang mode, first, get the constraint-env holder out of
   1.664 -            // the process, which is in the slave.
   1.665 -            //Second, get the magic number out of the request, use it to look up
   1.666 -            // the constraint Env within the constraint-env holder.
   1.667 -            //Then get the request handler out of the constr env
   1.668 -         constrEnvHolder = slave->process->constrEnvHolder;
   1.669 -         reqst = slave->request;
   1.670 -         langMagicNumber = reqst->langMagicNumber;
   1.671 -         semanticEnv = lookup( langMagicNumber, constrEnvHolder ); //a macro
   1.672 -         if( slave->reqst->type == taskEnd ) //end-task is special
   1.673 -          {    //need to know what lang's task ended
   1.674 -            taskEndHandler = semanticEnv->taskEndHandler;
   1.675 -            (*taskEndHandler)( slave, reqst, semanticEnv ); //can put semantic data into task end reqst, for continuation, etc
   1.676 -               //this is a slot slave, get a new task for it
   1.677 -            if( !existsOverrideAssigner )//if exists, is set above, before loop
   1.678 -             {    //search for task assigner that has work
   1.679 -               for( a = 0; a < num_assigners; a++ )
   1.680 -                { if( taskAssigners[a]->hasWork )
   1.681 -                   { newTaskAssigner = taskAssigners[a];
   1.682 -                     (*newTaskAssigner)( slave, semanticEnv );
   1.683 -                     goto GotTask;
   1.684 -                   }
   1.685 -                }
   1.686 -               goto NoTasks;
   1.687 -             }
   1.688 -            
   1.689 -           GotTask:
   1.690 -            continue; //have work, so do next iter of loop, don't call slave assigner
   1.691 -          }
   1.692 -         if( slave->typeOfVP == taskSlotSlv ) changeSlvType();//is suspended task
   1.693 -            //now do normal suspended slave request handler
   1.694 -         requestHandler = semanticEnv->requestHandler;
   1.695 -         //#endif
   1.696  
   1.697 -         
   1.698 -       }
   1.699 -         //If make it here, then was no task for this slot
   1.700 -         //slot empty, hand to Assigner to fill with a slave
   1.701 -      if( currSlot->needsSlaveAssigned )
   1.702 -       {    //Call plugin's Assigner to give slot a new slave
   1.703 -               HOLISTIC__Record_Assigner_start;
   1.704 -               
   1.705 -         //#ifdef  MODE__MULTI_LANG
   1.706 -        NoTasks:
   1.707 -            //First, choose an Assigner..
   1.708 -            //There are several Assigners, one for each langlet.. they all
   1.709 -            // indicate whether they have work available.. just pick the first
   1.710 -            // one that has work..  Or, if there's a Unified Assigner, call
   1.711 -            // that one..  So, go down array, checking..
   1.712 -         if( !existsOverrideAssigner ) 
   1.713 -          { for( a = 0; a < num_assigners; a++ )
   1.714 -             { if( assigners[a]->hasWork )
   1.715 -                { slaveAssigner = assigners[a];
   1.716 -                  goto GotAssigner;
   1.717 -                }
   1.718 -             }
   1.719 -            //no work, so just continue to next iter of scan loop
   1.720 -            continue;
   1.721 -          }
   1.722 -         //when exists override, the assigner is set, once, above, so do nothing
   1.723 -        GotAssigner:
   1.724 -         //#endif
   1.725 -        
   1.726 -         assignedSlaveVP =
   1.727 -          (*slaveAssigner)( semanticEnv, currSlot );
   1.728 -         
   1.729 -            //put the chosen slave into slot, and adjust flags and state
   1.730 -         if( assignedSlaveVP != NULL )
   1.731 -          { currSlot->slaveAssignedToSlot = assignedSlaveVP;
   1.732 -            assignedSlaveVP->animSlotAssignedTo = currSlot;
   1.733 -            currSlot->needsSlaveAssigned  = FALSE;
   1.734 -            numSlotsFilled               += 1;
   1.735 -            
   1.736 -            HOLISTIC__Record_Assigner_end;
   1.737 -          }
   1.738 -       }//if slot needs slave assigned
   1.739 -    }//for( slotIdx..
   1.740 -
   1.741 -         MEAS__Capture_Post_Master_Point;
   1.742 +/*In creator, only PR related things happen, and things in the langlet whose
   1.743 + * creator construct was used.
   1.744 + *Other langlet still gets a chance to create semData -- but by registering a
   1.745 + * "createSemData" handler in the semEnv.  When a construct  of the langlet
   1.746 + * calls "PR__give_sem_data()", if there is no semData for that langlet,
   1.747 + * the PR will call the creator in the langlet's semEnv, place whatever it
   1.748 + * makes as the semData in that slave for that langlet, and return that semData
   1.749 + *
   1.750 + *So, as far as counting things, a langlet is only allowed to count creation
   1.751 + * of slaves it creates itself..  may have to change this later.. add a way for
   1.752 + * langlet to register a trigger Fn called each time a slave gets created.. 
   1.753 + * need more experience with what langlets will do at create time..  think Cilk
   1.754 + * has interesting create behavior..  not sure how that will differ in light
   1.755 + * of true tasks and langlet approach.  Look at it after all done and start
   1.756 + * modifying the langs to be langlets..
   1.757 + * 
   1.758 + *PR itself needs to create the slave, then update numLiveSlaves in process,
   1.759 + * copy processID from requestor to newly created
   1.760 + */
   1.761 +PRHandle_CreateSlave( PRReqst *req, SlaveVP *requestingSlv )
   1.762 + { SlaveVP *newSlv;
   1.763 +   PRMetaTask metaTask;
   1.764 +   PRProcess *process;
   1.765 + 
   1.766 +   process = requestingSlv->processSlaveIsIn;
   1.767 +   newSlv = PR_int__create_slaveVP();
   1.768 +   newSlv->typeOfVP = GenericSlv;
   1.769 +   newSlv->processSlaveIsIn = process;
   1.770 +   process->numLiveGenericSlaves += 1;
   1.771 +   metaTask = PR_int__create_slave_meta_task();
   1.772 +   metaTask->taskID = req->ID;
   1.773 +   metaTask->taskType = GenericSlave;
   1.774     
   1.775 -   masterSwitchToCoreCtlr( masterVP );
   1.776 -   flushRegisters();
   1.777 -         DEBUG__printf(FALSE,"came back after switch to core -- so lock released!");
   1.778 -   }//while(1) 
   1.779 +   (*req->handler)(newSlv);
   1.780   }
   1.781  
   1.782 +/*The dissipate handler has to update the number of slaves of the type, within
   1.783 + * the process, and call the langlet handler linked into the request,
   1.784 + * and after that returns, then call the PR function that frees the slave state
   1.785 + * (or recycles the slave).
   1.786 + * 
   1.787 + *The PR function that frees the slave state has to also free all of the
   1.788 + * semData in the slave..  or else reset all of the semDatas.. by, say, marking
   1.789 + * them, then in PR__give_semData( magicNum ) call the langlet registered
   1.790 + * "resetSemData" Fn.
   1.791 + */
   1.792 +PRHandle_Dissipate( SlaveVP *slave )
   1.793 + { PRProcess *process;
   1.794 +   void      *semEnv;
   1.795 +   
   1.796 +   process = slave->processSlaveIsIn;
   1.797 +   
   1.798 +      //do the language's dissipate handler
   1.799 +   semEnv = PR_int__give_sem_env_for( slave, slave->request->langMagicNumber );
   1.800 +   (*slave->request->handler)( slave, semEnv );
   1.801 +   
   1.802 +   process->numLiveGenericSlaves -= 1;
   1.803 +   PR_int__dissipate_slaveVP_multilang( slave ); //recycles and resets semDatas
   1.804 +   
   1.805 +      //check End Of Process Condition
   1.806 +   if( process->numLiveTasks == 0 &&
   1.807 +       process->numLiveGenericSlaves == 0 )
   1.808 +      signalEndOfProcess;
   1.809 + }
   1.810 +
   1.811 +/*Create task is a special form, that has PR behavior in addition to plugin
   1.812 + * behavior.  Master calls this first, and this in turn calls the plugin's
   1.813 + * create task handler.
   1.814 + */
   1.815 +inline void
   1.816 +PRHandle_CreateTask( TopLevelFn topLevelFn, void *initData, PRReqst *req, 
   1.817 +                                                        SlaveVP *requestingSlv )
   1.818 + { PRMetaTask    *metaTask;
   1.819 +   PRProcess     *process;
   1.820 +   void          *semEnv, _langMetaTask;
   1.821 +   PRLangMetaTask *langMetaTask;
   1.822 +                    
   1.823 +   process = requestingSlv->processSlaveIsIn;
   1.824 +
   1.825 +   metaTask         = PR_int__create_meta_task( req );
   1.826 +   metaTask->taskID = req->ID; //may be NULL
   1.827 +   metaTask->topLevelFn = topLevelFn;
   1.828 +   metaTask->initData   = initData;
   1.829 +           
   1.830 +   process->numLiveTasks += 1;
   1.831 +      
   1.832 +      //plugin tracks tasks ready, and has its own assigner, so task doesn't
   1.833 +      // come back from lang's handler -- it's consumed and stays in semEnv.
   1.834 +      //But handler gives back the language-specific meta-task it creates, and
   1.835 +      // then hook that into the PR meta-task
   1.836 +      //(Could also do PRMetaTask as a prolog -- make a Fn that takes the size
   1.837 +      // of the lang's metaTask, and alloc's that plus the prolog and returns
   1.838 +      // ptr to position just above the prolog)
   1.839 +   semEnv = PR_int__give_semEnv_of_req( req, requestingSlv ); //magic num in req
   1.840 +   _langMetaTask = (*requestingSlv->request->handler)(req, semEnv);
   1.841 +   langMetaTask  = (PRLangMetaTask *)_langMetaTask;
   1.842 +   metaTask->langMetaTask      = langMetaTask;
   1.843 +   langMetaTask->protoMetaTask = metaTask;
   1.844 +   
   1.845 +   return;
   1.846 + }
   1.847 +
   1.848 +/*When a task ends, are two scenarios: 1) task ran to completion, or 2) task
   1.849 + * suspended at some point in its code.
   1.850 + *For 1, just decr count of live tasks (and check for end condition) -- the
   1.851 + * master loop will decide what goes into the slot freed up by this task end,
   1.852 + * so, here, don't worry about assigning a new task to the slot slave.
   1.853 + *For 2, the task's slot slave has been converted to a free task slave, which
   1.854 + * now has nothing more to do, so send it to the recycle Q (which includes
   1.855 + * freeing all the semData and meta task structs alloc'd for it).  Then
   1.856 + * decrement the live task count and check end condition.
   1.857 + * 
   1.858 + *PR has to update count of live tasks, and check end of process condition.
   1.859 + * There are constructs that wait for a process to end, so when end detected,
   1.860 + * have to resume what's waiting..
   1.861 + *Thing is, the wait is used in "main", so it's an OS thread.  That means
   1.862 + * PR internals have to do OS thread signaling.  Want to do that in the
   1.863 + * core controller, which has the original stack of an OS thread.
   1.864 + * 
   1.865 + *So here, when detect process end, signal to the core controller, which will
   1.866 + * then do the condition variable notify to the OS thread that's waiting. 
   1.867 + */
   1.868 +inline void
   1.869 +PRHandle_EndTask( SlaveVP *requestingSlv )
   1.870 + { void *semEnv;
   1.871 +   PRReqst *req;  
   1.872 +   PRMetaTask *metaTask;
   1.873 +   PRProcess  *process;
   1.874 + 
   1.875 +   req = requestingSlv->request;
   1.876 +   semEnv = PR_int__give_semEnv_of_req( req, requestingSlv ); //magic num in req
   1.877 +   metaTask = req->metaTask;
   1.878 +      //Want to keep PRMetaTask hidden from plugin, so extract semReq..
   1.879 +   (*req->handler)( metaTask, req->semReq, semEnv );
   1.880 +   
   1.881 +   recycleFreeTaskSlave( requestingSlv );
   1.882 +   
   1.883 +   process->numLiveTasks -= 1;
   1.884 +  
   1.885 +      //check End Of Process Condition
   1.886 +   if( process->numLiveTasks == 0 &&
   1.887 +       process->numLiveGenericSlaves == 0 )
   1.888 +      signalEndOfProcessToCoreCtlr;
   1.889 + }
   1.890 +
   1.891 + 
   1.892 \ No newline at end of file
     2.1 --- a/CoreController.c	Wed Sep 19 23:12:44 2012 -0700
     2.2 +++ b/CoreController.c	Tue Oct 23 23:46:17 2012 -0700
     2.3 @@ -92,13 +92,13 @@
     2.4     thisCoresIdx = thisCoresThdParams->coreNum;
     2.5  
     2.6        //Assembly that saves addr of label of return instr -- label in assmbly
     2.7 -   recordCoreCtlrReturnLabelAddr((void**)&(_PRMasterEnv->coreCtlrReturnPt));
     2.8 +   recordCoreCtlrReturnLabelAddr((void**)&(_PRTopEnv->coreCtlrReturnPt));
     2.9  
    2.10 -   animSlots = _PRMasterEnv->allAnimSlots[thisCoresIdx];
    2.11 +   animSlots = _PRTopEnv->allAnimSlots[thisCoresIdx];
    2.12     currSlotIdx = 0; //start at slot 0, go up until one empty, then do master
    2.13     numRepetitionsWithNoWork = 0;
    2.14 -   addrOfMasterLock = &(_PRMasterEnv->masterLock);
    2.15 -   thisCoresMasterVP = _PRMasterEnv->masterVPs[thisCoresIdx];
    2.16 +   addrOfMasterLock = &(_PRTopEnv->masterLock);
    2.17 +   thisCoresMasterVP = _PRTopEnv->masterVPs[thisCoresIdx];
    2.18     
    2.19     //==================== pthread related stuff ======================
    2.20        //pin the pthread to the core -- takes away Linux control
    2.21 @@ -113,7 +113,7 @@
    2.22  
    2.23        //make sure the controllers all start at same time, by making them wait
    2.24     pthread_mutex_lock(  &suspendLock );
    2.25 -   while( !(_PRMasterEnv->setupComplete) )
    2.26 +   while( !(_PRTopEnv->setupComplete) )
    2.27      { pthread_cond_wait( &suspendCond, &suspendLock );
    2.28      }
    2.29     pthread_mutex_unlock( &suspendLock );
    2.30 @@ -225,11 +225,11 @@
    2.31  inline uint32_t
    2.32  randomNumber()
    2.33   {
    2.34 -	_PRMasterEnv->seed1 = (uint32)(36969 * (_PRMasterEnv->seed1 & 65535) + 
    2.35 -                                   (_PRMasterEnv->seed1 >> 16) );
    2.36 -	_PRMasterEnv->seed2 = (uint32)(18000 * (_PRMasterEnv->seed2 & 65535) + 
    2.37 -                                   (_PRMasterEnv->seed2 >> 16) );
    2.38 -	return (_PRMasterEnv->seed1 << 16) + _PRMasterEnv->seed2;
    2.39 +	_PRTopEnv->seed1 = (uint32)(36969 * (_PRTopEnv->seed1 & 65535) + 
    2.40 +                                   (_PRTopEnv->seed1 >> 16) );
    2.41 +	_PRTopEnv->seed2 = (uint32)(18000 * (_PRTopEnv->seed2 & 65535) + 
    2.42 +                                   (_PRTopEnv->seed2 >> 16) );
    2.43 +	return (_PRTopEnv->seed1 << 16) + _PRTopEnv->seed2;
    2.44   }
    2.45  
    2.46  
    2.47 @@ -292,14 +292,14 @@
    2.48     
    2.49     //===============  Initializations ===================
    2.50     thisCoresIdx = 0; //sequential version
    2.51 -   animSlots = _PRMasterEnv->allAnimSlots[thisCoresIdx];
    2.52 +   animSlots = _PRTopEnv->allAnimSlots[thisCoresIdx];
    2.53     currSlotIdx = 0; //start at slot 0, go up until one empty, then do master
    2.54     numRepetitionsWithNoWork = 0;
    2.55 -   addrOfMasterLock = &(_PRMasterEnv->masterLock);
    2.56 -   thisCoresMasterVP = _PRMasterEnv->masterVPs[thisCoresIdx];
    2.57 +   addrOfMasterLock = &(_PRTopEnv->masterLock);
    2.58 +   thisCoresMasterVP = _PRTopEnv->masterVPs[thisCoresIdx];
    2.59     
    2.60        //Assembly that saves addr of label of return instr -- label in assmbly
    2.61 -   recordCoreCtlrReturnLabelAddr((void**)&(_PRMasterEnv->coreCtlrReturnPt));
    2.62 +   recordCoreCtlrReturnLabelAddr((void**)&(_PRTopEnv->coreCtlrReturnPt));
    2.63  
    2.64     
    2.65     //====================== The Core Controller ======================
     3.1 --- a/Defines/MEAS__macros_to_be_moved_to_langs.h	Wed Sep 19 23:12:44 2012 -0700
     3.2 +++ b/Defines/MEAS__macros_to_be_moved_to_langs.h	Tue Oct 23 23:46:17 2012 -0700
     3.3 @@ -26,8 +26,8 @@
     3.4  #define syncHistIdx       2
     3.5  
     3.6  #define MEAS__Make_Meas_Hists_for_Language() \
     3.7 -   _PRMasterEnv->measHistsInfo = \
     3.8 -          makePrivDynArrayOfSize( (void***)&(_PRMasterEnv->measHists), 200); \
     3.9 +   _PRTopEnv->measHistsInfo = \
    3.10 +          makePrivDynArrayOfSize( (void***)&(_PRTopEnv->measHists), 200); \
    3.11      makeAMeasHist( spawnHistIdx,      "Spawn",        50, 0, 200 ) \
    3.12      makeAMeasHist( syncHistIdx,       "Sync",         50, 0, 200 )
    3.13  
    3.14 @@ -39,7 +39,7 @@
    3.15  #define Meas_endSpawn \
    3.16      saveLowTimeStampCountInto( endStamp ); \
    3.17      addIntervalToHist( startStamp, endStamp, \
    3.18 -                             _PRMasterEnv->measHists[ spawnHistIdx ] );
    3.19 +                             _PRTopEnv->measHists[ spawnHistIdx ] );
    3.20  
    3.21  #define Meas_startSync \
    3.22      int32 startStamp, endStamp; \
    3.23 @@ -48,7 +48,7 @@
    3.24  #define Meas_endSync \
    3.25      saveLowTimeStampCountInto( endStamp ); \
    3.26      addIntervalToHist( startStamp, endStamp, \
    3.27 -                             _PRMasterEnv->measHists[ syncHistIdx ] );
    3.28 +                             _PRTopEnv->measHists[ syncHistIdx ] );
    3.29  #endif
    3.30  
    3.31  //===========================================================================
     4.1 --- a/Defines/PR_defs__HW_constants.h	Wed Sep 19 23:12:44 2012 -0700
     4.2 +++ b/Defines/PR_defs__HW_constants.h	Tue Oct 23 23:46:17 2012 -0700
     4.3 @@ -16,10 +16,14 @@
     4.4     // machine
     4.5  #define NUM_CORES        4
     4.6  
     4.7 -   // tradeoff amortizing master fixed overhead vs imbalance potential
     4.8 +   //tradeoff amortizing master fixed overhead vs imbalance potential
     4.9     // when work-stealing, can make bigger, at risk of losing cache affinity
    4.10  #define NUM_ANIM_SLOTS  1
    4.11  
    4.12 +   //number of PRSemEnv structs created inside a process -- can't start more
    4.13 +   // than this many langlets inside a single process
    4.14 +#define NUM_SEM_ENVS_IN_PROCESS 64
    4.15 +
    4.16     //These are for backoff inside core-loop, which reduces lock contention
    4.17  #define NUM_REPS_W_NO_WORK_BEFORE_YIELD      10
    4.18  #define NUM_REPS_W_NO_WORK_BEFORE_BACKOFF    2
    4.19 @@ -36,6 +40,8 @@
    4.20     //Frequency of TS counts -- have to do tests to verify
    4.21     //NOTE: turn off (in BIOS)  TURBO-BOOST and SPEED-STEP else won't be const
    4.22  #define TSCOUNT_FREQ 3180000000
    4.23 +#define TSC_LOW_CYCLES 27
    4.24 +#define TSC_LOWHI_CYCLES 45
    4.25  
    4.26  #define CACHE_LINE_SZ  256
    4.27  #define PAGE_SIZE     4096
     5.1 --- a/HW_Dependent_Primitives/PR__HW_measurement.c	Wed Sep 19 23:12:44 2012 -0700
     5.2 +++ b/HW_Dependent_Primitives/PR__HW_measurement.c	Tue Oct 23 23:46:17 2012 -0700
     5.3 @@ -27,25 +27,25 @@
     5.4      {
     5.5         hw_event.type = PERF_TYPE_HARDWARE;	
     5.6         hw_event.config = PERF_COUNT_HW_CPU_CYCLES; //cycles
     5.7 -        _PRMasterEnv->cycles_counter_fd[coreIdx] = syscall(__NR_perf_event_open, &hw_event,
     5.8 +        _PRTopEnv->cycles_counter_fd[coreIdx] = syscall(__NR_perf_event_open, &hw_event,
     5.9   		0,//pid_t pid, 
    5.10  		coreIdx,//int cpu, 
    5.11  		-1,//int group_fd,
    5.12  		0//unsigned long flags
    5.13  	);
    5.14 -        if (_PRMasterEnv->cycles_counter_fd[coreIdx]<0){
    5.15 +        if (_PRTopEnv->cycles_counter_fd[coreIdx]<0){
    5.16              fprintf(stderr,"On core %d: ",coreIdx);
    5.17              perror("Failed to open cycles counter");
    5.18          }
    5.19          hw_event.type = PERF_TYPE_HARDWARE;
    5.20          hw_event.config = PERF_COUNT_HW_INSTRUCTIONS; //instrs
    5.21 -        _PRMasterEnv->instrs_counter_fd[coreIdx] = syscall(__NR_perf_event_open, &hw_event,
    5.22 +        _PRTopEnv->instrs_counter_fd[coreIdx] = syscall(__NR_perf_event_open, &hw_event,
    5.23   		0,//pid_t pid, 
    5.24  		coreIdx,//int cpu, 
    5.25  		-1,//int group_fd,
    5.26  		0//unsigned long flags
    5.27  	);
    5.28 -        if (_PRMasterEnv->instrs_counter_fd[coreIdx]<0){
    5.29 +        if (_PRTopEnv->instrs_counter_fd[coreIdx]<0){
    5.30              fprintf(stderr,"On core %d: ",coreIdx);
    5.31              perror("Failed to open instrs counter");
    5.32          }
    5.33 @@ -53,13 +53,13 @@
    5.34          hw_event.config = PERF_COUNT_HW_CACHE_L1D <<  0  |
    5.35  	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
    5.36  	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16); //cache misses
    5.37 -        _PRMasterEnv->cachem_counter_fd[coreIdx] = syscall(__NR_perf_event_open, &hw_event,
    5.38 +        _PRTopEnv->cachem_counter_fd[coreIdx] = syscall(__NR_perf_event_open, &hw_event,
    5.39   		0,//pid_t pid, 
    5.40  		coreIdx,//int cpu, 
    5.41  		-1,//int group_fd,
    5.42  		0//unsigned long flags
    5.43  	);
    5.44 -        if (_PRMasterEnv->cachem_counter_fd[coreIdx]<0){
    5.45 +        if (_PRTopEnv->cachem_counter_fd[coreIdx]<0){
    5.46              fprintf(stderr,"On core %d: ",coreIdx);
    5.47              perror("Failed to open cache miss counter");
    5.48              exit(1);
     6.1 --- a/HW_Dependent_Primitives/PR__primitives_asm.s	Wed Sep 19 23:12:44 2012 -0700
     6.2 +++ b/HW_Dependent_Primitives/PR__primitives_asm.s	Tue Oct 23 23:46:17 2012 -0700
     6.3 @@ -51,7 +51,7 @@
     6.4   * 0x18 coreCtlrFramePtr
     6.5   * 0x20 coreCtlrStackPtr
     6.6   *
     6.7 - * _PRMasterEnv  offsets:
     6.8 + * _PRTopEnv  offsets:
     6.9   * 0x00 coreCtlrReturnPt
    6.10   * 0x100 masterLock
    6.11   */
    6.12 @@ -76,7 +76,7 @@
    6.13   * 0x18 coreCtlrFramePtr
    6.14   * 0x20 coreCtlrStackPtr
    6.15   *
    6.16 - * _PRMasterEnv  offsets:
    6.17 + * _PRTopEnv  offsets:
    6.18   * 0x00 coreCtlrReturnPt
    6.19   * 0x100 masterLock
    6.20   */
    6.21 @@ -88,8 +88,8 @@
    6.22      movq    %rbp      , 0x08(%rdi)   #save frame pointer
    6.23      movq    0x20(%rdi), %rsp         #restore stack pointer
    6.24      movq    0x18(%rdi), %rbp         #restore frame pointer
    6.25 -    movq    $_PRMasterEnv, %rcx
    6.26 -    movq        (%rcx), %rcx         #_PRMasterEnv is pointer to struct
    6.27 +    movq    $_PRTopEnv, %rcx
    6.28 +    movq        (%rcx), %rcx         #_PRTopEnv is pointer to struct
    6.29      movq    0x00(%rcx), %rax         #get CoreCtlrStartPt
    6.30      jmp     *%rax                    #jmp to CoreCtlr
    6.31  SlvReturn:
    6.32 @@ -106,7 +106,7 @@
    6.33   * 0x18 coreCtlrFramePtr
    6.34   * 0x20 coreCtlrStackPtr
    6.35   *
    6.36 - * _PRMasterEnv  offsets:
    6.37 + * _PRTopEnv  offsets:
    6.38   * 0x00 coreCtlrReturnPt
    6.39   * 0x100 masterLock
    6.40   */
    6.41 @@ -118,8 +118,8 @@
    6.42      movq    %rbp      , 0x08(%rdi)   #save frame pointer
    6.43      movq    0x20(%rdi), %rsp         #restore stack pointer
    6.44      movq    0x18(%rdi), %rbp         #restore frame pointer
    6.45 -    movq    $_PRMasterEnv, %rcx
    6.46 -    movq        (%rcx), %rcx         #_PRMasterEnv is pointer to struct
    6.47 +    movq    $_PRTopEnv, %rcx
    6.48 +    movq        (%rcx), %rcx         #_PRTopEnv is pointer to struct
    6.49      movq    0x00(%rcx), %rax         #get CoreCtlr return pt
    6.50      movl    $0x0      , 0x100(%rcx)  #release lock
    6.51      jmp     *%rax                    #jmp to CoreCtlr
    6.52 @@ -142,7 +142,7 @@
    6.53   * 0x18 coreCtlrFramePtr
    6.54   * 0x20 coreCtlrStackPtr
    6.55   *
    6.56 - * _PRMasterEnv  offsets:
    6.57 + * _PRTopEnv  offsets:
    6.58   * 0x00 coreCtlrReturnPt
    6.59   * 0x100 masterLock
    6.60   */
     7.1 --- a/PR.h	Wed Sep 19 23:12:44 2012 -0700
     7.2 +++ b/PR.h	Tue Oct 23 23:46:17 2012 -0700
     7.3 @@ -32,15 +32,17 @@
     7.4  //
     7.5  typedef unsigned long long    TSCount;
     7.6  
     7.7 -typedef struct _AnimSlot     AnimSlot;
     7.8 -typedef struct _PRReqst      PRReqst;
     7.9 +typedef struct _AnimSlot      AnimSlot;
    7.10 +typedef struct _PRReqst       PRReqst;
    7.11  typedef struct _SlaveVP       SlaveVP;
    7.12  typedef struct _MasterVP      MasterVP;
    7.13  typedef struct _IntervalProbe IntervalProbe;
    7.14 +typedef struct _PRMetaTask    PRMetaTask;
    7.15  
    7.16  
    7.17  typedef SlaveVP *(*SlaveAssigner)  ( void *, AnimSlot*); //semEnv, slot for HW info
    7.18  typedef void     (*RequestHandler) ( SlaveVP *, void * ); //prWReqst, semEnv
    7.19 +typedef void     (*IndivReqHandler)( SlaveVP *, void * ); //prWReqst, semEnv
    7.20  typedef void     (*TopLevelFnPtr)  ( void *, SlaveVP * ); //initData, animSlv
    7.21  typedef void       TopLevelFn      ( void *, SlaveVP * ); //initData, animSlv
    7.22  typedef void     (*ResumeSlvFnPtr) ( SlaveVP *, void * );
    7.23 @@ -57,25 +59,41 @@
    7.24  //============= Request Related ===========
    7.25  //
    7.26  
    7.27 -enum PRReqstType   //avoid starting enums at 0, for debug reasons
    7.28 +enum PRReqstType  //avoid starting enums at 0, for debug reasons
    7.29   {
    7.30 -   semantic = 1,
    7.31 -   createReq,
    7.32 -   dissipate,
    7.33 -   PRSemantic      //goes with PRSemReqst below
    7.34 +   TaskCreate = 1,
    7.35 +   TaskEnd,
    7.36 +   SlvCreate,
    7.37 +   SlvDissipate,
    7.38 +   Language,
    7.39 +   Service,       //To invoke a PR provided equivalent of a language request (ex: probe)
    7.40 +   Hardware,
    7.41 +   IO,
    7.42 +   OSCall
    7.43   };
    7.44  
    7.45  struct _PRReqst
    7.46   {
    7.47 -   enum PRReqstType  reqType;//used for dissipate and in future for IO requests
    7.48 -   void              *semReqData;
    7.49 -
    7.50 +   enum PRReqstType   reqType;//used for special forms that have PR behavior
    7.51 +   void              *semReq;
    7.52 +   PRProcess         *processReqIsIn;
    7.53 +   int32              langMagicNumber;
    7.54 +   PRMetaTask        *metaTask;
    7.55 +   TopLevelFn         topLevelFn;
    7.56 +   void              *initData;
    7.57 +   int32             *ID;
    7.58 +   
    7.59 +      //The request handling structure is a bit messy..  for special forms, 
    7.60 +      // such as create and dissipate, the language inserts pointer to handler
    7.61 +      // fn directly into the request..  might change to this for all requests
    7.62 +   IndivReqHandler    handler; //pointer to handler fn for create, dissip, etc
    7.63 +   
    7.64     PRReqst *nextReqst;
    7.65   };
    7.66  //PRReqst
    7.67  
    7.68 -enum PRSemReqstType   //These are equivalent to semantic requests, but for
    7.69 - {                     // PR's services available directly to app, like OS
    7.70 +enum PRServReqType   //These are equivalent to semantic requests, but for
    7.71 + {                    // PR's services available directly to app, like OS
    7.72     make_probe = 1,    // and probe services -- like a PR-wide built-in lang
    7.73     throw_excp,
    7.74     openFile,
    7.75 @@ -83,13 +101,13 @@
    7.76   };
    7.77  
    7.78  typedef struct
    7.79 - { enum PRSemReqstType reqType;
    7.80 + { enum PRServReqType   reqType;
    7.81     SlaveVP             *requestingSlv;
    7.82     char                *nameStr;  //for create probe
    7.83     char                *msgStr;   //for exception
    7.84     void                *exceptionData;
    7.85   }
    7.86 - PRSemReq;
    7.87 +PRServReq;
    7.88  
    7.89  
    7.90  //====================  Core data structures  ===================
    7.91 @@ -114,9 +132,8 @@
    7.92  
    7.93  enum VPtype 
    7.94   { TaskSlotSlv = 1,//Slave tied to an anim slot, only animates tasks
    7.95 -   TaskExtraSlv,   //When a suspended task ends, the slave becomes this
    7.96 -   PersistentSlv,  //the VP is explicitly seen in the app code, or task suspends
    7.97 -   Slave, //to be removed
    7.98 +   TaskFreeSlv,   //When a suspended task ends, the slave becomes this
    7.99 +   GenericSlv,     //the VP is explicitly seen in the app code, or task suspends
   7.100     Master,
   7.101     Shutdown,
   7.102     Idle
   7.103 @@ -135,22 +152,27 @@
   7.104     
   7.105        //============ below this, no fields are used in asm =============
   7.106     
   7.107 +   void       *startOfStack;  //used to free, and to point slave to Fn
   7.108 +   PRProcess  *processSlaveIsIn;
   7.109 +   PRMetaTask *metaTask;
   7.110 +   enum VPtype typeOfVP;      //Slave vs Master vs Shutdown..
   7.111     int         slaveID;       //each slave given a globally unique ID
   7.112     int         coreAnimatedBy; 
   7.113 -   void       *startOfStack;  //used to free, and to point slave to Fn
   7.114 -   enum VPtype typeOfVP;      //Slave vs Master vs Shutdown..
   7.115 -   int         assignCount;   //Each assign is for one work-unit, so IDs it
   7.116 +   int         numTimesAssignedToASlot;   //Each assign is for one work-unit, so is an ID
   7.117        //note, a scheduling decision is uniquely identified by the triple:
   7.118 -      // <slaveID, coreAnimatedBy, assignCount> -- used in record & replay
   7.119 +      // <slaveID, coreAnimatedBy, numTimesAssignedToASlot> -- used in record & replay
   7.120     
   7.121        //for comm -- between master and coreCtlr & btwn wrapper lib and plugin
   7.122     AnimSlot   *animSlotAssignedTo;
   7.123 -   PRReqst   *request;      //wrapper lib puts in requests, plugin takes out
   7.124 +   PRReqst    *request;      //wrapper lib puts in requests, plugin takes out
   7.125     void       *dataRetFromReq;//Return vals from plugin to Wrapper Lib
   7.126  
   7.127 -      //For using Slave as carrier for data
   7.128 +      //For language specific data that needs to be in the slave
   7.129     void       *semanticData;  //Lang saves lang-specific things in slave here
   7.130  
   7.131 +      //Task related stuff
   7.132 +   bool        needsTaskAssigned;
   7.133 +   
   7.134          //=========== MEASUREMENT STUFF ==========
   7.135           MEAS__Insert_Meas_Fields_into_Slave;
   7.136           float64     createPtInSecs;  //time VP created, in seconds
   7.137 @@ -172,14 +194,12 @@
   7.138        //Basic PR infrastructure
   7.139     SlaveVP        **masterVPs;
   7.140     AnimSlot      ***allAnimSlots;
   7.141 + 
   7.142 +   PRProcess      **processes;
   7.143     
   7.144 -      //plugin related
   7.145 -   PRSemEnv       **langlets;
   7.146 -   
   7.147 -      //Slave creation -- global count of slaves existing, across langs and processes
   7.148 +//move to processEnv      //Slave creation -- global count of slaves existing, across langs and processes
   7.149     int32            numSlavesCreated;  //used to give unique ID to processor
   7.150 -//no reasonable way to do fail-safe when have mult langlets and processes.. have to detect for each langlet separately
   7.151 -//   int32            numSlavesAlive;    //used to detect fail-safe shutdown
   7.152 +   int32            numTasksCreated;   //to give unique ID to a task
   7.153  
   7.154        //Initialization related
   7.155     int32            setupComplete;      //use while starting up coreCtlr
   7.156 @@ -192,14 +212,24 @@
   7.157     uint32_t seed1;
   7.158     uint32_t seed2;
   7.159  
   7.160 +   These_Prob_belong_in_PRPRocess;
   7.161 +//   SlaveVP         *slotTaskSlvs[NUM_CORES][NUM_ANIM_SLOTS];
   7.162 +//   int32            numLiveFreeTaskSlvs;
   7.163 +//   int32            numLiveThreadSlvs;
   7.164 +//   bool32          *coreIsDone;
   7.165 +//   int32            numCoresDone;
   7.166 +   
   7.167 +//   SlaveVP* idleSlv[NUM_CORES][NUM_ANIM_SLOTS];
   7.168 +//   int shutdownInitiated;
   7.169 +   
   7.170        //=========== MEASUREMENT STUFF =============
   7.171         IntervalProbe   **intervalProbes;
   7.172 -       PtrToPrivDynArray *dynIntervalProbesInfo;
   7.173 +       PrivDynArrayInfo *dynIntervalProbesInfo;
   7.174         HashTable        *probeNameHashTbl;
   7.175         int32             masterCreateProbeID;
   7.176         float64           createPtInSecs; //real-clock time PR initialized
   7.177         Histogram       **measHists;
   7.178 -       PtrToPrivDynArray *measHistsInfo;
   7.179 +       PrivDynArrayInfo *measHistsInfo;
   7.180         MEAS__Insert_Susp_Meas_Fields_into_MasterEnv;
   7.181         MEAS__Insert_Master_Meas_Fields_into_MasterEnv;
   7.182         MEAS__Insert_Master_Lock_Meas_Fields_into_MasterEnv;
   7.183 @@ -213,45 +243,112 @@
   7.184  
   7.185  //=====================
   7.186  typedef struct
   7.187 - { int32   langletID; //acts as index into array of langlets in master env
   7.188 -   void   *langletSemEnv;
   7.189 -   int32   langMagicNumber;
   7.190 -   SlaveAssigner    slaveAssigner;
   7.191 -   RequestHandler   requestHandler;
   7.192 -   EndTaskHandler   endTaskHandler;
   7.193 + { int32     langMagicNumber; //indexes into hash array of semEnvs in PRProcess
   7.194 +   PRSemEnv *chainedSemEnv;   //chains to semEnvs with same hash
   7.195 +   void     *langSemEnv;
   7.196     
   7.197 -      //Tack slaves created, separately for each langlet (in each process)
   7.198 -   int32            numSlavesCreated;  //gives ordering to processor creation
   7.199 -   int32            numSlavesAlive;    //used to detect fail-safe shutdown
   7.200 +   SlaveAssigner   slaveAssigner;
   7.201 +   RequestHandler  requestHdlr;
   7.202 +   
   7.203 +   RequestHandler  createTaskHdlr;
   7.204 +   RequestHandler  endTaskHdlr;
   7.205 +   RequestHandler  createSlaveHdlr;
   7.206 +   RequestHandler  dissipateSlaveHdlr;
   7.207 +   RequestHandler  semDataCreator;
   7.208 +   RequestHandler  semDataInitializer;
   7.209 +  
   7.210 +   
   7.211 +      //Track slaves created, separately for each langlet? (in each process)
   7.212 +//   int32            numSlavesCreated;  //gives ordering to processor creation
   7.213 +//   int32            numSlavesAlive;    //used to detect fail-safe shutdown
   7.214     
   7.215        //when multi-lang, master polls sem env's to find one with work in it..
   7.216        // in single-lang case, flag ignored, master always asks lang for work
   7.217 -   int32   hasWork;    
   7.218 +   int32   hasWork;
   7.219   }
   7.220  PRSemEnv;
   7.221  
   7.222 -//=====================  Top Processor level Data Strucs  ======================
   7.223 +//The semantic env of every langlet must start with these two fields, so that
   7.224 +// PR can cast the void * to this struct, in order to access these two fields
   7.225  typedef struct
   7.226 + { int32     langMagicNumber;
   7.227 +   PRSemEnv *protoSemEnv;
   7.228 + }
   7.229 +PRLangSemEnv;
   7.230 +
   7.231 +//can cast any langlet's sem env to one of these, so PR can access values
   7.232 +typedef struct
   7.233 + { int32     langMagicNumber;
   7.234 +   PRSemEnv *protoSemEnv;
   7.235 + }
   7.236 +PRServSemEnv;
   7.237 +
   7.238 +enum PRTaskType
   7.239 + { GenericSlave = 1,
   7.240 +   AtomicTask,
   7.241 +   SuspendedTask
   7.242 + };
   7.243 +
   7.244 +struct _PRMetaTask
   7.245   { 
   7.246 -   
   7.247 +   PRTaskType      taskType;
   7.248 +   RequestHandler  reqHandler;      //Lang-specific hdlr for create, end, etc
   7.249 +   int32          *taskID;          //is standard PR ID
   7.250 +   SlaveVP        *slaveAssignedTo; //no valid until task animated
   7.251 +   TopLevelFn      topLevelFn;      //This is the Fn executes as the task
   7.252 +   void           *initData;        //The data taken by the function
   7.253 +   void           *langMetaTask;
   7.254 +
   7.255 +   //NOTE: info needed for "wait" functionality is inside lang's metaTask
   7.256 + };
   7.257 +//PRMetaTask
   7.258 +
   7.259 +/*The language's meta task is cast to this struct, inside PR, then the 
   7.260 + * back pointer to protoMetaTask is set.  Keeps existence of PRMetaTask hidden
   7.261 + * from plugin -- so can change later.
   7.262 + */
   7.263 +typedef struct
   7.264 + { int32       langMagicNumber;
   7.265 +   PRMetaTask *protoMetaTask;
   7.266   }
   7.267 -PRProcess;
   7.268 +PRLangMetaTask;
   7.269 + 
   7.270 +typedef struct
   7.271 + {
   7.272 +   void (*freeFn)(void *);
   7.273 + }
   7.274 +PRSemDataTemplate;
   7.275 +
   7.276 +typedef struct
   7.277 + { PRSemDataTemplate **semDatas;
   7.278 +   PRSemDataTemplate **semDatasIter;
   7.279 +   int32               numSemDatas;
   7.280 + }
   7.281 +PRSemDataHolder;
   7.282 +//=====================  Top Process level Data Strucs  ======================
   7.283 +
   7.284  /*This structure holds all the information PR needs to manage a program.  PR
   7.285   * stores information about what percent of CPU time the program is getting, 
   7.286   * 
   7.287   */
   7.288  typedef struct
   7.289 - { //void               *semEnv;
   7.290 -   //RequestHdlrFnPtr    requestHandler;
   7.291 -   //SlaveAssignerFnPtr  slaveAssigner;
   7.292 -   int32               numSlavesLive;
   7.293 -   void               *resultToReturn;
   7.294 + { 
   7.295 +   PRSemEnv semEnvs[NUM_SEM_ENVS_IN_PROCESS];    //used as a hash table
   7.296 +   PRSemEnv semEnvList[NUM_SEM_ENVS_IN_PROCESS]; //lines up the semEnvs, so can iterate through
   7.297 +   int32    numSemEnvs;     //must be less than num sem envs.. used to iterate through
   7.298 +    
   7.299 +   int32           numLiveGenericSlaves;
   7.300 +   int32           numLiveFreeTaskSlaves;
   7.301 +   int32           numLiveTasks;
   7.302 +   bool32          coreIsDone[NUM_CORES][CACHE_LINE_SZ]; //Fixes false sharing
   7.303 +   
   7.304 +   void           *resultToReturn;
   7.305    
   7.306     SlaveVP        *seedSlv;   
   7.307     
   7.308 -      //These are used to coordinate within the main function..?
   7.309 +      //These are used to coord with OS thread waiting for process to end
   7.310     bool32          executionIsComplete;
   7.311 -   pthread_mutex_t doneLock; //? not sure need these..?
   7.312 +   pthread_mutex_t doneLock;
   7.313     pthread_cond_t  doneCond;
   7.314   }
   7.315  PRProcess;
   7.316 @@ -280,7 +377,7 @@
   7.317  
   7.318  //=============================  Global Vars ================================
   7.319  
   7.320 -volatile MasterEnv      *_PRMasterEnv __align_to_cacheline__;
   7.321 +volatile MasterEnv      *_PRTopEnv __align_to_cacheline__;
   7.322  
   7.323     //these are global, but only used for startup and shutdown
   7.324  pthread_t       coreCtlrThdHandles[ NUM_CORES ]; //pthread's virt-procr state
   7.325 @@ -315,9 +412,6 @@
   7.326  void
   7.327  PR__start();
   7.328  
   7.329 -void
   7.330 -PR_SS__start_the_work_then_wait_until_done();
   7.331 -
   7.332  SlaveVP* 
   7.333  PR_SS__create_shutdown_slave();
   7.334  
   7.335 @@ -328,8 +422,7 @@
   7.336  PR_SS__cleanup_at_end_of_shutdown();
   7.337  
   7.338  void
   7.339 -PR_SS__register_langlets_semEnv( PRSemEnv *semEnv, int32 VSs_MAGIC_NUMBER, 
   7.340 -                              SlaveVP  *seedVP );
   7.341 +PR_SS__register_langlets_semEnv( PRSemEnv *semEnv, SlaveVP  *seedVP, int32 VSs_MAGIC_NUMBER );
   7.342  
   7.343  
   7.344  //==============    ===============
   7.345 @@ -339,35 +432,45 @@
   7.346  #define PR_PI__create_slaveVP PR_int__create_slaveVP
   7.347  #define PR_WL__create_slaveVP PR_int__create_slaveVP
   7.348  
   7.349 -   //Use this to create processor inside entry point & other places outside
   7.350 -   // the PR system boundary (IE, don't animate with a SlaveVP or MasterVP)
   7.351 +inline 
   7.352  SlaveVP *
   7.353 -PR_ext__create_slaveVP( TopLevelFnPtr fnPtr, void *dataParam );
   7.354 +PR_int__create_slot_slave();
   7.355  
   7.356 -inline SlaveVP *
   7.357 +inline 
   7.358 +SlaveVP *
   7.359  PR_int__create_slaveVP_helper( SlaveVP *newSlv,       TopLevelFnPtr  fnPtr,
   7.360                                  void      *dataParam, void           *stackLocs );
   7.361  
   7.362 -inline void
   7.363 +inline
   7.364 +PRMetaTask *
   7.365 +PR_int__create_generic_slave_meta_task( void *initData );
   7.366 +
   7.367 +inline
   7.368 +void
   7.369  PR_int__reset_slaveVP_to_TopLvlFn( SlaveVP *slaveVP, TopLevelFnPtr fnPtr,
   7.370                                void    *dataParam);
   7.371  
   7.372 -inline void
   7.373 +inline
   7.374 +void
   7.375  PR_int__point_slaveVP_to_OneParamFn( SlaveVP *slaveVP, void *fnPtr,
   7.376                                void    *param);
   7.377  
   7.378 -inline void
   7.379 +inline
   7.380 +void
   7.381  PR_int__point_slaveVP_to_TwoParamFn( SlaveVP *slaveVP, void *fnPtr,
   7.382                                void    *param1, void *param2);
   7.383  
   7.384 +inline
   7.385  void
   7.386  PR_int__dissipate_slaveVP( SlaveVP *slaveToDissipate );
   7.387  #define PR_PI__dissipate_slaveVP PR_int__dissipate_slaveVP
   7.388  //WL: dissipate a SlaveVP by sending a request
   7.389  
   7.390 +inline
   7.391  void
   7.392 -PR_ext__dissipate_slaveVP( SlaveVP *slaveToDissipate );
   7.393 +PR_int__dissipate_slaveVP_multilang( SlaveVP *slaveToDissipate );
   7.394  
   7.395 +inline
   7.396  void
   7.397  PR_int__throw_exception( char *msgStr, SlaveVP *reqstSlv, PRExcp *excpData );
   7.398  #define PR_PI__throw_exception  PR_int__throw_exception
   7.399 @@ -375,17 +478,51 @@
   7.400  PR_WL__throw_exception( char *msgStr, SlaveVP *reqstSlv,  PRExcp *excpData );
   7.401  #define PR_App__throw_exception PR_WL__throw_exception
   7.402  
   7.403 +inline
   7.404  void *
   7.405 -PR_int__give_sem_env_for( SlaveVP *animSlv );
   7.406 -#define PR_PI__give_sem_env_for  PR_int__give_sem_env_for
   7.407 -#define PR_SS__give_sem_env_for  PR_int__give_sem_env_for
   7.408 -//No WL version -- not safe!  if use in WL, be sure data rd & wr is stable
   7.409 +PR_int__give_sem_env_for_slave( SlaveVP *slave, int32 magicNumber );
   7.410 +#define PR_PI__give_sem_env_for  PR_int__give_sem_env_for_slave
   7.411 +#define PR_SS__give_sem_env_for_slave  PR_int__give_sem_env_for_slave
   7.412 +//No WL version -- not safe!  if use env in WL, be sure data rd & wr is stable
   7.413 +inline
   7.414 +PRSemEnv *
   7.415 +PR_int__give_proto_sem_env_for_slave( SlaveVP *slave, int32 magicNumber );
   7.416 +#define PR_PI__give_proto_sem_env_for  PR_int__give_proto_sem_env_for_slave
   7.417 +#define PR_SS__give_proto_sem_env_for_slave  PR_int__give_proto_sem_env_for_slave
   7.418 +//No WL version -- not safe!  if use env in WL, be sure data rd & wr is stable
   7.419 +inline
   7.420 +void *
   7.421 +PR_int__give_sem_env_from_process( PRProcess *process, int32 magicNumer );
   7.422 +#define PR_PI__give_sem_env_from_process  PR_int__give_sem_env_from_process
   7.423 +#define PR_SS__give_sem_env_from_process  PR_int__give_sem_env_from_process
   7.424 +//#define PR_WL__give_sem_env_from_process  PR_int__give_sem_env_from_process
   7.425 +//No WL version -- not safe!  if use env in WL, be sure data rd & wr is stable
   7.426  
   7.427 +inline
   7.428 +void *
   7.429 +PR_int__give_sem_data( SlaveVP *slave, int32 magicNumer );
   7.430 +#define PR_PI__give_sem_data  PR_int__give_sem_data
   7.431 +#define PR_SS__give_sem_data  PR_int__give_sem_data
   7.432 +#define PR_WL__give_sem_data  PR_int__give_sem_data
   7.433 +
   7.434 +
   7.435 +#define PR_int__give_lang_meta_task( slave, magicNumber )\
   7.436 +        slave->metaTask->langMetaTask;
   7.437 +#define PR_PI__give_lang_meta_task  PR_int__give_lang_meta_task
   7.438 +#define PR_SS__give_lang_meta_task  PR_int__give_lang_meta_task
   7.439 +#define PR_WL__give_lang_meta_task  PR_int__give_lang_meta_task
   7.440 +
   7.441 +inline
   7.442 +SlaveVP *
   7.443 +PR_PI__give_slave_assigned_to( PRLangMetaTask *langMetaTask );
   7.444 +        
   7.445 +void 
   7.446 +idle_fn(void* data, SlaveVP *animatingSlv);
   7.447  
   7.448  inline void
   7.449  PR_int__get_master_lock();
   7.450  
   7.451 -#define PR_int__release_master_lock() _PRMasterEnv->masterLock = UNLOCKED
   7.452 +#define PR_int__release_master_lock() _PRTopEnv->masterLock = UNLOCKED
   7.453  
   7.454  inline uint32_t
   7.455  PR_int__randomNumber();
   7.456 @@ -393,13 +530,13 @@
   7.457  //==============  Request Related  ===============
   7.458  
   7.459  void
   7.460 -PR_int__suspend_slaveVP_and_send_req( SlaveVP *callingSlv );
   7.461 +PR_WL__suspend_slaveVP_and_send_req( SlaveVP *callingSlv );
   7.462  
   7.463  inline void
   7.464  PR_WL__add_sem_request_in_mallocd_PRReqst( void *semReqData, SlaveVP *callingSlv );
   7.465  
   7.466  inline void
   7.467 -PR_WL__send_sem_request( void *semReqData, SlaveVP *callingSlv );
   7.468 +PR_WL__send_sem_request( void *semReq, SlaveVP *callingSlv, int32 magicNum );
   7.469  
   7.470  void
   7.471  PR_WL__send_create_slaveVP_req( void *semReqData, SlaveVP *reqstingSlv );
   7.472 @@ -408,7 +545,7 @@
   7.473  PR_WL__send_dissipate_req( SlaveVP *prToDissipate );
   7.474  
   7.475  inline void
   7.476 -PR_WL__send_PRSem_request( void *semReqData, SlaveVP *callingSlv );
   7.477 +PR_WL__send_service_request( void *semReqData, SlaveVP *callingSlv );
   7.478  
   7.479  PRReqst *
   7.480  PR_PI__take_next_request_out_of( SlaveVP *slaveWithReq );
   7.481 @@ -419,7 +556,7 @@
   7.482  #define PR_PI__take_sem_reqst_from( req ) req->semReqData
   7.483  
   7.484  void inline
   7.485 -PR_PI__handle_PRSemReq( PRReqst *req, SlaveVP *requestingSlv, void *semEnv,
   7.486 +PR_int__handle_PRServiceReq( PRReqst *req, SlaveVP *requestingSlv, void *semEnv,
   7.487                         ResumeSlvFnPtr resumeSlvFnPtr );
   7.488  
   7.489  //======================== MEASUREMENT ======================
   7.490 @@ -434,6 +571,14 @@
   7.491  PR_int__strDup( char *str );
   7.492  
   7.493  
   7.494 +//=========================  PR request handlers  ========================
   7.495 +void inline
   7.496 +handleMakeProbe( PRServReq *semReq, void *semEnv, ResumeSlvFnPtr resumeFn );
   7.497 +
   7.498 +void inline
   7.499 +handleThrowException( PRServReq *semReq, void *semEnv, ResumeSlvFnPtr resumeFn );
   7.500 +//=======================================================================
   7.501 +
   7.502  //========================= Probes =======================
   7.503  #include "Services_Offered_by_PR/Measurement_and_Stats/probes.h"
   7.504  
     8.1 --- a/PR__PI.c	Wed Sep 19 23:12:44 2012 -0700
     8.2 +++ b/PR__PI.c	Tue Oct 23 23:46:17 2012 -0700
     8.3 @@ -22,13 +22,7 @@
     8.4   * int: internal to the PR implementation
     8.5   */
     8.6  
     8.7 -//=========================  Local Declarations  ========================
     8.8 -void inline
     8.9 -handleMakeProbe( PRSemReq *semReq, void *semEnv, ResumeSlvFnPtr resumeFn );
    8.10  
    8.11 -void inline
    8.12 -handleThrowException( PRSemReq *semReq, void *semEnv, ResumeSlvFnPtr resumeFn );
    8.13 -//=======================================================================
    8.14  
    8.15   
    8.16  PRReqst *
    8.17 @@ -57,65 +51,5 @@
    8.18  */
    8.19  
    8.20  
    8.21 -/* This is for OS requests and PR infrastructure requests, such as to create
    8.22 - *  a probe -- a probe is inside the heart of PR-core, it's not part of any
    8.23 - *  language -- but it's also a semantic thing that's triggered from and used
    8.24 - *  in the application.. so it crosses abstractions..  so, need some special
    8.25 - *  pattern here for handling such requests.
    8.26 - * Doing this just like it were a second language sharing PR-core.
    8.27 - * 
    8.28 - * This is called from the language's request handler when it sees a request
    8.29 - *  of type PRSemReq
    8.30 - *
    8.31 - * TODO: Later change this, to give probes their own separate plugin & have
    8.32 - *  PR-core steer the request to appropriate plugin
    8.33 - * Do the same for OS calls -- look later at it..
    8.34 - */
    8.35 -void inline
    8.36 -PR_PI__handle_PRSemReq( PRReqst *req, SlaveVP *requestingSlv, void *semEnv,
    8.37 -                       ResumeSlvFnPtr resumeFn )
    8.38 - { PRSemReq *semReq;
    8.39  
    8.40 -   semReq = PR_PI__take_sem_reqst_from(req);
    8.41 -   if( semReq == NULL ) return;
    8.42 -   switch( semReq->reqType )  //sem handlers are all in other file
    8.43 -    {
    8.44 -      case make_probe:      handleMakeProbe(   semReq, semEnv, resumeFn);
    8.45 -         break;
    8.46 -      case throw_excp:  handleThrowException(  semReq, semEnv, resumeFn);
    8.47 -         break;
    8.48 -    }
    8.49 - }
    8.50  
    8.51 -/*
    8.52 - */
    8.53 -void inline
    8.54 -handleMakeProbe( PRSemReq *semReq, void *semEnv, ResumeSlvFnPtr resumeFn )
    8.55 - { IntervalProbe *newProbe;
    8.56 -
    8.57 -   newProbe          = PR_int__malloc( sizeof(IntervalProbe) );
    8.58 -   newProbe->nameStr = PR_int__strDup( semReq->nameStr );
    8.59 -   newProbe->hist    = NULL;
    8.60 -   newProbe->schedChoiceWasRecorded = FALSE;
    8.61 -
    8.62 -      //This runs in masterVP, so no race-condition worries
    8.63 -   newProbe->probeID =
    8.64 -            addToDynArray( newProbe, _PRMasterEnv->dynIntervalProbesInfo );
    8.65 -
    8.66 -   semReq->requestingSlv->dataRetFromReq = newProbe;
    8.67 -
    8.68 -   //This in inside PR, while resume_slaveVP fn is inside language, so pass
    8.69 -   // pointer from lang to here, then call it.
    8.70 -   (*resumeFn)( semReq->requestingSlv, semEnv );
    8.71 - }
    8.72 -
    8.73 -void inline
    8.74 -handleThrowException( PRSemReq *semReq, void *semEnv, ResumeSlvFnPtr resumeFn )
    8.75 - {
    8.76 -   PR_int__throw_exception(  semReq->msgStr, semReq->requestingSlv, semReq->exceptionData );
    8.77 -   
    8.78 -   (*resumeFn)( semReq->requestingSlv, semEnv );
    8.79 - }
    8.80 -
    8.81 -
    8.82 -
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/PR__SS.c	Tue Oct 23 23:46:17 2012 -0700
     9.3 @@ -0,0 +1,697 @@
     9.4 +/*
     9.5 + * Copyright 2010  OpenSourceStewardshipFoundation
     9.6 + *
     9.7 + * Licensed under BSD
     9.8 + */
     9.9 +
    9.10 +#include <stdio.h>
    9.11 +#include <stdlib.h>
    9.12 +#include <string.h>
    9.13 +#include <malloc.h>
    9.14 +#include <inttypes.h>
    9.15 +#include <sys/time.h>
    9.16 +#include <pthread.h>
    9.17 +
    9.18 +#include "PR.h"
    9.19 +
    9.20 +
    9.21 +#define thdAttrs NULL
    9.22 +
    9.23 +
    9.24 +/* MEANING OF   WL  PI  SS  int
    9.25 + * These indicate which places the function is safe to use.  They stand for:
    9.26 + * WL: Wrapper Library
    9.27 + * PI: Plugin 
    9.28 + * SS: Startup and Shutdown
    9.29 + * int: internal to the PR implementation
    9.30 + */
    9.31 +
    9.32 +
    9.33 +//===========================================================================
    9.34 +AnimSlot **
    9.35 +create_anim_slots( int32 coreSlotsAreOn );
    9.36 +
    9.37 +void
    9.38 +create_masterEnv();
    9.39 +
    9.40 +void
    9.41 +create_the_coreCtlr_OS_threads();
    9.42 +
    9.43 +MallocProlog *
    9.44 +create_free_list();
    9.45 +
    9.46 +void
    9.47 +endOSThreadFn( void *initData, SlaveVP *animatingSlv );
    9.48 +
    9.49 +
    9.50 +//===========================================================================
    9.51 +
    9.52 +/*Setup has two phases:
    9.53 + * 1) Semantic layer first calls init_PR, which creates masterEnv, and puts
    9.54 + *    the master Slv into the work-queue, ready for first "call"
    9.55 + * 2) Semantic layer then does its own init, which creates the seed virt
    9.56 + *    slave inside the semantic layer, ready to assign it when
    9.57 + *    asked by the first run of the animationMaster.
    9.58 + *
    9.59 + *This part is bit weird because PR really wants to be "always there", and
    9.60 + * have applications attach and detach..  for now, this PR is part of
    9.61 + * the app, so the PR system starts up as part of running the app.
    9.62 + *
    9.63 + *The semantic layer is isolated from the PR internals by making the
    9.64 + * semantic layer do setup to a state that it's ready with its
    9.65 + * initial Slvs, ready to assign them to slots when the animationMaster
    9.66 + * asks.  Without this pattern, the semantic layer's setup would
    9.67 + * have to modify slots directly to assign the initial virt-procrs, and put
    9.68 + * them into the readyToAnimateQ itself, breaking the isolation completely.
    9.69 + *
    9.70 + * 
    9.71 + *The semantic layer creates the initial Slv(s), and adds its
    9.72 + * own environment to masterEnv, and fills in the pointers to
    9.73 + * the requestHandler and slaveAssigner plug-in functions
    9.74 + */
    9.75 +
    9.76 +/*This allocates PR data structures, populates the master PRProc,
    9.77 + * and master environment, and returns the master environment to the semantic
    9.78 + * layer.
    9.79 + */
    9.80 +void
    9.81 +PR__start()
    9.82 + {
    9.83 +   #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
    9.84 +      create_masterEnv();
    9.85 +      printf( "\n\n Running in SEQUENTIAL mode \n\n" );
    9.86 +   #else
    9.87 +      create_masterEnv();
    9.88 +      DEBUG__printf1(dbgInfra,"Offset of lock in masterEnv: %d ", (int32)offsetof(MasterEnv,masterLock) );
    9.89 +      create_the_coreCtlr_OS_threads();
    9.90 +   #endif
    9.91 + }
    9.92 +
    9.93 +
    9.94 +/*A process is represented by a structure that holds all the process-specific
    9.95 + * information:
    9.96 + *-] The hash-array containing the semantic environs of any langlets started
    9.97 + *   inside the process.
    9.98 + *-] Flags used to detect the end of activity in the process
    9.99 + *-] Counter of num live slaves and num live tasks in the process
   9.100 + * 
   9.101 + *PR automatically generates the seedVP when it creates the process, and
   9.102 + * inserts the processID of the newly created process into it. 
   9.103 + */
   9.104 +PRProcess *
   9.105 +PR__create_process( TopLevelFnPtr seed_Fn, void *seedData )
   9.106 + { SlaveVP    *seedSlv;
   9.107 +   PRProcess  *process;
   9.108 +   PRMetaTask *metaTask;
   9.109 +   PRSemEnv   *semEnvs;
   9.110 +   int32       idx;
   9.111 +   
   9.112 +   process = malloc( sizeof(PRProcess) );
   9.113 +   process->numSemEnvs = 0;
   9.114 +   semEnvs = process->semEnvs;
   9.115 +   for( idx = 0; idx < NUM_SEM_ENVS_IN_PROCESS; idx++ )
   9.116 +    { semEnvs[idx].langSemEnv = NULL;
   9.117 +      semEnvs[idx].chainedSemEnv = NULL;
   9.118 +    }
   9.119 +         
   9.120 +      //A Process starts with one slave, the seed slave
   9.121 +   seedSlv = PR_int__create_slaveVP( seed_Fn, seedData );
   9.122 +   
   9.123 +   seedSlv->processSlaveIsIn = process;
   9.124 +   
   9.125 +      //seed slave is a generic slave, so make a generic slave meta task for it
   9.126 +   metaTask          = PR_int__create_generic_slave_meta_task( seedData );
   9.127 +   seedSlv->metaTask = metaTask;
   9.128 +   
   9.129 +   process->numLiveGenericSlaves = 1; //count the  seed
   9.130 +   process->numLiveTasks = 0;
   9.131 +   
   9.132 +   PRServSemEnv *
   9.133 +   servicesSemEnv = PR_SS__malloc( sizeof(PRServSemEnv) );
   9.134 +   PR_SS__register_langlets_semEnv( servicesSemEnv, seedSlv, PRSERV_MAGIC_NUMBER );
   9.135 +
   9.136 +      //resume seedVP into PR's built-in services language's semantic env
   9.137 +   PRServ__resume_slaveVP( seedSlv, servicesSemEnv );
   9.138 +   
   9.139 +   return process;
   9.140 + }
   9.141 +
   9.142 +
   9.143 +/*This gets the process struct out of the seedVP, then gets the semEnv-holding
   9.144 + * struct out of that, then inserts the semantic env into that struct, using
   9.145 + * the magic number as the key to the sem env placement.  The master will 
   9.146 + * use the magic number from a request to retrieve the semantic env appropriate
   9.147 + * for the construct that made the request.
   9.148 + */
   9.149 +void
   9.150 +PR_SS__register_langlets_semEnv( void *_semEnv, SlaveVP  *seedVP, int32 magicNum )
   9.151 + { PRSemEnv     *protoSemEnv;
   9.152 +   PRProcess    *process;
   9.153 +   PRServSemEnv *semEnv = (PRServSemEnv *)_semEnv;
   9.154 +
   9.155 +   process     = seedVP->processSlaveIsIn;
   9.156 +   
   9.157 +   protoSemEnv = PR_int__create_proto_sem_env_in_process( process, magicNum );
   9.158 +   protoSemEnv->langSemEnv      = semEnv;
   9.159 +   protoSemEnv->langMagicNumber = magicNum;
   9.160 +   protoSemEnv->hasWork         = FALSE;
   9.161 +   
   9.162 +   semEnv->protoSemEnv          = protoSemEnv;
   9.163 + }
   9.164 +
   9.165 +/*These store the pointer to handler into the semantic env -- semantic env
   9.166 + * found by using magic num to look it up in the process that the seedVP
   9.167 + * is inside of.
   9.168 + */
   9.169 +void
   9.170 +PR_SS__register_create_task_handler( RequestHandler createTaskHandler, SlaveVP *seedVP, int32 magicNum )
   9.171 + { PRSemEnv *semEnv;
   9.172 + 
   9.173 +   semEnv = PR_SS__give_proto_sem_env_for_slave( seedVP, magicNum );
   9.174 +   semEnv->createTaskHdlr = createTaskHandler; 
   9.175 + }
   9.176 +void
   9.177 +PR_SS__register_end_task_handler( RequestHandler endTaskHandler, SlaveVP *seedVP, int32 magicNum )
   9.178 + { PRSemEnv *semEnv;
   9.179 + 
   9.180 +   semEnv = PR_SS__give_proto_sem_env_for_slave( seedVP, magicNum );
   9.181 +   semEnv->endTaskHdlr = endTaskHandler; 
   9.182 + }
   9.183 +void
   9.184 +PR_SS__register_create_slave_handler( RequestHandler createSlvHandler, SlaveVP *seedVP, int32 magicNum )
   9.185 + { PRSemEnv *semEnv;
   9.186 + 
   9.187 +   semEnv = PR_SS__give_proto_sem_env_for_slave( seedVP, magicNum );
   9.188 +   semEnv->createSlaveHdlr = createSlvHandler; 
   9.189 + }
   9.190 +void
   9.191 +PR_SS__register_dissipate_slave_handler( RequestHandler dissipateHandler, SlaveVP *seedVP, int32 magicNum )
   9.192 + { PRSemEnv *semEnv;
   9.193 + 
   9.194 +   semEnv = PR_SS__give_proto_sem_env_for_slave( seedVP, magicNum );
   9.195 +   semEnv->dissipateSlaveHdlr = dissipateHandler; 
   9.196 + }
   9.197 +void
   9.198 +PR_SS__register_request_handler( RequestHandler reqHandler, SlaveVP *seedVP, int32 magicNum )
   9.199 + { PRSemEnv *semEnv;
   9.200 + 
   9.201 +   semEnv = PR_SS__give_proto_sem_env_for_slave( seedVP, magicNum );
   9.202 +   semEnv->requestHdlr = reqHandler; 
   9.203 + }
   9.204 +void
   9.205 +PR_SS__register_assigner( SlaveAssigner assigner, SlaveVP *seedVP, int32 magicNum )
   9.206 + { PRSemEnv *semEnv;
   9.207 + 
   9.208 +   semEnv = PR_SS__give_proto_sem_env_for_slave( seedVP, magicNum );
   9.209 +   semEnv->slaveAssigner = assigner; 
   9.210 + }
   9.211 +void
   9.212 +PR_SS__register_sem_data_creator( SemDataCreator semDataCreator, 
   9.213 +                                              SlaveVP *seedVP, int32 magicNum )
   9.214 + { PRSemEnv *semEnv;
   9.215 + 
   9.216 +   semEnv = PR_SS__give_proto_sem_env_for_slave( seedVP, magicNum );
   9.217 +   semEnv->semDataCreator = semDataCreator; 
   9.218 + }
   9.219 +void
   9.220 +PR_SS__register_sem_data_initializer( SemDataInitializer semDataInitializer, 
   9.221 +                                              SlaveVP *seedVP, int32 magicNum )
   9.222 + { PRSemEnv *semEnv;
   9.223 + 
   9.224 +   semEnv = PR_SS__give_proto_sem_env_for_slave( seedVP, magicNum );
   9.225 +   semEnv->semDataInitializer = semDataInitializer; 
   9.226 + }
   9.227 +
   9.228 +
   9.229 +/*TODO: finish implementing
   9.230 + *This function returns information about the version of PR, the language
   9.231 + * the program is being run in, its version, and information on the 
   9.232 + * hardware.
   9.233 + */
   9.234 +/*
   9.235 +char *
   9.236 +PR_App__give_environment_string()
   9.237 + {
   9.238 +   //--------------------------
   9.239 +    fprintf(output, "#\n# >> Build information <<\n");
   9.240 +    fprintf(output, "# GCC VERSION: %d.%d.%d\n",__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__);
   9.241 +    fprintf(output, "# Build Date: %s %s\n", __DATE__, __TIME__);
   9.242 +    
   9.243 +    fprintf(output, "#\n# >> Hardware information <<\n");
   9.244 +    fprintf(output, "# Hardware Architecture: ");
   9.245 +   #ifdef __x86_64
   9.246 +    fprintf(output, "x86_64");
   9.247 +   #endif //__x86_64
   9.248 +   #ifdef __i386
   9.249 +    fprintf(output, "x86");
   9.250 +   #endif //__i386
   9.251 +    fprintf(output, "\n");
   9.252 +    fprintf(output, "# Number of Cores: %d\n", NUM_CORES);
   9.253 +   //--------------------------
   9.254 +    
   9.255 +   //PR Plugins
   9.256 +    fprintf(output, "#\n# >> PR Plugins <<\n");
   9.257 +    fprintf(output, "# Language : ");
   9.258 +    fprintf(output, _LANG_NAME_);
   9.259 +    fprintf(output, "\n");
   9.260 +       //Meta info gets set by calls from the language during its init,
   9.261 +       // and info registered by calls from inside the application
   9.262 +    fprintf(output, "# Assigner: %s\n", _PRTopEnv->metaInfo->assignerInfo);
   9.263 +
   9.264 +   //--------------------------
   9.265 +   //Application
   9.266 +    fprintf(output, "#\n# >> Application <<\n");
   9.267 +    fprintf(output, "# Name: %s\n", _PRTopEnv->metaInfo->appInfo);
   9.268 +    fprintf(output, "# Data Set:\n%s\n",_PRTopEnv->metaInfo->inputSet);
   9.269 +    
   9.270 +   //--------------------------
   9.271 + }
   9.272 + */
   9.273 + 
   9.274 +
   9.275 +/*A pointer to the startup-function for the language is given as the last
   9.276 + * argument to the call.  Use this to initialize a program in the language.
   9.277 + * This creates a data structure that encapsulates the bookkeeping info
   9.278 + * PR uses to track and schedule a program run.
   9.279 + */
   9.280 +/*PRProcess *
   9.281 +PR__spawn_program_on_data_in_Lang( TopLevelFnPtr seed_fn, void *data )
   9.282 + { PRProcess *newProcess;
   9.283 +   newProcess = malloc( sizeof(PRProcess) );
   9.284 +   
   9.285 +   newProcess->doneLock = PTHREAD_MUTEX_INITIALIZER;
   9.286 +   newProcess->doneCond = PTHREAD_COND_INITIALIZER;
   9.287 +   newProcess->executionIsComplete = FALSE;
   9.288 +   newProcess->numSlavesLive = 0;
   9.289 +   
   9.290 +   newProcess->dataForSeed = data;
    9.292 +   newProcess->seedFnPtr   = seed_fn;
   9.292 +   
   9.293 +      //The language's spawn-process function fills in the plugin function-ptrs in
   9.294 +      // the PRProcess struct, gives the struct to PR, which then makes and
   9.295 +      // queues the seed SlaveVP, which starts processors made from the code being
   9.296 +      // animated.
   9.297 +    
   9.298 +   (*langInitFnPtr)( newProcess );  
   9.299 +   
   9.300 +   return newProcess;
   9.301 + }
   9.302 +*/
   9.303 +
   9.304 +
   9.305 +/*When all SlaveVPs owned by the program-run associated to the process have
   9.306 + * dissipated, then return from this call.  There is no language to cleanup,
   9.307 + * and PR does not shutdown..  but the process bookkeeping structure,
   9.308 + * which is used by PR to track and schedule the program, is freed.
   9.309 + *The PRProcess structure is kept until this call collects the results from it,
   9.310 + * then freed.  If the process is not done yet when PR gets this
   9.311 + * call, then this call waits..  the challenge here is that this call comes from
   9.312 + * a live OS thread that's outside PR..  so, inside here, it waits on a 
   9.313 + * condition..  then it's a PR thread that signals this to wake up..
   9.314 + *First checks whether the process is done, if yes, calls the clean-up fn then
   9.315 + * returns the result extracted from the PRProcess struct.
   9.316 + *If process not done yet, then performs a wait (in a loop to be sure the
   9.317 + * wakeup is not spurious, which can happen).  PR registers the wait, and upon
   9.318 + * the process ending (last SlaveVP owned by it dissipates), then PR signals
   9.319 + * this to wakeup.  This then calls the cleanup fn and returns the result.
   9.320 + */
   9.321 +/*
   9.322 +void *
   9.323 +PR_App__give_results_when_done_for( PRProcess *process )
   9.324 + { void *result;
   9.325 +   
   9.326 +   pthread_mutex_lock( process->doneLock );
   9.327 +   while( !(process->executionIsComplete) )
   9.328 +    {
   9.329 +      pthread_cond_wait( process->doneCond,
   9.330 +                         process->doneLock );
   9.331 +    }
   9.332 +   pthread_mutex_unlock( process->doneLock );
   9.333 +   
   9.334 +   result = process->resultToReturn;
   9.335 +   
   9.336 +   PR_int__cleanup_process_after_done( process );
   9.337 +   free( process );  //was malloc'd above, so free it here
   9.338 +   
   9.339 +   return result;
   9.340 + }
   9.341 +*/
   9.342 +
   9.343 +
   9.344 +void
   9.345 +create_masterEnv()
   9.346 + { MasterEnv       *masterEnv;
   9.347 +   PRQueueStruc   **readyToAnimateQs;
   9.348 +   int              coreIdx;
   9.349 +   SlaveVP        **masterVPs;
   9.350 +   AnimSlot      ***allAnimSlots; //ptr to array of ptrs
   9.351 +
   9.352 +
   9.353 +      //Make the master env, which holds everything else
   9.354 +   _PRTopEnv = malloc( sizeof(MasterEnv) );
   9.355 +
   9.356 +        //Very first thing put into the master env is the free-list, seeded
   9.357 +        // with a massive initial chunk of memory.
   9.358 +        //After this, all other mallocs are PR__malloc.
   9.359 +   _PRTopEnv->freeLists        = PR_ext__create_free_list();
   9.360 +   
   9.361 +   
   9.362 +   //===================== Only PR__malloc after this ====================
   9.363 +   masterEnv     = (MasterEnv*)_PRTopEnv;
   9.364 +   
   9.365 +      //Make a readyToAnimateQ for each core controller
   9.366 +   readyToAnimateQs = PR_int__malloc( NUM_CORES * sizeof(PRQueueStruc *) );
   9.367 +   masterVPs        = PR_int__malloc( NUM_CORES * sizeof(SlaveVP *) );
   9.368 +
   9.369 +      //One array for each core, several in array, core's masterVP scheds all
   9.370 +   allAnimSlots    = PR_int__malloc( NUM_CORES * sizeof(AnimSlot *) );
   9.371 +
   9.372 +   _PRTopEnv->numSlavesAlive = 0;  //used to detect shut-down condition
   9.373 +
   9.374 +//========================================
   9.375 +   
   9.376 +   Copied__fixThis;
   9.377 +   
   9.378 +   semEnv->freeExtraTaskSlvQ    = makePRQ();
   9.379 +   semEnv->numLiveExtraTaskSlvs   = 0; //must be last
   9.380 +   semEnv->numLiveThreadSlvs      = 1; //must be last, counts the seed
   9.381 +   
   9.382 +   semEnv->shutdownInitiated = FALSE;
   9.383 +   semEnv->coreIsDone = PR_int__malloc( NUM_CORES * sizeof( bool32 ) );
   9.384 +   
   9.385 +      //For each animation slot, there is an idle slave, and an initial
   9.386 +      // slave assigned as the current-task-slave.  Create them here.
   9.387 +   int32    coreNum,  slotNum;
   9.388 +   SlaveVP *idleSlv, *slotTaskSlv;
   9.389 +   for( coreNum = 0;  coreNum < NUM_CORES; coreNum++ )
   9.390 +    { semEnv->coreIsDone[coreNum] = FALSE; //use during shutdown
   9.391 +    
   9.392 +      for( slotNum = 0; slotNum < NUM_ANIM_SLOTS; ++slotNum )
   9.393 +       { idleSlv = PR__create_slave_helper( &idle_fn, NULL, semEnv, 0);
   9.394 +         idleSlv->coreAnimatedBy                = coreNum;
   9.395 +         idleSlv->animSlotAssignedTo            =
   9.396 +                               _PRTopEnv->allAnimSlots[coreNum][slotNum];
   9.397 +         _PRTopEnv->idleSlv[coreNum][slotNum] = idleSlv;
   9.398 +         
   9.399 +         slotTaskSlv = PR_int__create_slot_slave( );
   9.400 +         slotTaskSlv->coreAnimatedBy            = coreNum;
   9.401 +         slotTaskSlv->animSlotAssignedTo        = 
   9.402 +                               _PRTopEnv->allAnimSlots[coreNum][slotNum];
   9.403 +         
   9.404 +         slotTaskSlv->needsTaskAssigned = TRUE;
   9.405 +         slotTaskSlv->slaveType         = SlotTaskSlv;
   9.406 +         _PRTopEnv->slotTaskSlvs[coreNum][slotNum] = slotTaskSlv;
   9.407 +       }
   9.408 +    }
   9.409 +
   9.410 +      //create the recycle queue where free task slaves are put after their task ends
   9.411 +   semEnv->freeTaskSlvRecycleQ  = makePRQ();
   9.412 +   
   9.413 +
   9.414 +   semEnv->numLiveFreeTaskSlvs   = 0;
   9.415 +   semEnv->numLiveGenericSlvs    = 0; //none existent yet.. "create process" creates the seeds  
   9.416 +//==================================================================
   9.417 +   
   9.418 +   _PRTopEnv->numSlavesCreated = 0;  //used by create slave to set slave ID
   9.419 +   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
   9.420 +    {    
   9.421 +      readyToAnimateQs[ coreIdx ] = makePRQ();
   9.422 +      
   9.423 +         //Q: should give masterVP core-specific info as its init data?
   9.424 +      masterVPs[ coreIdx ] = PR_int__create_slaveVP( (TopLevelFnPtr)&animationMaster, (void*)masterEnv );
   9.425 +      masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;
   9.426 +      masterVPs[ coreIdx ]->typeOfVP = Master;
   9.427 +      allAnimSlots[ coreIdx ] = create_anim_slots( coreIdx ); //makes for one core
   9.428 +    }
   9.429 +   _PRTopEnv->masterVPs        = masterVPs;
   9.430 +   _PRTopEnv->masterLock       = UNLOCKED;
   9.431 +   _PRTopEnv->seed1 = rand()%1000; // init random number generator
   9.432 +   _PRTopEnv->seed2 = rand()%1000; // init random number generator
   9.433 +   _PRTopEnv->allAnimSlots    = allAnimSlots;
   9.434 +   _PRTopEnv->measHistsInfo = NULL; 
   9.435 +
   9.436 +   //============================= MEASUREMENT STUFF ========================
   9.437 +      
   9.438 +         MEAS__Make_Meas_Hists_for_Susp_Meas;
   9.439 +         MEAS__Make_Meas_Hists_for_Master_Meas;
   9.440 +         MEAS__Make_Meas_Hists_for_Master_Lock_Meas;
   9.441 +         MEAS__Make_Meas_Hists_for_Malloc_Meas;
   9.442 +         MEAS__Make_Meas_Hists_for_Plugin_Meas;
   9.443 +         MEAS__Make_Meas_Hists_for_Language;
   9.444 +
   9.445 +         PROBES__Create_Probe_Bookkeeping_Vars;
   9.446 +         
   9.447 +         HOLISTIC__Setup_Perf_Counters;
   9.448 +         
   9.449 +   //========================================================================
   9.450 + }
   9.451 +
   9.452 +AnimSlot **
   9.453 +create_anim_slots( int32 coreSlotsAreOn )
   9.454 + { AnimSlot  **animSlots;
   9.455 +   int i;
   9.456 +
   9.457 +   animSlots  = PR_int__malloc( NUM_ANIM_SLOTS * sizeof(AnimSlot *) );
   9.458 +
   9.459 +   for( i = 0; i < NUM_ANIM_SLOTS; i++ )
   9.460 +    {
   9.461 +      animSlots[i] = PR_int__malloc( sizeof(AnimSlot) );
   9.462 +
   9.463 +         //Set state to mean "handling requests done, slot needs filling"
   9.464 +      animSlots[i]->workIsDone         = FALSE;
   9.465 +      animSlots[i]->needsSlaveAssigned = TRUE;
   9.466 +      animSlots[i]->slotIdx            = i; //quick retrieval of slot pos
   9.467 +      animSlots[i]->coreSlotIsOn       = coreSlotsAreOn;
   9.468 +    }
   9.469 +   return animSlots;
   9.470 + }
   9.471 +
   9.472 +
   9.473 +void
   9.474 +freeAnimSlots( AnimSlot **animSlots )
   9.475 + { int i;
   9.476 +   for( i = 0; i < NUM_ANIM_SLOTS; i++ )
   9.477 +    {
   9.478 +      PR_int__free( animSlots[i] );
   9.479 +    }
   9.480 +   PR_int__free( animSlots );
   9.481 + }
   9.482 +
   9.483 +
   9.484 +void
   9.485 +create_the_coreCtlr_OS_threads()
   9.486 + {
   9.487 +   //========================================================================
   9.488 +   //                      Create the Threads
   9.489 +   int coreIdx, retCode;
   9.490 +
   9.491 +      //Need the threads to be created suspended, and wait for a signal
   9.492 +      // before proceeding -- gives time after creating to initialize other
   9.493 +      // stuff before the coreCtlrs set off.
   9.494 +   _PRTopEnv->setupComplete = 0;
   9.495 +   
   9.496 +      //initialize the cond used to make the new threads wait and sync up
   9.497 +      //must do this before *creating* the threads..
   9.498 +   pthread_mutex_init( &suspendLock, NULL );
   9.499 +   pthread_cond_init( &suspendCond, NULL );
   9.500 +
   9.501 +      //Make the threads that animate the core controllers
   9.502 +   for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
   9.503 +    { coreCtlrThdParams[coreIdx]          = PR_int__malloc( sizeof(ThdParams) );
   9.504 +      coreCtlrThdParams[coreIdx]->coreNum = coreIdx;
   9.505 +
   9.506 +      retCode =
   9.507 +      pthread_create( &(coreCtlrThdHandles[coreIdx]),
   9.508 +                        thdAttrs,
   9.509 +                       &coreController,
   9.510 +               (void *)(coreCtlrThdParams[coreIdx]) );
   9.511 +      if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(1);}
   9.512 +    }
   9.513 + }
   9.514 +
   9.515 +
   9.516 +/*This is what causes the PR system to initialize.. then waits for it to
   9.517 + * exit.
   9.518 + * 
   9.519 + *Wrapper lib layer calls this when it wants the system to start running..
   9.520 + */
   9.521 +/*
   9.522 +void
   9.523 +PR_SS__start_the_work_then_wait_until_done()
   9.524 + { 
   9.525 +#ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
   9.526 +   //Only difference between version with an OS thread pinned to each core and
   9.527 +   // the sequential version of PR is PR__init_Seq, this, and coreCtlr_Seq.
   9.528 +   //
   9.529 +         //Instead of un-suspending threads, just call the one and only
   9.530 +         // core ctlr (sequential version), in the main thread.
   9.531 +      coreCtlr_Seq( NULL );
   9.532 +      flushRegisters();
   9.533 +#else
   9.534 +   int coreIdx;
   9.535 +      //Start the core controllers running
   9.536 +   
   9.537 +      //tell the core controller threads that setup is complete
   9.538 +      //get lock, to lock out any threads still starting up -- they'll see
   9.539 +      // that setupComplete is true before entering while loop, and so never
   9.540 +      // wait on the condition
   9.541 +   pthread_mutex_lock(     &suspendLock );
   9.542 +   _PRTopEnv->setupComplete = 1;
   9.543 +   pthread_mutex_unlock(   &suspendLock );
   9.544 +   pthread_cond_broadcast( &suspendCond );
   9.545 +   
   9.546 +   
   9.547 +      //wait for all to complete
   9.548 +   for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
   9.549 +    {
   9.550 +      pthread_join( coreCtlrThdHandles[coreIdx], NULL );
   9.551 +    }
   9.552 +   
   9.553 +      //NOTE: do not clean up PR env here -- semantic layer has to have
   9.554 +      // a chance to clean up its environment first, then do a call to free
   9.555 +      // the Master env and rest of PR locations
   9.556 +#endif
   9.557 + }
   9.558 +*/
   9.559 +
   9.560 +SlaveVP* PR_SS__create_shutdown_slave()
   9.561 + {
   9.562 +   SlaveVP* shutdownVP;
   9.563 +   
   9.564 +   shutdownVP = PR_int__create_slaveVP( &endOSThreadFn, NULL );
   9.565 +   shutdownVP->typeOfVP = Shutdown;
   9.566 +    
   9.567 +   return shutdownVP;
   9.568 + }
   9.569 +
   9.570 +//TODO: look at architecting cleanest separation between request handler
   9.571 +// and animation master, for dissipate, create, shutdown, and other non-semantic
   9.572 +// requests.  Issue is chain: one removes requests from AppSlv, one dispatches
   9.573 +// on type of request, and one handles each type..  but some types require
   9.574 +// action from both request handler and animation master -- maybe just give the
   9.575 +// request handler calls like:  PR__handle_X_request_type
   9.576 +
   9.577 +
   9.578 +/*This is called by the semantic layer's request handler when it decides its
   9.579 + * time to shut down the PR system.  Calling this causes the core controller OS
   9.580 + * threads to exit, which unblocks the entry-point function that started up
   9.581 + * PR, and allows it to grab the result and return to the original single-
   9.582 + * threaded application.
   9.583 + * 
   9.584 + *The _PRTopEnv is needed by this shut down function, so the create-seed-
   9.585 + * and-wait function has to free a bunch of stuff after it detects the
   9.586 + * threads have all died: the masterEnv, the thread-related locations,
   9.587 + * masterVP any AppSlvs that might still be allocated and sitting in the
   9.588 + * semantic environment, or have been orphaned in the _PRWorkQ.
   9.589 + * 
   9.590 + *NOTE: the semantic plug-in is expected to use PR__malloc to get all the
   9.591 + * locations it needs, and give ownership to masterVP.  Then, they will be
   9.592 + * automatically freed.
   9.593 + *
    9.594 +In here, create one core-loop shut-down processor for each core controller and put
   9.595 + * them all directly into the readyToAnimateQ.
   9.596 + *Note, this function can ONLY be called after the semantic environment no
   9.597 + * longer cares if AppSlvs get animated after the point this is called.  In
   9.598 + * other words, this can be used as an abort, or else it should only be
   9.599 + * called when all AppSlvs have finished dissipate requests -- only at that
   9.600 + * point is it sure that all results have completed.
   9.601 + */
   9.602 +void
   9.603 +PR_SS__shutdown()
   9.604 + { int32       coreIdx;
   9.605 +   SlaveVP    *shutDownSlv;
   9.606 +   AnimSlot **animSlots;
   9.607 +      //create the shutdown processors, one for each core controller -- put them
   9.608 +      // directly into the Q -- each core will die when gets one
   9.609 +   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
   9.610 +    {    //Note, this is running in the master
   9.611 +      shutDownSlv = PR_SS__create_shutdown_slave();
   9.612 +         //last slave has dissipated, so no more in slots, so write
    9.613 +         // shut down slave into first animation slot.
   9.614 +      animSlots = _PRTopEnv->allAnimSlots[ coreIdx ];
   9.615 +      animSlots[0]->slaveAssignedToSlot = shutDownSlv;
   9.616 +      animSlots[0]->needsSlaveAssigned = FALSE;
   9.617 +      shutDownSlv->coreAnimatedBy = coreIdx;
   9.618 +      shutDownSlv->animSlotAssignedTo = animSlots[ 0 ];
   9.619 +    }
   9.620 + }
   9.621 +
   9.622 +
   9.623 +/*Am trying to be cute, avoiding IF statement in coreCtlr that checks for
   9.624 + * a special shutdown slaveVP.  Ended up with extra-complex shutdown sequence.
   9.625 + *This function has the sole purpose of setting the stack and framePtr
   9.626 + * to the coreCtlr's stack and framePtr.. it does that then jumps to the
   9.627 + * core ctlr's shutdown point -- might be able to just call Pthread_exit
   9.628 + * from here, but am going back to the pthread's stack and setting everything
   9.629 + * up just as if it never jumped out, before calling pthread_exit.
   9.630 + *The end-point of core ctlr will free the stack and so forth of the
   9.631 + * processor that animates this function, (this fn is transfering the
   9.632 + * animator of the AppSlv that is in turn animating this function over
   9.633 + * to core controller function -- note that this slices out a level of virtual
   9.634 + * processors).
   9.635 + */
   9.636 +void
   9.637 +endOSThreadFn( void *initData, SlaveVP *animatingSlv )
   9.638 + { 
   9.639 +   #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
   9.640 +    asmTerminateCoreCtlrSeq(animatingSlv);
   9.641 +   #else
   9.642 +    asmTerminateCoreCtlr(animatingSlv);
   9.643 +   #endif
   9.644 + }
   9.645 +
   9.646 +
   9.647 +/*This is called from the startup & shutdown
   9.648 + */
   9.649 +void
   9.650 +PR_SS__cleanup_at_end_of_shutdown()
   9.651 + { 
   9.652 +      //Before getting rid of everything, print out any measurements made
   9.653 +   if( _PRTopEnv->measHistsInfo != NULL )
   9.654 +    { forAllInDynArrayDo( _PRTopEnv->measHistsInfo, (DynArrayFnPtr)&printHist );
   9.655 +      forAllInDynArrayDo( _PRTopEnv->measHistsInfo, (DynArrayFnPtr)&saveHistToFile);
   9.656 +      forAllInDynArrayDo( _PRTopEnv->measHistsInfo, (DynArrayFnPtr)&freeHist );
   9.657 +    }
   9.658 +   
   9.659 +   MEAS__Print_Hists_for_Susp_Meas;
   9.660 +   MEAS__Print_Hists_for_Master_Meas;
   9.661 +   MEAS__Print_Hists_for_Master_Lock_Meas;
   9.662 +   MEAS__Print_Hists_for_Malloc_Meas;
   9.663 +   MEAS__Print_Hists_for_Plugin_Meas;
   9.664 +   
   9.665 +
   9.666 +      //All the environment data has been allocated with PR__malloc, so just
   9.667 +      // free its internal big-chunk and all inside it disappear.
   9.668 +/*
   9.669 +   readyToAnimateQs = _PRTopEnv->readyToAnimateQs;
   9.670 +   masterVPs        = _PRTopEnv->masterVPs;
   9.671 +   allAnimSlots    = _PRTopEnv->allAnimSlots;
   9.672 +   
   9.673 +   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
   9.674 +    {
   9.675 +      freePRQ( readyToAnimateQs[ coreIdx ] );
   9.676 +         //master Slvs were created external to PR, so use external free
   9.677 +      PR_int__dissipate_slaveVP( masterVPs[ coreIdx ] );
   9.678 +      
   9.679 +      freeAnimSlots( allAnimSlots[ coreIdx ] );
   9.680 +    }
   9.681 +   
   9.682 +   PR_int__free( _PRTopEnv->readyToAnimateQs );
   9.683 +   PR_int__free( _PRTopEnv->masterVPs );
   9.684 +   PR_int__free( _PRTopEnv->allAnimSlots );
   9.685 +   
   9.686 +   //============================= MEASUREMENT STUFF ========================
   9.687 +   #ifdef PROBES__TURN_ON_STATS_PROBES
   9.688 +   freeDynArrayDeep( _PRTopEnv->dynIntervalProbesInfo, &PR_WL__free_probe);
   9.689 +   #endif
   9.690 +   //========================================================================
   9.691 +*/
   9.692 +      //These are the only two that use system free 
   9.693 +   PR_ext__free_free_list( _PRTopEnv->freeLists );
   9.694 +   free( (void *)_PRTopEnv );
   9.695 + }
   9.696 +
   9.697 +
   9.698 +//================================
   9.699 +
   9.700 +
    10.1 --- a/PR__WL.c	Wed Sep 19 23:12:44 2012 -0700
    10.2 +++ b/PR__WL.c	Tue Oct 23 23:46:17 2012 -0700
    10.3 @@ -24,6 +24,20 @@
    10.4  
    10.5  
    10.6  
    10.7 +
    10.8 +inline int32 *
    10.9 +PR__give_task_ID( SlaveVP *animSlv, int32 magicNumber )
   10.10 + {
   10.11 +   return animSlv->metaTask->taskID;
   10.12 + }
   10.13 +
   10.14 +SlaveVP *
   10.15 +PR__give_slave_of_task_ID( int32 *taskID, SlaveVP *animSlv )
   10.16 + {
   10.17 +   metaTask = lookup( taskID );
   10.18 +   return metaTask->slaveAssignedTo;
   10.19 + }
   10.20 +
   10.21  /*For this implementation of PR, it may not make much sense to have the
   10.22   * system of requests for creating a new processor done this way.. but over
   10.23   * the scope of single-master, multi-master, mult-tasking, OS-implementing,
   10.24 @@ -38,15 +52,18 @@
   10.25   * to the plugin.
   10.26   */
   10.27  void
   10.28 -PR_WL__send_create_slaveVP_req( void *semReqData, SlaveVP *reqstingSlv )
   10.29 +PR_WL__send_create_slaveVP_req( void *semReq, int32 *slvID, SlaveVP *reqstingSlv, 
   10.30 +                                int32 magicNum )
   10.31   { PRReqst req;
   10.32  
   10.33 -   req.reqType          = createReq;
   10.34 -   req.semReqData       = semReqData;
   10.35 -   req.nextReqst        = reqstingSlv->request;
   10.36 +   req.reqType          = SlvCreate;
   10.37 +   req.ID               = slvID;
   10.38 +   req.langMagicNumber  = magicNum;
   10.39 +   req.semReq           = semReq;
   10.40 +//   req.nextReqst        = reqstingSlv->request;
   10.41     reqstingSlv->request = &req;
   10.42  
   10.43 -   PR_int__suspend_slaveVP_and_send_req( reqstingSlv );
   10.44 +   PR_WL__suspend_slaveVP_and_send_req( reqstingSlv );
   10.45   }
   10.46  
   10.47  
   10.48 @@ -75,13 +92,42 @@
   10.49  PR_WL__send_dissipate_req( SlaveVP *slaveToDissipate )
   10.50   { PRReqst req;
   10.51  
   10.52 -   req.reqType                = dissipate;
   10.53 -   req.nextReqst              = slaveToDissipate->request;
   10.54 +   req.reqType                = SlvDissipate;
   10.55 +//   req.nextReqst              = slaveToDissipate->request;
   10.56     slaveToDissipate->request = &req;
   10.57  
   10.58 -   PR_int__suspend_slaveVP_and_send_req( slaveToDissipate );
   10.59 +   PR_WL__suspend_slaveVP_and_send_req( slaveToDissipate );
   10.60   }
   10.61  
   10.62 +inline
   10.63 +void
   10.64 +PR_WL__send_create_task_req( TopLevelFn fn, void *initData, void *semReq, 
   10.65 +                             int32 *taskID, SlaveVP *animSlv, int32 magicNumber)
   10.66 + { PRReqst req;
   10.67 + 
   10.68 +   req.reqType    = TaskCreate;
   10.69 +   req.topLevelFn = fn;
   10.70 +   req.initData   = initData;
   10.71 +   req.ID         = taskID;
   10.72 +   req.semReq     = semReq;
   10.73 +   req.langMagicNumber = magicNumber;
   10.74 +   animSlv->request = &req;
   10.75 +   
   10.76 +   PR_WL__suspend_slaveVP_and_send_req( animSlv );
   10.77 + }
   10.78 +
   10.79 +inline
   10.80 +void
   10.81 +PR_WL__send_end_task_request( void *semReq, SlaveVP *animSlv, int32 magicNum )
   10.82 + { PRReqst req;
   10.83 + 
   10.84 +   req.reqType    = TaskEnd;
   10.85 +   req.semReq     = semReq;
   10.86 +   req.langMagicNumber = magicNum;
   10.87 +   animSlv->request = &req;
   10.88 +   
   10.89 +   PR_WL__suspend_slaveVP_and_send_req( animSlv );
   10.90 + }
   10.91  
   10.92  
   10.93  /*This call's name indicates that request is malloc'd -- so req handler
   10.94 @@ -100,43 +146,55 @@
   10.95   { PRReqst *req;
   10.96  
   10.97     req = PR_int__malloc( sizeof(PRReqst) );
   10.98 -   req->reqType         = semantic;
   10.99 -   req->semReqData      = semReqData;
  10.100 +   req->reqType         = Language;
  10.101 +   req->semReq          = semReqData;
  10.102     req->nextReqst       = callingSlv->request;
  10.103 -   callingSlv->request = req;
  10.104 +   callingSlv->request  = req;
  10.105   }
  10.106  
  10.107 -/*This inserts the semantic-layer's request data into standard PR carrier
  10.108 - * request data-struct is allocated on stack of this call & ptr to it sent
  10.109 +inline int32 *
  10.110 +PR_WL__create_taskID_of_size( int32 numInts )
  10.111 + { int32 *taskID;
  10.112 +   
  10.113 +   taskID    = PR_WL__malloc( sizeof(int32) + numInts * sizeof(int32) );
  10.114 +   taskID[0] = numInts;
  10.115 +   return taskID;
  10.116 + }
  10.117 +
  10.118 +/*This inserts the semantic-layer's request data into standard PR carrier.
  10.119 + * PR Request data-struct is allocated on stack of this call & ptr to it sent
  10.120   * to plugin
  10.121   *Then it does suspend, to cause request to be sent.
  10.122   */
  10.123  inline void
  10.124 -PR_WL__send_sem_request( void *semReqData, SlaveVP *callingSlv )
  10.125 +PR_WL__send_sem_request( void *semReqData, SlaveVP *callingSlv, int32 magicNum )
  10.126   { PRReqst req;
  10.127  
  10.128 -   req.reqType         = semantic;
  10.129 -   req.semReqData      = semReqData;
  10.130 +   req.reqType         = Language;
  10.131 +   req.langMagicNumber = magicNum;
   10.132 +   //NOTE(review): removed dangling "req." fragment here -- it was a syntax error
  10.133 +   req.semReq          = semReqData;
  10.134     req.nextReqst       = callingSlv->request;
  10.135     callingSlv->request = &req;
  10.136     
  10.137 -   PR_int__suspend_slaveVP_and_send_req( callingSlv );
  10.138 +   PR_WL__suspend_slaveVP_and_send_req( callingSlv );
  10.139   }
  10.140  
  10.141  
  10.142 -/*May 2012 Not sure what this is..  looks like old idea for PR semantic
  10.143 - * request
  10.144 +/*This sends a PRLang request -- for probe, exception, and so on..
  10.145 + * 
  10.146   */
  10.147  inline void
  10.148 -PR_WL__send_PRSem_request( void *semReqData, SlaveVP *callingSlv )
  10.149 +PR_WL__send_service_request( void *semReqData, SlaveVP *callingSlv )
  10.150   { PRReqst req;
  10.151  
  10.152 -   req.reqType         = PRSemantic;
  10.153 -   req.semReqData      = semReqData;
  10.154 -   req.nextReqst       = callingSlv->request; //gab any other preceeding 
  10.155 +   req.reqType         = PRLang;
  10.156 +   req.langMagicNumber = PRLang_MAGIC_NUMBER;
  10.157 +   req.semReq          = semReqData;
   10.158 +   req.nextReqst       = callingSlv->request; //grab any other preceding 
  10.159     callingSlv->request = &req;
  10.160  
  10.161 -   PR_int__suspend_slaveVP_and_send_req( callingSlv );
  10.162 +   PR_WL__suspend_slaveVP_and_send_req( callingSlv );
  10.163   }
  10.164  
  10.165  /*May 2012
  10.166 @@ -146,15 +204,15 @@
  10.167  void
  10.168  PR_WL__throw_exception( char *msgStr, SlaveVP *reqstSlv,  PRExcp *excpData )
  10.169   { PRReqst req;
  10.170 -   PRSemReq semReq;
  10.171 +   PRServReq semReq;
  10.172  
  10.173     req.reqType         = PRSemantic;
  10.174 -   req.semReqData      = &semReq;
  10.175 +   req.semReq      = &semReq;
   10.176 +   req.nextReqst       = reqstSlv->request; //grab any other preceding 
  10.177     reqstSlv->request   = &req;
  10.178  
  10.179     semReq.msgStr        = msgStr;
  10.180     semReq.exceptionData = excpData;
  10.181     
  10.182 -   PR_int__suspend_slaveVP_and_send_req( reqstSlv );
  10.183 +   PR_WL__suspend_slaveVP_and_send_req( reqstSlv );
  10.184   }
    11.1 --- a/PR__int.c	Wed Sep 19 23:12:44 2012 -0700
    11.2 +++ b/PR__int.c	Tue Oct 23 23:46:17 2012 -0700
    11.3 @@ -22,41 +22,123 @@
    11.4   * int: internal to the PR implementation
    11.5   */
    11.6  
    11.7 +//===========================================================================
    11.8 +//
    11.9 +//===========================================================================
   11.10  
   11.11  inline SlaveVP *
   11.12 -PR_int__create_slaveVP( TopLevelFnPtr fnPtr, void *dataParam )
   11.13 +PR_int__create_slaveVP( TopLevelFnPtr fnPtr, void *dataParam, PRProcess *process )
   11.14   { SlaveVP *newSlv;
   11.15 -   void      *stackLocs;
   11.16 +   void    *stackLocs;
   11.17 +
   11.18 +   PR_int__create_slaveVP_helper( fnPtr, dataParam );
   11.19 +           
   11.20 +   process->numLiveGenericSlaves += 1;
   11.21 +
   11.22 +   newSlv->needsTaskAssigned   = TRUE;
   11.23 +   newSlv->metaTask            = NULL;
   11.24 +   newSlv->typeOfVP            = GenericSlave;
   11.25 +   
   11.26 +   return newSlv;
   11.27 + }
   11.28 +
   11.29 +
   11.30 +inline SlaveVP *
   11.31 +PR_int__create_slaveVP_helper( TopLevelFnPtr fnPtr, void *dataParam )
   11.32 + { SlaveVP *newSlv;
   11.33 +   void    *stackLocs;
   11.34  
   11.35     newSlv      = PR_int__malloc( sizeof(SlaveVP) );
   11.36     stackLocs   = PR_int__malloc( VIRT_PROCR_STACK_SIZE );
   11.37     if( stackLocs == 0 )
   11.38      { perror("PR_int__malloc stack"); exit(1); }
   11.39  
   11.40 -   _PRMasterEnv->numSlavesAlive += 1;
   11.41 +   newSlv->startOfStack = stackLocs;
   11.42 +   newSlv->slaveID      = _PRTopEnv->numSlavesCreated++;
   11.43 +   newSlv->request      = NULL;
   11.44 +   newSlv->animSlotAssignedTo = NULL;
   11.45 +      
   11.46 +   newSlv->numTimesAssignedToASlot  = 0;  
   11.47  
   11.48 -   return PR_int__create_slaveVP_helper( newSlv, fnPtr, dataParam, stackLocs );
   11.49 +   #ifdef MODE__MULTI_LANG
   11.50 +   PRSemDataHolder *
   11.51 +   semDataHolder = PR_int__malloc( sizeof(PRSemDataHolder) );
   11.52 +   newSlv->semanticData = semDataHolder;
   11.53 +   #else
   11.54 +   newSlv->semanticData = NULL;
   11.55 +   #endif
   11.56 +
   11.57 +   PR_int__reset_slaveVP_to_TopLvlFn( newSlv, fnPtr, dataParam );
   11.58 +   
   11.59 +   //============================= MEASUREMENT STUFF ========================
   11.60 +   #ifdef PROBES__TURN_ON_STATS_PROBES
   11.61 +   //TODO: make this TSCHiLow or generic equivalent
   11.62 +   //struct timeval timeStamp;
   11.63 +   //gettimeofday( &(timeStamp), NULL);
   11.64 +   //newSlv->createPtInSecs = timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0) -
   11.65 +   //                                           _PRTopEnv->createPtInSecs;
   11.66 +   #endif
   11.67 +   //========================================================================
   11.68 +
   11.69 +   return newSlv;
   11.70   }
   11.71  
   11.72 -/* "ext" designates that it's for use outside the PR system -- should only
   11.73 - * be called from main thread or other thread -- never from code animated by
   11.74 - * a PR virtual processor.
   11.75 - */
   11.76 -inline SlaveVP *
   11.77 -PR_ext__create_slaveVP( TopLevelFnPtr fnPtr, void *dataParam )
   11.78 - { SlaveVP *newSlv;
   11.79 -   char      *stackLocs;
   11.80 -
   11.81 -   newSlv      = malloc( sizeof(SlaveVP) );
   11.82 -   stackLocs  = malloc( VIRT_PROCR_STACK_SIZE );
   11.83 -   if( stackLocs == 0 )
   11.84 -    { perror("malloc stack"); exit(1); }
   11.85 -
   11.86 -   _PRMasterEnv->numSlavesAlive += 1;
   11.87 -
   11.88 -   return PR_int__create_slaveVP_helper(newSlv, fnPtr, dataParam, stackLocs);
   11.89 +SlaveVP *
   11.90 +PR_int__create_slot_slave()
   11.91 + { 
   11.92 +    fixme;
   11.93   }
   11.94  
   11.95 +void idle_fn(void* data, SlaveVP *animatingSlv)
   11.96 + {
   11.97 +   while(1)
   11.98 +    { PR_WL__suspend_slaveVP_and_send_req( animatingSlv );
   11.99 +    }
  11.100 + }
  11.101 +
  11.102 +
  11.103 +PRMetaTask *
  11.104 +PR_int__create_generic_slave_meta_task( void *initData )
  11.105 + { PRMetaTask *newStub;
  11.106 +         
  11.107 +   newStub = PR_PI__malloc( sizeof(PRMetaTask) );
  11.108 +   newStub->slaveAssignedTo = NULL; //set later
  11.109 +   newStub->taskType        = IS_A_GENERIC_SLV;
  11.110 +   newStub->taskID          = NULL;
  11.111 +
  11.112 +   return newStub;
  11.113 + }
  11.114 +
  11.115 +
  11.116 +/* This is for OS requests and PR infrastructure requests, such as to create
  11.117 + *  a probe -- a probe is inside the heart of PR-core, it's not part of any
  11.118 + *  language -- but it's also a semantic thing that's triggered from and used
  11.119 + *  in the application.. so it crosses abstractions..  so, need some special
  11.120 + *  pattern here for handling such requests.
  11.121 + * Doing this just like it were a second language sharing PR-core.
  11.122 + * 
  11.123 + * This is called from the language's request handler when it sees a request
  11.124 + *  of type PRSemReq
  11.125 + *
  11.126 + * TODO: Later change this, to give probes their own separate plugin & have
  11.127 + *  PR-core steer the request to appropriate plugin
  11.128 + * Do the same for OS calls -- look later at it..
  11.129 + */
  11.130 +void inline
  11.131 +PR_int__handle_PRServiceReq( PRReqst *req, SlaveVP *requestingSlv, void *semEnv,
  11.132 +                       ResumeSlvFnPtr resumeFn )
  11.133 + { PRServReq *semReq;
  11.134 +
  11.135 +   semReq = PR_PI__take_sem_reqst_from(req);
  11.136 +   if( semReq == NULL ) return;
  11.137 +   switch( semReq->reqType )  //sem handlers are all in other file
  11.138 +    {
  11.139 +      case make_probe:      handleMakeProbe(   semReq, semEnv, resumeFn);
  11.140 +         break;
  11.141 +      case throw_excp:  handleThrowException(  semReq, semEnv, resumeFn);
  11.142 +         break;
  11.143 +    }
  11.144 + }
  11.145  
  11.146  //===========================================================================
  11.147  /*there is a label inside this function -- save the addr of this label in
  11.148 @@ -69,7 +151,7 @@
  11.149   * next work-unit for that slave.
  11.150   */
  11.151  void
  11.152 -PR_int__suspend_slaveVP_and_send_req( SlaveVP *animatingSlv )
  11.153 +PR_WL__suspend_slaveVP_and_send_req( SlaveVP *animatingSlv )
  11.154   { 
  11.155  
  11.156        //This suspended Slv will get assigned by Master again at some
  11.157 @@ -96,32 +178,6 @@
  11.158   }
  11.159  
  11.160  
  11.161 -/* "ext" designates that it's for use outside the PR system -- should only
  11.162 - * be called from main thread or other thread -- never from code animated by
  11.163 - * a SlaveVP, nor from a masterVP.
  11.164 - *
  11.165 - *Use this version to dissipate Slvs created outside the PR system.
  11.166 - */
  11.167 -void
  11.168 -PR_ext__dissipate_slaveVP( SlaveVP *slaveToDissipate )
  11.169 - {
  11.170 -   _PRMasterEnv->numSlavesAlive -= 1;
  11.171 -   if( _PRMasterEnv->numSlavesAlive == 0 )
  11.172 -    {    //no more work, so shutdown
  11.173 -      PR_SS__shutdown();  //note, creates shut-down slaves on each core
  11.174 -    }
  11.175 -
  11.176 -   //NOTE: dataParam was given to the processor, so should either have
  11.177 -      // been alloc'd with PR_int__malloc, or freed by the level above animSlv.
  11.178 -      //So, all that's left to free here is the stack and the SlaveVP struc
  11.179 -      // itself
  11.180 -      //Note, should not stack-allocate the data param -- no guarantee, in
  11.181 -      // general that creating processor will outlive ones it creates.
  11.182 -   free( slaveToDissipate->startOfStack );
  11.183 -   free( slaveToDissipate );
  11.184 - }
  11.185 -
  11.186 -
  11.187  
  11.188  /*This must be called by the request handler plugin -- it cannot be called
  11.189   * from the semantic library "dissipate processor" function -- instead, the
  11.190 @@ -143,11 +199,11 @@
  11.191  void
  11.192  PR_int__dissipate_slaveVP( SlaveVP *animatingSlv )
  11.193   {
  11.194 -         DEBUG__printf2(dbgRqstHdlr, "PR int dissipate slaveID: %d, alive: %d",animatingSlv->slaveID, _PRMasterEnv->numSlavesAlive-1);
  11.195 +         DEBUG__printf2(dbgRqstHdlr, "PR int dissipate slaveID: %d, alive: %d",animatingSlv->slaveID, _PRTopEnv->numSlavesAlive-1);
  11.196        //dis-own all locations owned by this processor, causing to be freed
  11.197        // any locations that it is (was) sole owner of
  11.198 -   _PRMasterEnv->numSlavesAlive -= 1;
  11.199 -   if( _PRMasterEnv->numSlavesAlive == 0 )
  11.200 +   _PRTopEnv->numSlavesAlive -= 1;
  11.201 +   if( _PRTopEnv->numSlavesAlive == 0 )
  11.202      {    //no more work, so shutdown
  11.203        PR_SS__shutdown();  //note, creates shut-down processor on each core
  11.204      }
  11.205 @@ -162,41 +218,114 @@
  11.206     PR_int__free( animatingSlv );
  11.207   }
  11.208  
  11.209 +/*In multi-lang mode, there are multiple semData in the slave..  
  11.210 + * 
  11.211 + *At some point want to recycle rather than free..
  11.212 + * 
  11.213 + *For now, iterate through semData, call registered free-er on each, then
  11.214 + * free the basic slave
  11.215 + */
  11.216 +void
  11.217 +PR_int__dissipate_slaveVP_multilang( SlaveVP *slave )
  11.218 + { PRSemDataHolder   *semDataHolder;
  11.219 +   PRSemDataTemplate *semData;
  11.220 +   int32              idx;
  11.221 +   
  11.222 +   semDataHolder = (PRSemDataHolder *)slave->semanticData;
  11.223 +   for(idx = 0; idx < semDataHolder->numSemDatas; idx++)
  11.224 +    { 
  11.225 +      semData = semDataHolder->semDatas[idx];
  11.226 +      (*(semData->freeFn))(semData); //this Fn is lang-spec 
  11.227 +    }
  11.228 +   
  11.229 +   PR_int__free( slave->startOfStack );
  11.230 +   PR_int__free( slave );   
  11.231 + }
  11.232 +
  11.233 +inline
  11.234 +void *
  11.235 +PR_int__give_semEnv_of_req( PRReqst *req, SlaveVP *requestingSlv )
  11.236 + {     
  11.237 +   return PR_int__give_sem_env_for_process( requestingSlv->processSlaveIsIn, 
  11.238 +                                                         req->langMagicNumber );
  11.239 + }
  11.240 +
  11.241  /*Anticipating multi-tasking
  11.242   */
  11.243 +inline
  11.244  void *
  11.245 -PR_int__give_sem_env_for( SlaveVP *animSlv )
  11.246 - {
  11.247 -   return _PRMasterEnv->semanticEnv;
  11.248 +PR_int__give_sem_env_for_slave( SlaveVP *slave, int32 magicNum )
  11.249 + {    
  11.250 +   return PR_int__give_sem_env_for_process( slave->processSlaveIsIn, magicNum );
  11.251 + }
  11.252 +inline
  11.253 +PRSemEnv *
  11.254 +PR_int__give_proto_sem_env_for_slave( SlaveVP *slave, int32 magicNum )
  11.255 + {    
  11.256 +   return PR_int__give_proto_sem_env_for_process( slave->processSlaveIsIn, magicNum );
  11.257   }
  11.258  
  11.259 -/*
  11.260 - *
  11.261 - */
  11.262 -inline SlaveVP *
  11.263 -PR_int__create_slaveVP_helper( SlaveVP *newSlv,    TopLevelFnPtr  fnPtr,
  11.264 -                     void    *dataParam, void          *stackLocs )
  11.265 - {
  11.266 -   newSlv->startOfStack = stackLocs;
  11.267 -   newSlv->slaveID      = _PRMasterEnv->numSlavesCreated++;
  11.268 -   newSlv->request     = NULL;
  11.269 -   newSlv->animSlotAssignedTo    = NULL;
  11.270 -   newSlv->typeOfVP     = Slave;
  11.271 -   newSlv->assignCount  = 0;
  11.272 +inline
  11.273 +void *
  11.274 +PR_int__give_sem_env_for_process( PRProcess *process, int32 magicNum )
  11.275 + { PRSemEnv *protoSemEnv;
  11.276  
  11.277 -   PR_int__reset_slaveVP_to_TopLvlFn( newSlv, fnPtr, dataParam );
  11.278 -           
  11.279 -   //============================= MEASUREMENT STUFF ========================
  11.280 -   #ifdef PROBES__TURN_ON_STATS_PROBES
  11.281 -   //TODO: make this TSCHiLow or generic equivalent
  11.282 -   //struct timeval timeStamp;
  11.283 -   //gettimeofday( &(timeStamp), NULL);
  11.284 -   //newSlv->createPtInSecs = timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0) -
  11.285 -   //                                           _PRMasterEnv->createPtInSecs;
  11.286 -   #endif
  11.287 -   //========================================================================
  11.288 +   protoSemEnv = lookup_proto_sem_env_in_array( process->semEnvs, magicNum );
  11.289 +   return protoSemEnv->langSemEnv;
  11.290 + }
  11.291 +inline
  11.292 +PRSemEnv *
  11.293 +PR_int__give_proto_sem_env_for_process( PRProcess *process, int32 magicNum )
  11.294 + { 
  11.295 +   return lookup_proto_sem_env_in_array( process->semEnvs, magicNum );
  11.296 + }
  11.297  
  11.298 -   return newSlv;
  11.299 +inline
  11.300 +PRSemEnv *
  11.301 +lookup_proto_sem_env_in_array( PRSemEnv *semEnvs, int32 magicNum )
  11.302 + { PRSemEnv *retEnv;
  11.303 +   int32 idx;
  11.304 + 
  11.305 +   idx = magicNum & 63; //mask off, leaving lowest 6 bits
  11.306 +   retEnv =  &(semEnvs[idx]); //is array of structs, so take addr
  11.307 +   while( retEnv->langMagicNumber != magicNum ) //assume magicNums unique
  11.308 +    { retEnv = retEnv->chainedSemEnv;
  11.309 +      if( retEnv == NULL ) goto NotFound;
  11.310 +    }
  11.311 +   return retEnv;
  11.312 +   
  11.313 + NotFound:
  11.314 +   return NULL;
  11.315 + }
  11.316 +
  11.317 +inline
  11.318 +PRSemEnv *
  11.319 +PR_int__create_proto_sem_env_in_process( PRProcess process, int32 magicNum )
  11.320 + { PRSemEnv *semEnvs;
  11.321 +   PRSemEnv *retEnv, *newEnv;
  11.322 +   int32 idx;
  11.323 + 
  11.324 +   semEnvs = process->semEnvs;
  11.325 +   
  11.326 +   idx = magicNum & 63; //mask upper bits off, leaving lowest 6 bits
  11.327 +   retEnv =  &(semEnvs[idx]); //is array of structs, so take addr
  11.328 +   if( retEnv->langSemEnv == NULL ) 
  11.329 +    { //if env that's in array is empty, do nothing, drop down to return sequence
  11.330 +    }
  11.331 +   else //look for last environment in chain
  11.332 +    { while( retEnv->chainedSemEnv != NULL ) 
  11.333 +       { retEnv = retEnv->chainedSemEnv;
  11.334 +       }
  11.335 +         //add a new proto sem env to the end of chain
  11.336 +      newEnv = PR_int__malloc( sizeof(PRSemEnv) );
  11.337 +      newEnv->chainedSemEnv = NULL;
  11.338 +      retEnv->chainedSemEnv = newEnv;
  11.339 +      retEnv = newEnv;
  11.340 +    }
  11.341 +
  11.342 +   process->semEnvList[process->numSemEnvs] = retEnv;
  11.343 +   process->numSemEnvs += 1;
  11.344 +   return retEnv;
  11.345   }
  11.346  
  11.347  
  11.348 @@ -231,7 +360,7 @@
  11.349  PR_int__get_master_lock()
  11.350   { int32 *addrOfMasterLock;
  11.351   
  11.352 -   addrOfMasterLock = &(_PRMasterEnv->masterLock);
  11.353 +   addrOfMasterLock = &(_PRTopEnv->masterLock);
  11.354  
  11.355     int numTriesToGetLock = 0;
  11.356     int gotLock = 0;
  11.357 @@ -263,11 +392,11 @@
  11.358  inline uint32_t
  11.359  PR_int__randomNumber()
  11.360   {
  11.361 -	_PRMasterEnv->seed1 = 36969 * (_PRMasterEnv->seed1 & 65535) + 
  11.362 -                          (_PRMasterEnv->seed1 >> 16);
  11.363 -	_PRMasterEnv->seed2 = 18000 * (_PRMasterEnv->seed2 & 65535) + 
  11.364 -                          (_PRMasterEnv->seed2 >> 16);
  11.365 -	return (_PRMasterEnv->seed1 << 16) + _PRMasterEnv->seed2;
  11.366 +	_PRTopEnv->seed1 = 36969 * (_PRTopEnv->seed1 & 65535) + 
  11.367 +                          (_PRTopEnv->seed1 >> 16);
  11.368 +	_PRTopEnv->seed2 = 18000 * (_PRTopEnv->seed2 & 65535) + 
  11.369 +                          (_PRTopEnv->seed2 >> 16);
  11.370 +	return (_PRTopEnv->seed1 << 16) + _PRTopEnv->seed2;
  11.371   }
  11.372  
  11.373  
    12.1 --- a/PR__startup_and_shutdown.c	Wed Sep 19 23:12:44 2012 -0700
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,601 +0,0 @@
    12.4 -/*
    12.5 - * Copyright 2010  OpenSourceStewardshipFoundation
    12.6 - *
    12.7 - * Licensed under BSD
    12.8 - */
    12.9 -
   12.10 -#include <stdio.h>
   12.11 -#include <stdlib.h>
   12.12 -#include <string.h>
   12.13 -#include <malloc.h>
   12.14 -#include <inttypes.h>
   12.15 -#include <sys/time.h>
   12.16 -#include <pthread.h>
   12.17 -
   12.18 -#include "PR.h"
   12.19 -
   12.20 -
   12.21 -#define thdAttrs NULL
   12.22 -
   12.23 -
   12.24 -/* MEANING OF   WL  PI  SS  int
   12.25 - * These indicate which places the function is safe to use.  They stand for:
   12.26 - * WL: Wrapper Library
   12.27 - * PI: Plugin 
   12.28 - * SS: Startup and Shutdown
   12.29 - * int: internal to the PR implementation
   12.30 - */
   12.31 -
   12.32 -
   12.33 -//===========================================================================
   12.34 -AnimSlot **
   12.35 -create_anim_slots( int32 coreSlotsAreOn );
   12.36 -
   12.37 -void
   12.38 -create_masterEnv();
   12.39 -
   12.40 -void
   12.41 -create_the_coreCtlr_OS_threads();
   12.42 -
   12.43 -MallocProlog *
   12.44 -create_free_list();
   12.45 -
   12.46 -void
   12.47 -endOSThreadFn( void *initData, SlaveVP *animatingSlv );
   12.48 -
   12.49 -
   12.50 -//===========================================================================
   12.51 -
   12.52 -/*Setup has two phases:
   12.53 - * 1) Semantic layer first calls init_PR, which creates masterEnv, and puts
   12.54 - *    the master Slv into the work-queue, ready for first "call"
   12.55 - * 2) Semantic layer then does its own init, which creates the seed virt
   12.56 - *    slave inside the semantic layer, ready to assign it when
   12.57 - *    asked by the first run of the animationMaster.
   12.58 - *
   12.59 - *This part is bit weird because PR really wants to be "always there", and
   12.60 - * have applications attach and detach..  for now, this PR is part of
   12.61 - * the app, so the PR system starts up as part of running the app.
   12.62 - *
   12.63 - *The semantic layer is isolated from the PR internals by making the
   12.64 - * semantic layer do setup to a state that it's ready with its
   12.65 - * initial Slvs, ready to assign them to slots when the animationMaster
   12.66 - * asks.  Without this pattern, the semantic layer's setup would
   12.67 - * have to modify slots directly to assign the initial virt-procrs, and put
   12.68 - * them into the readyToAnimateQ itself, breaking the isolation completely.
   12.69 - *
   12.70 - * 
   12.71 - *The semantic layer creates the initial Slv(s), and adds its
   12.72 - * own environment to masterEnv, and fills in the pointers to
   12.73 - * the requestHandler and slaveAssigner plug-in functions
   12.74 - */
   12.75 -
   12.76 -/*This allocates PR data structures, populates the master PRProc,
   12.77 - * and master environment, and returns the master environment to the semantic
   12.78 - * layer.
   12.79 - */
   12.80 -void
   12.81 -PR__start()
   12.82 - {
   12.83 -   #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
   12.84 -      create_masterEnv();
   12.85 -      printf( "\n\n Running in SEQUENTIAL mode \n\n" );
   12.86 -   #else
   12.87 -      create_masterEnv();
   12.88 -      DEBUG__printf1(dbgInfra,"Offset of lock in masterEnv: %d ", (int32)offsetof(MasterEnv,masterLock) );
   12.89 -      create_the_coreCtlr_OS_threads();
   12.90 -   #endif
   12.91 - }
   12.92 -
   12.93 -/*This gets the process struct out of the seedVP, then gets the semEnv-holding
   12.94 - * struct out of that, then inserts the semantic env into that struct, using
   12.95 - * the magic number as the key to the sem env placement.  The master will 
   12.96 - * use the magic number from a request to retrieve the semantic env appropriate
   12.97 - * for the construct that made the request.
   12.98 - */
   12.99 -void
  12.100 -PR__register_langlets_semEnv( PRSemEnv *semEnv, int32 magicNumber, 
  12.101 -                              SlaveVP  *seedVP )
  12.102 - { PREnvHolder *envHolder;
  12.103 -   PRProcess   *process;
  12.104 -
  12.105 -   process   = seedVP->process;
  12.106 -   envHolder = process->semEnvHolder;
  12.107 -   
  12.108 -   insert( magicNumber, semEnv, envHolder );
  12.109 - }
  12.110 -
  12.111 -
  12.112 -/*TODO: finish implementing
  12.113 - *This function returns information about the version of PR, the language
  12.114 - * the program is being run in, its version, and information on the 
  12.115 - * hardware.
  12.116 - */
  12.117 -/*
  12.118 -char *
  12.119 -PR_App__give_environment_string()
  12.120 - {
  12.121 -   //--------------------------
  12.122 -    fprintf(output, "#\n# >> Build information <<\n");
  12.123 -    fprintf(output, "# GCC VERSION: %d.%d.%d\n",__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__);
  12.124 -    fprintf(output, "# Build Date: %s %s\n", __DATE__, __TIME__);
  12.125 -    
  12.126 -    fprintf(output, "#\n# >> Hardware information <<\n");
  12.127 -    fprintf(output, "# Hardware Architecture: ");
  12.128 -   #ifdef __x86_64
  12.129 -    fprintf(output, "x86_64");
  12.130 -   #endif //__x86_64
  12.131 -   #ifdef __i386
  12.132 -    fprintf(output, "x86");
  12.133 -   #endif //__i386
  12.134 -    fprintf(output, "\n");
  12.135 -    fprintf(output, "# Number of Cores: %d\n", NUM_CORES);
  12.136 -   //--------------------------
  12.137 -    
  12.138 -   //PR Plugins
  12.139 -    fprintf(output, "#\n# >> PR Plugins <<\n");
  12.140 -    fprintf(output, "# Language : ");
  12.141 -    fprintf(output, _LANG_NAME_);
  12.142 -    fprintf(output, "\n");
  12.143 -       //Meta info gets set by calls from the language during its init,
  12.144 -       // and info registered by calls from inside the application
  12.145 -    fprintf(output, "# Assigner: %s\n", _PRMasterEnv->metaInfo->assignerInfo);
  12.146 -
  12.147 -   //--------------------------
  12.148 -   //Application
  12.149 -    fprintf(output, "#\n# >> Application <<\n");
  12.150 -    fprintf(output, "# Name: %s\n", _PRMasterEnv->metaInfo->appInfo);
  12.151 -    fprintf(output, "# Data Set:\n%s\n",_PRMasterEnv->metaInfo->inputSet);
  12.152 -    
  12.153 -   //--------------------------
  12.154 - }
  12.155 - */
  12.156 - 
  12.157 -
  12.158 -/*A pointer to the startup-function for the language is given as the last
  12.159 - * argument to the call.  Use this to initialize a program in the language.
  12.160 - * This creates a data structure that encapsulates the bookkeeping info
  12.161 - * PR uses to track and schedule a program run.
  12.162 - */
  12.163 -PRProcess *
  12.164 -PR__spawn_program_on_data_in_Lang( TopLevelFnPtr seed_fn, void *data )
  12.165 - { PRProcess *newProcess;
  12.166 -   newProcess = malloc( sizeof(PRProcess) );
  12.167 -   
  12.168 -   newProcess->doneLock = PTHREAD_MUTEX_INITIALIZER;
  12.169 -   newProcess->doneCond = PTHREAD_COND_INITIALIZER;
  12.170 -   newProcess->executionIsComplete = FALSE;
  12.171 -   newProcess->numSlavesLive = 0;
  12.172 -   
  12.173 -   newProcess->dataForSeed = data;
  12.174 -   newProcess->seedFnPtr   = prog_seed_fn;
  12.175 -   
  12.176 -      //The language's spawn-process function fills in the plugin function-ptrs in
  12.177 -      // the PRProcess struct, gives the struct to PR, which then makes and
  12.178 -      // queues the seed SlaveVP, which starts processors made from the code being
  12.179 -      // animated.
  12.180 -    
  12.181 -   (*langInitFnPtr)( newProcess );  
  12.182 -   
  12.183 -   return newProcess;
  12.184 - }
  12.185 -
  12.186 -
  12.187 -/*When all SlaveVPs owned by the program-run associated to the process have
  12.188 - * dissipated, then return from this call.  There is no language to cleanup,
  12.189 - * and PR does not shutdown..  but the process bookkeeping structure,
  12.190 - * which is used by PR to track and schedule the program, is freed.
  12.191 - *The PRProcess structure is kept until this call collects the results from it,
  12.192 - * then freed.  If the process is not done yet when PR gets this
  12.193 - * call, then this call waits..  the challenge here is that this call comes from
  12.194 - * a live OS thread that's outside PR..  so, inside here, it waits on a 
  12.195 - * condition..  then it's a PR thread that signals this to wake up..
  12.196 - *First checks whether the process is done, if yes, calls the clean-up fn then
  12.197 - * returns the result extracted from the PRProcess struct.
  12.198 - *If process not done yet, then performs a wait (in a loop to be sure the
  12.199 - * wakeup is not spurious, which can happen).  PR registers the wait, and upon
  12.200 - * the process ending (last SlaveVP owned by it dissipates), then PR signals
  12.201 - * this to wakeup.  This then calls the cleanup fn and returns the result.
  12.202 - */
  12.203 -/*
  12.204 -void *
  12.205 -PR_App__give_results_when_done_for( PRProcess *process )
  12.206 - { void *result;
  12.207 -   
  12.208 -   pthread_mutex_lock( process->doneLock );
  12.209 -   while( !(process->executionIsComplete) )
  12.210 -    {
  12.211 -      pthread_cond_wait( process->doneCond,
  12.212 -                         process->doneLock );
  12.213 -    }
  12.214 -   pthread_mutex_unlock( process->doneLock );
  12.215 -   
  12.216 -   result = process->resultToReturn;
  12.217 -   
  12.218 -   PR_int__cleanup_process_after_done( process );
  12.219 -   free( process );  //was malloc'd above, so free it here
  12.220 -   
  12.221 -   return result;
  12.222 - }
  12.223 -*/
  12.224 -
  12.225 -/*Turns off the PR system, and frees all data associated with it.  Does this
  12.226 - * by creating shutdown SlaveVPs and inserting them into animation slots.
  12.227 - * Will probably have to wake up sleeping cores as part of this -- the fn that
  12.228 - * inserts the new SlaveVPs should handle the wakeup..
  12.229 - */
  12.230 -/*
  12.231 -void
  12.232 -PR_SS__shutdown(); //already defined -- look at it
  12.233 -
  12.234 -void
  12.235 -PR_App__shutdown()
  12.236 - {
  12.237 -   for( cores )
  12.238 -    { slave = PR_int__create_new_SlaveVP( endOSThreadFn, NULL );
  12.239 -      PR_int__insert_slave_onto_core( SlaveVP *slave, coreNum );
  12.240 -    }
  12.241 - }
  12.242 -*/
  12.243 -
  12.244 -/* PR_App__start_PR_running();
  12.245 -
  12.246 -   PRProcess matrixMultProcess;
  12.247 -   
  12.248 -   matrixMultProcess =
  12.249 -    PR_App__spawn_program_on_data_in_Lang( &prog_seed_fn, data, Vthread_lang );
  12.250 -   
  12.251 -   resMatrix = PR_App__give_results_when_done_for( matrixMultProcess );
  12.252 -   
  12.253 -   PR_App__shutdown();
  12.254 - */
  12.255 -
  12.256 -void
  12.257 -create_masterEnv()
  12.258 - { MasterEnv       *masterEnv;
  12.259 -   PRQueueStruc  **readyToAnimateQs;
  12.260 -   int              coreIdx;
  12.261 -   SlaveVP        **masterVPs;
  12.262 -   AnimSlot     ***allAnimSlots; //ptr to array of ptrs
  12.263 -
  12.264 -
  12.265 -      //Make the master env, which holds everything else
  12.266 -   _PRMasterEnv = malloc( sizeof(MasterEnv) );
  12.267 -
  12.268 -        //Very first thing put into the master env is the free-list, seeded
  12.269 -        // with a massive initial chunk of memory.
  12.270 -        //After this, all other mallocs are PR__malloc.
  12.271 -   _PRMasterEnv->freeLists        = PR_ext__create_free_list();
  12.272 -   
  12.273 -   
  12.274 -   //===================== Only PR__malloc after this ====================
  12.275 -   masterEnv     = (MasterEnv*)_PRMasterEnv;
  12.276 -   
  12.277 -      //Make a readyToAnimateQ for each core controller
  12.278 -   readyToAnimateQs = PR_int__malloc( NUM_CORES * sizeof(PRQueueStruc *) );
  12.279 -   masterVPs        = PR_int__malloc( NUM_CORES * sizeof(SlaveVP *) );
  12.280 -
  12.281 -      //One array for each core, several in array, core's masterVP scheds all
  12.282 -   allAnimSlots    = PR_int__malloc( NUM_CORES * sizeof(AnimSlot *) );
  12.283 -
  12.284 -   _PRMasterEnv->numSlavesAlive = 0;  //used to detect shut-down condition
  12.285 -
  12.286 -//========================================
  12.287 -   semEnv->shutdownInitiated = FALSE;
  12.288 -   semEnv->coreIsDone = PR_int__malloc( NUM_CORES * sizeof( bool32 ) );
  12.289 -   
  12.290 -      //For each animation slot, there is an idle slave, and an initial
  12.291 -      // slave assigned as the current-task-slave.  Create them here.
  12.292 -   SlaveVP *idleSlv, *slotTaskSlv;
  12.293 -   for( coreNum = 0; coreNum < NUM_CORES; coreNum++ )
  12.294 -    { semEnv->coreIsDone[coreNum] = FALSE; //use during shutdown
  12.295 -    
  12.296 -      for( slotNum = 0; slotNum < NUM_ANIM_SLOTS; ++slotNum )
  12.297 -       { idleSlv = VSs__create_slave_helper( &idle_fn, NULL, semEnv, 0);
  12.298 -         idleSlv->coreAnimatedBy                = coreNum;
  12.299 -         idleSlv->animSlotAssignedTo            =
  12.300 -                               _PRMasterEnv->allAnimSlots[coreNum][slotNum];
  12.301 -         semEnv->idleSlv[coreNum][slotNum] = idleSlv;
  12.302 -         
  12.303 -         slotTaskSlv = VSs__create_slave_helper( &idle_fn, NULL, semEnv, 0);
  12.304 -         slotTaskSlv->coreAnimatedBy            = coreNum;
  12.305 -         slotTaskSlv->animSlotAssignedTo        = 
  12.306 -                               _PRMasterEnv->allAnimSlots[coreNum][slotNum];
  12.307 -         
  12.308 -         semData                    = slotTaskSlv->semanticData;
  12.309 -         semData->needsTaskAssigned = TRUE;
  12.310 -         semData->slaveType         = SlotTaskSlv;
  12.311 -         semEnv->slotTaskSlvs[coreNum][slotNum] = slotTaskSlv;
  12.312 -       }
  12.313 -    }
  12.314 -
  12.315 -      //create the recycle queue where free task slaves are put after their task ends
  12.316 -   semEnv->freeTaskSlvRecycleQ  = makePRQ();
  12.317 -   
  12.318 -
  12.319 -   semEnv->numLiveExtraTaskSlvs   = 0;
  12.320 -   semEnv->numLiveThreadSlvs      = 0; //none existent yet.. "create process" creates the seeds  
  12.321 -//==================================================================
  12.322 -   
  12.323 -   _PRMasterEnv->numSlavesCreated = 0;  //used by create slave to set slave ID
  12.324 -   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
  12.325 -    {    
  12.326 -      readyToAnimateQs[ coreIdx ] = makePRQ();
  12.327 -      
  12.328 -         //Q: should give masterVP core-specific info as its init data?
  12.329 -      masterVPs[ coreIdx ] = PR_int__create_slaveVP( (TopLevelFnPtr)&animationMaster, (void*)masterEnv );
  12.330 -      masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;
  12.331 -      masterVPs[ coreIdx ]->typeOfVP = Master;
  12.332 -      allAnimSlots[ coreIdx ] = create_anim_slots( coreIdx ); //makes for one core
  12.333 -    }
  12.334 -   _PRMasterEnv->masterVPs        = masterVPs;
  12.335 -   _PRMasterEnv->masterLock       = UNLOCKED;
  12.336 -   _PRMasterEnv->seed1 = rand()%1000; // init random number generator
  12.337 -   _PRMasterEnv->seed2 = rand()%1000; // init random number generator
  12.338 -   _PRMasterEnv->allAnimSlots    = allAnimSlots;
  12.339 -   _PRMasterEnv->measHistsInfo = NULL; 
  12.340 -
  12.341 -   //============================= MEASUREMENT STUFF ========================
  12.342 -      
  12.343 -         MEAS__Make_Meas_Hists_for_Susp_Meas;
  12.344 -         MEAS__Make_Meas_Hists_for_Master_Meas;
  12.345 -         MEAS__Make_Meas_Hists_for_Master_Lock_Meas;
  12.346 -         MEAS__Make_Meas_Hists_for_Malloc_Meas;
  12.347 -         MEAS__Make_Meas_Hists_for_Plugin_Meas;
  12.348 -         MEAS__Make_Meas_Hists_for_Language;
  12.349 -
  12.350 -         PROBES__Create_Probe_Bookkeeping_Vars;
  12.351 -         
  12.352 -         HOLISTIC__Setup_Perf_Counters;
  12.353 -         
  12.354 -   //========================================================================
  12.355 - }
  12.356 -
  12.357 -AnimSlot **
  12.358 -create_anim_slots( int32 coreSlotsAreOn )
  12.359 - { AnimSlot  **animSlots;
  12.360 -   int i;
  12.361 -
  12.362 -   animSlots  = PR_int__malloc( NUM_ANIM_SLOTS * sizeof(AnimSlot *) );
  12.363 -
  12.364 -   for( i = 0; i < NUM_ANIM_SLOTS; i++ )
  12.365 -    {
  12.366 -      animSlots[i] = PR_int__malloc( sizeof(AnimSlot) );
  12.367 -
  12.368 -         //Set state to mean "handling requests done, slot needs filling"
  12.369 -      animSlots[i]->workIsDone         = FALSE;
  12.370 -      animSlots[i]->needsSlaveAssigned = TRUE;
  12.371 -      animSlots[i]->slotIdx            = i; //quick retrieval of slot pos
  12.372 -      animSlots[i]->coreSlotIsOn       = coreSlotsAreOn;
  12.373 -    }
  12.374 -   return animSlots;
  12.375 - }
  12.376 -
  12.377 -
  12.378 -void
  12.379 -freeAnimSlots( AnimSlot **animSlots )
  12.380 - { int i;
  12.381 -   for( i = 0; i < NUM_ANIM_SLOTS; i++ )
  12.382 -    {
  12.383 -      PR_int__free( animSlots[i] );
  12.384 -    }
  12.385 -   PR_int__free( animSlots );
  12.386 - }
  12.387 -
  12.388 -
  12.389 -void
  12.390 -create_the_coreCtlr_OS_threads()
  12.391 - {
  12.392 -   //========================================================================
  12.393 -   //                      Create the Threads
  12.394 -   int coreIdx, retCode;
  12.395 -
  12.396 -      //Need the threads to be created suspended, and wait for a signal
  12.397 -      // before proceeding -- gives time after creating to initialize other
  12.398 -      // stuff before the coreCtlrs set off.
  12.399 -   _PRMasterEnv->setupComplete = 0;
  12.400 -   
  12.401 -      //initialize the cond used to make the new threads wait and sync up
  12.402 -      //must do this before *creating* the threads..
  12.403 -   pthread_mutex_init( &suspendLock, NULL );
  12.404 -   pthread_cond_init( &suspendCond, NULL );
  12.405 -
  12.406 -      //Make the threads that animate the core controllers
  12.407 -   for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
  12.408 -    { coreCtlrThdParams[coreIdx]          = PR_int__malloc( sizeof(ThdParams) );
  12.409 -      coreCtlrThdParams[coreIdx]->coreNum = coreIdx;
  12.410 -
  12.411 -      retCode =
  12.412 -      pthread_create( &(coreCtlrThdHandles[coreIdx]),
  12.413 -                        thdAttrs,
  12.414 -                       &coreController,
  12.415 -               (void *)(coreCtlrThdParams[coreIdx]) );
  12.416 -      if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(1);}
  12.417 -    }
  12.418 - }
  12.419 -
  12.420 -
  12.421 -/*This is what causes the PR system to initialize.. then waits for it to
  12.422 - * exit.
  12.423 - * 
  12.424 - *Wrapper lib layer calls this when it wants the system to start running..
  12.425 - */
  12.426 -/*
  12.427 -void
  12.428 -PR_SS__start_the_work_then_wait_until_done()
  12.429 - { 
  12.430 -#ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
  12.431 -   //Only difference between version with an OS thread pinned to each core and
  12.432 -   // the sequential version of PR is PR__init_Seq, this, and coreCtlr_Seq.
  12.433 -   //
  12.434 -         //Instead of un-suspending threads, just call the one and only
  12.435 -         // core ctlr (sequential version), in the main thread.
  12.436 -      coreCtlr_Seq( NULL );
  12.437 -      flushRegisters();
  12.438 -#else
  12.439 -   int coreIdx;
  12.440 -      //Start the core controllers running
  12.441 -   
  12.442 -      //tell the core controller threads that setup is complete
  12.443 -      //get lock, to lock out any threads still starting up -- they'll see
  12.444 -      // that setupComplete is true before entering while loop, and so never
  12.445 -      // wait on the condition
  12.446 -   pthread_mutex_lock(     &suspendLock );
  12.447 -   _PRMasterEnv->setupComplete = 1;
  12.448 -   pthread_mutex_unlock(   &suspendLock );
  12.449 -   pthread_cond_broadcast( &suspendCond );
  12.450 -   
  12.451 -   
  12.452 -      //wait for all to complete
  12.453 -   for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
  12.454 -    {
  12.455 -      pthread_join( coreCtlrThdHandles[coreIdx], NULL );
  12.456 -    }
  12.457 -   
  12.458 -      //NOTE: do not clean up PR env here -- semantic layer has to have
  12.459 -      // a chance to clean up its environment first, then do a call to free
  12.460 -      // the Master env and rest of PR locations
  12.461 -#endif
  12.462 - }
  12.463 -*/
  12.464 -
  12.465 -SlaveVP* PR_SS__create_shutdown_slave(){
  12.466 -    SlaveVP* shutdownVP;
  12.467 -    
  12.468 -    shutdownVP = PR_int__create_slaveVP( &endOSThreadFn, NULL );
  12.469 -    shutdownVP->typeOfVP = Shutdown;
  12.470 -    
  12.471 -    return shutdownVP;
  12.472 -}
  12.473 -
  12.474 -//TODO: look at architecting cleanest separation between request handler
  12.475 -// and animation master, for dissipate, create, shutdown, and other non-semantic
  12.476 -// requests.  Issue is chain: one removes requests from AppSlv, one dispatches
  12.477 -// on type of request, and one handles each type..  but some types require
  12.478 -// action from both request handler and animation master -- maybe just give the
  12.479 -// request handler calls like:  PR__handle_X_request_type
  12.480 -
  12.481 -
  12.482 -/*This is called by the semantic layer's request handler when it decides its
  12.483 - * time to shut down the PR system.  Calling this causes the core controller OS
  12.484 - * threads to exit, which unblocks the entry-point function that started up
  12.485 - * PR, and allows it to grab the result and return to the original single-
  12.486 - * threaded application.
  12.487 - * 
  12.488 - *The _PRMasterEnv is needed by this shut down function, so the create-seed-
  12.489 - * and-wait function has to free a bunch of stuff after it detects the
  12.490 - * threads have all died: the masterEnv, the thread-related locations,
  12.491 - * masterVP any AppSlvs that might still be allocated and sitting in the
  12.492 - * semantic environment, or have been orphaned in the _PRWorkQ.
  12.493 - * 
  12.494 - *NOTE: the semantic plug-in is expected to use PR__malloc to get all the
  12.495 - * locations it needs, and give ownership to masterVP.  Then, they will be
  12.496 - * automatically freed.
  12.497 - *
  12.498 - *In here,create one core-loop shut-down processor for each core controller and put
  12.499 - * them all directly into the readyToAnimateQ.
  12.500 - *Note, this function can ONLY be called after the semantic environment no
  12.501 - * longer cares if AppSlvs get animated after the point this is called.  In
  12.502 - * other words, this can be used as an abort, or else it should only be
  12.503 - * called when all AppSlvs have finished dissipate requests -- only at that
  12.504 - * point is it sure that all results have completed.
  12.505 - */
  12.506 -void
  12.507 -PR_SS__shutdown()
  12.508 - { int32       coreIdx;
  12.509 -   SlaveVP    *shutDownSlv;
  12.510 -   AnimSlot **animSlots;
  12.511 -      //create the shutdown processors, one for each core controller -- put them
  12.512 -      // directly into the Q -- each core will die when gets one
  12.513 -   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
  12.514 -    {    //Note, this is running in the master
  12.515 -      shutDownSlv = PR_SS__create_shutdown_slave();
  12.516 -         //last slave has dissipated, so no more in slots, so write
  12.517 -         // shut down slave into first animulng slot.
  12.518 -      animSlots = _PRMasterEnv->allAnimSlots[ coreIdx ];
  12.519 -      animSlots[0]->slaveAssignedToSlot = shutDownSlv;
  12.520 -      animSlots[0]->needsSlaveAssigned = FALSE;
  12.521 -      shutDownSlv->coreAnimatedBy = coreIdx;
  12.522 -      shutDownSlv->animSlotAssignedTo = animSlots[ 0 ];
  12.523 -    }
  12.524 - }
  12.525 -
  12.526 -
  12.527 -/*Am trying to be cute, avoiding IF statement in coreCtlr that checks for
  12.528 - * a special shutdown slaveVP.  Ended up with extra-complex shutdown sequence.
  12.529 - *This function has the sole purpose of setting the stack and framePtr
  12.530 - * to the coreCtlr's stack and framePtr.. it does that then jumps to the
  12.531 - * core ctlr's shutdown point -- might be able to just call Pthread_exit
  12.532 - * from here, but am going back to the pthread's stack and setting everything
  12.533 - * up just as if it never jumped out, before calling pthread_exit.
  12.534 - *The end-point of core ctlr will free the stack and so forth of the
  12.535 - * processor that animates this function, (this fn is transfering the
  12.536 - * animator of the AppSlv that is in turn animating this function over
  12.537 - * to core controller function -- note that this slices out a level of virtual
  12.538 - * processors).
  12.539 - */
  12.540 -void
  12.541 -endOSThreadFn( void *initData, SlaveVP *animatingSlv )
  12.542 - { 
  12.543 -   #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
  12.544 -    asmTerminateCoreCtlrSeq(animatingSlv);
  12.545 -   #else
  12.546 -    asmTerminateCoreCtlr(animatingSlv);
  12.547 -   #endif
  12.548 - }
  12.549 -
  12.550 -
  12.551 -/*This is called from the startup & shutdown
  12.552 - */
  12.553 -void
  12.554 -PR_SS__cleanup_at_end_of_shutdown()
  12.555 - { 
  12.556 -      //Before getting rid of everything, print out any measurements made
  12.557 -   if( _PRMasterEnv->measHistsInfo != NULL )
  12.558 -    { forAllInDynArrayDo( _PRMasterEnv->measHistsInfo, (DynArrayFnPtr)&printHist );
  12.559 -      forAllInDynArrayDo( _PRMasterEnv->measHistsInfo, (DynArrayFnPtr)&saveHistToFile);
  12.560 -      forAllInDynArrayDo( _PRMasterEnv->measHistsInfo, (DynArrayFnPtr)&freeHist );
  12.561 -    }
  12.562 -   
  12.563 -   MEAS__Print_Hists_for_Susp_Meas;
  12.564 -   MEAS__Print_Hists_for_Master_Meas;
  12.565 -   MEAS__Print_Hists_for_Master_Lock_Meas;
  12.566 -   MEAS__Print_Hists_for_Malloc_Meas;
  12.567 -   MEAS__Print_Hists_for_Plugin_Meas;
  12.568 -   
  12.569 -
  12.570 -      //All the environment data has been allocated with PR__malloc, so just
  12.571 -      // free its internal big-chunk and all inside it disappear.
  12.572 -/*
  12.573 -   readyToAnimateQs = _PRMasterEnv->readyToAnimateQs;
  12.574 -   masterVPs        = _PRMasterEnv->masterVPs;
  12.575 -   allAnimSlots    = _PRMasterEnv->allAnimSlots;
  12.576 -   
  12.577 -   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
  12.578 -    {
  12.579 -      freePRQ( readyToAnimateQs[ coreIdx ] );
  12.580 -         //master Slvs were created external to PR, so use external free
  12.581 -      PR_int__dissipate_slaveVP( masterVPs[ coreIdx ] );
  12.582 -      
  12.583 -      freeAnimSlots( allAnimSlots[ coreIdx ] );
  12.584 -    }
  12.585 -   
  12.586 -   PR_int__free( _PRMasterEnv->readyToAnimateQs );
  12.587 -   PR_int__free( _PRMasterEnv->masterVPs );
  12.588 -   PR_int__free( _PRMasterEnv->allAnimSlots );
  12.589 -   
  12.590 -   //============================= MEASUREMENT STUFF ========================
  12.591 -   #ifdef PROBES__TURN_ON_STATS_PROBES
  12.592 -   freeDynArrayDeep( _PRMasterEnv->dynIntervalProbesInfo, &PR_WL__free_probe);
  12.593 -   #endif
  12.594 -   //========================================================================
  12.595 -*/
  12.596 -      //These are the only two that use system free 
  12.597 -   PR_ext__free_free_list( _PRMasterEnv->freeLists );
  12.598 -   free( (void *)_PRMasterEnv );
  12.599 - }
  12.600 -
  12.601 -
  12.602 -//================================
  12.603 -
  12.604 -
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/PR_req_handlers.c	Tue Oct 23 23:46:17 2012 -0700
    13.3 @@ -0,0 +1,57 @@
    13.4 +/*
    13.5 + * Copyright 2010  OpenSourceStewardshipFoundation
    13.6 + *
    13.7 + * Licensed under BSD
    13.8 + */
    13.9 +
   13.10 +#include <stdio.h>
   13.11 +#include <stdlib.h>
   13.12 +#include <string.h>
   13.13 +#include <malloc.h>
   13.14 +#include <inttypes.h>
   13.15 +#include <sys/time.h>
   13.16 +
   13.17 +#include "PR.h"
   13.18 +
   13.19 +
   13.20 +/* MEANING OF   WL  PI  SS  int
   13.21 + * These indicate which places the function is safe to use.  They stand for:
   13.22 + * WL: Wrapper Library
   13.23 + * PI: Plugin 
   13.24 + * SS: Startup and Shutdown
   13.25 + * int: internal to the PR implementation
   13.26 + */
   13.27 +
   13.28 +
   13.29 +/*
   13.30 + */
   13.31 +void inline
   13.32 +handleMakeProbe( PRSemReq *semReq, void *semEnv, ResumeSlvFnPtr resumeFn )
   13.33 + { IntervalProbe *newProbe;
   13.34 +
   13.35 +   newProbe          = PR_int__malloc( sizeof(IntervalProbe) );
   13.36 +   newProbe->nameStr = PR_int__strDup( semReq->nameStr );
   13.37 +   newProbe->hist    = NULL;
   13.38 +   newProbe->schedChoiceWasRecorded = FALSE;
   13.39 +
   13.40 +      //This runs in masterVP, so no race-condition worries
   13.41 +   newProbe->probeID =
   13.42 +            addToDynArray( newProbe, _PRMasterEnv->dynIntervalProbesInfo );
   13.43 +
   13.44 +   semReq->requestingSlv->dataRetFromReq = newProbe;
   13.45 +
   13.46 +   //This in inside PR, while resume_slaveVP fn is inside language, so pass
   13.47 +   // pointer from lang to here, then call it.
   13.48 +   (*resumeFn)( semReq->requestingSlv, semEnv );
   13.49 + }
   13.50 +
   13.51 +void inline
   13.52 +handleThrowException( PRSemReq *semReq, void *semEnv, ResumeSlvFnPtr resumeFn )
   13.53 + {
   13.54 +   PR_int__throw_exception(  semReq->msgStr, semReq->requestingSlv, semReq->exceptionData );
   13.55 +   
   13.56 +   (*resumeFn)( semReq->requestingSlv, semEnv );
   13.57 + }
   13.58 +
   13.59 +
   13.60 +
    14.1 --- a/Services_Offered_by_PR/Measurement_and_Stats/MEAS__macros.h	Wed Sep 19 23:12:44 2012 -0700
    14.2 +++ b/Services_Offered_by_PR/Measurement_and_Stats/MEAS__macros.h	Tue Oct 23 23:46:17 2012 -0700
    14.3 @@ -26,7 +26,7 @@
    14.4  
    14.5     #define MEAS__Capture_Post_Point( histName ) \
    14.6        saveLowTimeStampCountInto( endStamp ); \
    14.7 -      addIntervalToHist( startStamp, endStamp, _PRMasterEnv->histName ); 
    14.8 +      addIntervalToHist( startStamp, endStamp, _PRTopEnv->histName ); 
    14.9  
   14.10  
   14.11  
   14.12 @@ -43,9 +43,9 @@
   14.13         Histogram       *suspHighTimeHist;
   14.14  
   14.15     #define MEAS__Make_Meas_Hists_for_Susp_Meas \
   14.16 -      _PRMasterEnv->suspLowTimeHist  = makeFixedBinHistExt( 100, 0, 200,\
   14.17 +      _PRTopEnv->suspLowTimeHist  = makeFixedBinHistExt( 100, 0, 200,\
   14.18                                                      "master_low_time_hist");\
   14.19 -      _PRMasterEnv->suspHighTimeHist  = makeFixedBinHistExt( 100, 0, 200,\
   14.20 +      _PRTopEnv->suspHighTimeHist  = makeFixedBinHistExt( 100, 0, 200,\
   14.21                                                      "master_high_time_hist");
   14.22        
   14.23        //record time stamp: compare to time-stamp recorded below
   14.24 @@ -56,12 +56,12 @@
   14.25     #define MEAS__Capture_Post_Susp_Point \
   14.26        saveLowTimeStampCountInto( animatingSlv->postSuspTSCLow );\
   14.27        addIntervalToHist( preSuspTSCLow, postSuspTSCLow,\
   14.28 -                         _PRMasterEnv->suspLowTimeHist ); \
   14.29 +                         _PRTopEnv->suspLowTimeHist ); \
   14.30        addIntervalToHist( preSuspTSCLow, postSuspTSCLow,\
   14.31 -                         _PRMasterEnv->suspHighTimeHist );
   14.32 +                         _PRTopEnv->suspHighTimeHist );
   14.33  
   14.34     #define MEAS__Print_Hists_for_Susp_Meas \
   14.35 -      printHist( _PRMasterEnv->pluginTimeHist );
   14.36 +      printHist( _PRTopEnv->pluginTimeHist );
   14.37        
   14.38  #else
   14.39     #define MEAS__Insert_Susp_Meas_Fields_into_Slave     
   14.40 @@ -82,9 +82,9 @@
   14.41         Histogram       *masterHighTimeHist;
   14.42  
   14.43     #define MEAS__Make_Meas_Hists_for_Master_Meas \
   14.44 -      _PRMasterEnv->masterLowTimeHist  = makeFixedBinHistExt( 100, 0, 200,\
   14.45 +      _PRTopEnv->masterLowTimeHist  = makeFixedBinHistExt( 100, 0, 200,\
   14.46                                                      "master_low_time_hist");\
   14.47 -      _PRMasterEnv->masterHighTimeHist  = makeFixedBinHistExt( 100, 0, 200,\
   14.48 +      _PRTopEnv->masterHighTimeHist  = makeFixedBinHistExt( 100, 0, 200,\
   14.49                                                      "master_high_time_hist");
   14.50  
   14.51        //Total Master time includes one coreloop time -- just assume the core
   14.52 @@ -96,12 +96,12 @@
   14.53     #define MEAS__Capture_Post_Master_Point \
   14.54        saveLowTimeStampCountInto( masterVP->endMasterTSCLow );\
   14.55        addIntervalToHist( startMasterTSCLow, endMasterTSCLow,\
   14.56 -                         _PRMasterEnv->masterLowTimeHist ); \
   14.57 +                         _PRTopEnv->masterLowTimeHist ); \
   14.58        addIntervalToHist( startMasterTSCLow, endMasterTSCLow,\
   14.59 -                         _PRMasterEnv->masterHighTimeHist );
   14.60 +                         _PRTopEnv->masterHighTimeHist );
   14.61  
   14.62     #define MEAS__Print_Hists_for_Master_Meas \
   14.63 -      printHist( _PRMasterEnv->pluginTimeHist );
   14.64 +      printHist( _PRTopEnv->pluginTimeHist );
   14.65  
   14.66  #else
   14.67     #define MEAS__Insert_Master_Meas_Fields_into_Slave
   14.68 @@ -119,9 +119,9 @@
   14.69         Histogram       *masterLockHighTimeHist;
   14.70  
   14.71     #define MEAS__Make_Meas_Hists_for_Master_Lock_Meas \
   14.72 -      _PRMasterEnv->masterLockLowTimeHist  = makeFixedBinHist( 50, 0, 2, \
   14.73 +      _PRTopEnv->masterLockLowTimeHist  = makeFixedBinHist( 50, 0, 2, \
   14.74                                                 "master lock low time hist");\
   14.75 -      _PRMasterEnv->masterLockHighTimeHist  = makeFixedBinHist( 50, 0, 100,\
   14.76 +      _PRTopEnv->masterLockHighTimeHist  = makeFixedBinHist( 50, 0, 100,\
   14.77                                                 "master lock high time hist");
   14.78  
   14.79     #define MEAS__Capture_Pre_Master_Lock_Point \
   14.80 @@ -131,13 +131,13 @@
   14.81     #define MEAS__Capture_Post_Master_Lock_Point \
   14.82        saveLowTimeStampCountInto( endStamp ); \
   14.83        addIntervalToHist( startStamp, endStamp,\
   14.84 -                         _PRMasterEnv->masterLockLowTimeHist ); \
   14.85 +                         _PRTopEnv->masterLockLowTimeHist ); \
   14.86        addIntervalToHist( startStamp, endStamp,\
   14.87 -                         _PRMasterEnv->masterLockHighTimeHist );
   14.88 +                         _PRTopEnv->masterLockHighTimeHist );
   14.89  
   14.90     #define MEAS__Print_Hists_for_Master_Lock_Meas \
   14.91 -      printHist( _PRMasterEnv->masterLockLowTimeHist ); \
   14.92 -      printHist( _PRMasterEnv->masterLockHighTimeHist );
   14.93 +      printHist( _PRTopEnv->masterLockLowTimeHist ); \
   14.94 +      printHist( _PRTopEnv->masterLockHighTimeHist );
   14.95        
   14.96  #else
   14.97     #define MEAS__Insert_Master_Lock_Meas_Fields_into_MasterEnv
   14.98 @@ -154,9 +154,9 @@
   14.99         Histogram       *freeTimeHist;
  14.100  
  14.101     #define MEAS__Make_Meas_Hists_for_Malloc_Meas \
  14.102 -      _PRMasterEnv->mallocTimeHist  = makeFixedBinHistExt( 100, 0, 30,\
  14.103 +      _PRTopEnv->mallocTimeHist  = makeFixedBinHistExt( 100, 0, 30,\
  14.104                                                         "malloc_time_hist");\
  14.105 -      _PRMasterEnv->freeTimeHist  = makeFixedBinHistExt( 100, 0, 30,\
  14.106 +      _PRTopEnv->freeTimeHist  = makeFixedBinHistExt( 100, 0, 30,\
  14.107                                                         "free_time_hist");
  14.108  
  14.109     #define MEAS__Capture_Pre_Malloc_Point \
  14.110 @@ -166,7 +166,7 @@
  14.111     #define MEAS__Capture_Post_Malloc_Point \
  14.112        saveLowTimeStampCountInto( endStamp ); \
  14.113        addIntervalToHist( startStamp, endStamp,\
  14.114 -                         _PRMasterEnv->mallocTimeHist ); 
  14.115 +                         _PRTopEnv->mallocTimeHist ); 
  14.116  
  14.117     #define MEAS__Capture_Pre_Free_Point \
  14.118        int32 startStamp, endStamp; \
  14.119 @@ -175,15 +175,15 @@
  14.120     #define MEAS__Capture_Post_Free_Point \
  14.121        saveLowTimeStampCountInto( endStamp ); \
  14.122        addIntervalToHist( startStamp, endStamp,\
  14.123 -                         _PRMasterEnv->freeTimeHist ); 
  14.124 +                         _PRTopEnv->freeTimeHist ); 
  14.125  
  14.126     #define MEAS__Print_Hists_for_Malloc_Meas \
  14.127 -      printHist( _PRMasterEnv->mallocTimeHist   ); \
  14.128 -      saveHistToFile( _PRMasterEnv->mallocTimeHist   ); \
  14.129 -      printHist( _PRMasterEnv->freeTimeHist     ); \
  14.130 -      saveHistToFile( _PRMasterEnv->freeTimeHist     ); \
  14.131 -      freeHistExt( _PRMasterEnv->mallocTimeHist ); \
  14.132 -      freeHistExt( _PRMasterEnv->freeTimeHist   );
  14.133 +      printHist( _PRTopEnv->mallocTimeHist   ); \
  14.134 +      saveHistToFile( _PRTopEnv->mallocTimeHist   ); \
  14.135 +      printHist( _PRTopEnv->freeTimeHist     ); \
  14.136 +      saveHistToFile( _PRTopEnv->freeTimeHist     ); \
  14.137 +      freeHistExt( _PRTopEnv->mallocTimeHist ); \
  14.138 +      freeHistExt( _PRTopEnv->freeTimeHist   );
  14.139        
  14.140  #else
  14.141     #define MEAS__Insert_Malloc_Meas_Fields_into_MasterEnv
  14.142 @@ -203,9 +203,9 @@
  14.143        Histogram       *reqHdlrHighTimeHist;
  14.144            
  14.145     #define MEAS__Make_Meas_Hists_for_Plugin_Meas \
  14.146 -      _PRMasterEnv->reqHdlrLowTimeHist  = makeFixedBinHistExt( 100, 0, 200,\
  14.147 +      _PRTopEnv->reqHdlrLowTimeHist  = makeFixedBinHistExt( 100, 0, 200,\
  14.148                                                      "plugin_low_time_hist");\
  14.149 -      _PRMasterEnv->reqHdlrHighTimeHist  = makeFixedBinHistExt( 100, 0, 200,\
  14.150 +      _PRTopEnv->reqHdlrHighTimeHist  = makeFixedBinHistExt( 100, 0, 200,\
  14.151                                                      "plugin_high_time_hist");
  14.152  
  14.153     #define MEAS__startReqHdlr \
  14.154 @@ -215,17 +215,17 @@
  14.155     #define MEAS__endReqHdlr \
  14.156        saveLowTimeStampCountInto( endStamp1 ); \
  14.157        addIntervalToHist( startStamp1, endStamp1, \
  14.158 -                           _PRMasterEnv->reqHdlrLowTimeHist ); \
  14.159 +                           _PRTopEnv->reqHdlrLowTimeHist ); \
  14.160        addIntervalToHist( startStamp1, endStamp1, \
  14.161 -                           _PRMasterEnv->reqHdlrHighTimeHist );
  14.162 +                           _PRTopEnv->reqHdlrHighTimeHist );
  14.163  
  14.164     #define MEAS__Print_Hists_for_Plugin_Meas \
  14.165 -      printHist( _PRMasterEnv->reqHdlrLowTimeHist ); \
  14.166 -      saveHistToFile( _PRMasterEnv->reqHdlrLowTimeHist ); \
  14.167 -      printHist( _PRMasterEnv->reqHdlrHighTimeHist ); \
  14.168 -      saveHistToFile( _PRMasterEnv->reqHdlrHighTimeHist ); \
  14.169 -      freeHistExt( _PRMasterEnv->reqHdlrLowTimeHist ); \
  14.170 -      freeHistExt( _PRMasterEnv->reqHdlrHighTimeHist );
  14.171 +      printHist( _PRTopEnv->reqHdlrLowTimeHist ); \
  14.172 +      saveHistToFile( _PRTopEnv->reqHdlrLowTimeHist ); \
  14.173 +      printHist( _PRTopEnv->reqHdlrHighTimeHist ); \
  14.174 +      saveHistToFile( _PRTopEnv->reqHdlrHighTimeHist ); \
  14.175 +      freeHistExt( _PRTopEnv->reqHdlrLowTimeHist ); \
  14.176 +      freeHistExt( _PRTopEnv->reqHdlrHighTimeHist );
  14.177  #else
  14.178     #define MEAS__Insert_Plugin_Meas_Fields_into_MasterEnv
  14.179     #define MEAS__Make_Meas_Hists_for_Plugin_Meas
  14.180 @@ -255,16 +255,16 @@
  14.181     #define MEAS__startAnimationMaster_forSys \
  14.182        TSCountLowHigh startStamp1, endStamp1; \
  14.183        saveTSCLowHigh( endStamp1 ); \
  14.184 -      _PRMasterEnv->cyclesTillStartAnimationMaster = \
  14.185 +      _PRTopEnv->cyclesTillStartAnimationMaster = \
  14.186        endStamp1.longVal - masterVP->startSusp.longVal;
  14.187  
  14.188     #define Meas_startReqHdlr_forSys \
  14.189          saveTSCLowHigh( startStamp1 ); \
  14.190 -        _PRMasterEnv->startReqHdlr.longVal = startStamp1.longVal;
  14.191 +        _PRTopEnv->startReqHdlr.longVal = startStamp1.longVal;
  14.192   
  14.193     #define MEAS__endAnimationMaster_forSys \
  14.194        saveTSCLowHigh( startStamp1 ); \
  14.195 -      _PRMasterEnv->endAnimationMaster.longVal = startStamp1.longVal;
  14.196 +      _PRTopEnv->endAnimationMaster.longVal = startStamp1.longVal;
  14.197  
  14.198     /*A TSC is stored in VP first thing inside wrapper-lib
  14.199      * Now, measures cycles from there to here
  14.200 @@ -279,7 +279,7 @@
  14.201               currVP->numGoodSusp++; \
  14.202             } \
  14.203               /*recorded every time, but only read if currVP == MasterVP*/ \
  14.204 -          _PRMasterEnv->startMaster.longVal = endSusp.longVal;
  14.205 +          _PRTopEnv->startMaster.longVal = endSusp.longVal;
  14.206  
  14.207  #else
  14.208     #define MEAS__Insert_System_Meas_Fields_into_Slave 
  14.209 @@ -314,9 +314,9 @@
  14.210     };
  14.211     
  14.212     #define saveCyclesAndInstrs(core,cycles,instrs,cachem) do{ \
  14.213 -   int cycles_fd = _PRMasterEnv->cycles_counter_fd[core]; \
  14.214 -   int instrs_fd = _PRMasterEnv->instrs_counter_fd[core]; \
  14.215 -   int cachem_fd = _PRMasterEnv->cachem_counter_fd[core]; \
  14.216 +   int cycles_fd = _PRTopEnv->cycles_counter_fd[core]; \
  14.217 +   int instrs_fd = _PRTopEnv->instrs_counter_fd[core]; \
  14.218 +   int cachem_fd = _PRTopEnv->cachem_counter_fd[core]; \
  14.219     int nread;                                           \
  14.220                                                          \
  14.221     nread = read(cycles_fd,&(cycles),sizeof(cycles));    \
  14.222 @@ -348,7 +348,7 @@
  14.223     
  14.224  
  14.225     #define HOLISTIC__CoreCtrl_Setup \
  14.226 -   CounterHandler counterHandler = _PRMasterEnv->counterHandler; \
  14.227 +   CounterHandler counterHandler = _PRTopEnv->counterHandler; \
  14.228     SlaveVP      *lastVPBeforeMaster = NULL; \
  14.229     /*if(thisCoresThdParams->coreNum == 0){ \
  14.230         uint64 initval = tsc_offset_send(thisCoresThdParams,0); \
  14.231 @@ -376,12 +376,12 @@
  14.232        uint64 cycles,instrs,cachem; \
  14.233        saveCyclesAndInstrs(thisCoresIdx,cycles, instrs,cachem); \
  14.234        if(lastVPBeforeMaster){ \
  14.235 -        (*counterHandler)(AppResponderInvocation_start,lastVPBeforeMaster->slaveID,lastVPBeforeMaster->assignCount,lastVPBeforeMaster,cycles,instrs,cachem); \
  14.236 +        (*counterHandler)(AppResponderInvocation_start,lastVPBeforeMaster->slaveID,lastVPBeforeMaster->numTimesAssignedToASlot,lastVPBeforeMaster,cycles,instrs,cachem); \
  14.237          lastVPBeforeMaster = NULL; \
  14.238        } else { \
  14.239 -          _PRMasterEnv->start_master_lock[thisCoresIdx][0] = cycles; \
  14.240 -          _PRMasterEnv->start_master_lock[thisCoresIdx][1] = instrs; \
  14.241 -          _PRMasterEnv->start_master_lock[thisCoresIdx][2] = cachem; \
  14.242 +          _PRTopEnv->start_master_lock[thisCoresIdx][0] = cycles; \
  14.243 +          _PRTopEnv->start_master_lock[thisCoresIdx][1] = instrs; \
  14.244 +          _PRTopEnv->start_master_lock[thisCoresIdx][2] = cachem; \
  14.245        }
  14.246   
  14.247             /* Request Handler may call resume() on the VP, but we want to 
  14.248 @@ -396,7 +396,7 @@
  14.249                  */
  14.250     #define HOLISTIC__Record_AppResponder_start \
  14.251                 vpid = currSlot->slaveAssignedToSlot->slaveID; \
  14.252 -               task = currSlot->slaveAssignedToSlot->assignCount; \
  14.253 +               task = currSlot->slaveAssignedToSlot->numTimesAssignedToASlot; \
  14.254                 uint64 cycles, instrs, cachem; \
  14.255                 saveCyclesAndInstrs(thisCoresIdx,cycles, instrs,cachem); \
  14.256                 (*counterHandler)(AppResponder_start,vpid,task,currSlot->slaveAssignedToSlot,cycles,instrs,cachem);
  14.257 @@ -429,30 +429,30 @@
  14.258          uint64 cycles,instrs,cachem; \
  14.259          saveCyclesAndInstrs(thisCoresIdx,cycles,instrs,cachem); \
  14.260          if(empty){ \
  14.261 -            (*counterHandler)(AssignerInvocation_start,assignedSlaveVP->slaveID,assignedSlaveVP->assignCount,assignedSlaveVP,masterEnv->start_master_lock[thisCoresIdx][0],masterEnv->start_master_lock[thisCoresIdx][1],masterEnv->start_master_lock[thisCoresIdx][2]); \
  14.262 +            (*counterHandler)(AssignerInvocation_start,assignedSlaveVP->slaveID,assignedSlaveVP->numTimesAssignedToASlot,assignedSlaveVP,masterEnv->start_master_lock[thisCoresIdx][0],masterEnv->start_master_lock[thisCoresIdx][1],masterEnv->start_master_lock[thisCoresIdx][2]); \
  14.263          } \
  14.264 -        (*counterHandler)(Timestamp_start,assignedSlaveVP->slaveID,assignedSlaveVP->assignCount,assignedSlaveVP,tsc,0,0); \
  14.265 -        (*counterHandler)(Assigner_start,assignedSlaveVP->slaveID,assignedSlaveVP->assignCount,assignedSlaveVP,tmp_cycles,tmp_instrs,tmp_cachem); \
  14.266 -        (*counterHandler)(Assigner_end,assignedSlaveVP->slaveID,assignedSlaveVP->assignCount,assignedSlaveVP,cycles,instrs,tmp_cachem);
  14.267 +        (*counterHandler)(Timestamp_start,assignedSlaveVP->slaveID,assignedSlaveVP->numTimesAssignedToASlot,assignedSlaveVP,tsc,0,0); \
  14.268 +        (*counterHandler)(Assigner_start,assignedSlaveVP->slaveID,assignedSlaveVP->numTimesAssignedToASlot,assignedSlaveVP,tmp_cycles,tmp_instrs,tmp_cachem); \
  14.269 +        (*counterHandler)(Assigner_end,assignedSlaveVP->slaveID,assignedSlaveVP->numTimesAssignedToASlot,assignedSlaveVP,cycles,instrs,tmp_cachem);
  14.270  
  14.271     #define HOLISTIC__Record_Work_start \
  14.272          if(currVP){ \
  14.273                  uint64 cycles,instrs,cachem; \
  14.274                  saveCyclesAndInstrs(thisCoresIdx,cycles, instrs,cachem); \
  14.275 -                (*counterHandler)(Work_start,currVP->slaveID,currVP->assignCount,currVP,cycles,instrs,cachem); \
  14.276 +                (*counterHandler)(Work_start,currVP->slaveID,currVP->numTimesAssignedToASlot,currVP,cycles,instrs,cachem); \
  14.277          }
  14.278     
  14.279     #define HOLISTIC__Record_Work_end \
  14.280         if(currVP){ \
  14.281                 uint64 cycles,instrs,cachem; \
  14.282                 saveCyclesAndInstrs(thisCoresIdx,cycles, instrs,cachem); \
  14.283 -               (*counterHandler)(Work_end,currVP->slaveID,currVP->assignCount,currVP,cycles,instrs,cachem); \
  14.284 +               (*counterHandler)(Work_end,currVP->slaveID,currVP->numTimesAssignedToASlot,currVP,cycles,instrs,cachem); \
  14.285         }
  14.286  
  14.287     #define HOLISTIC__Record_HwResponderInvocation_start \
  14.288          uint64 cycles,instrs,cachem; \
  14.289          saveCyclesAndInstrs(animatingSlv->coreAnimatedBy,cycles, instrs,cachem); \
  14.290 -        (*(_PRMasterEnv->counterHandler))(HwResponderInvocation_start,animatingSlv->slaveID,animatingSlv->assignCount,animatingSlv,cycles,instrs,cachem); 
  14.291 +        (*(_PRTopEnv->counterHandler))(HwResponderInvocation_start,animatingSlv->slaveID,animatingSlv->numTimesAssignedToASlot,animatingSlv,cycles,instrs,cachem); 
  14.292          
  14.293  
  14.294     #define getReturnAddressBeforeLibraryCall(vp_ptr, res_ptr) do{     \
  14.295 @@ -502,8 +502,8 @@
  14.296  #endif
  14.297  
  14.298  #define makeAMeasHist( idx, name, numBins, startVal, binWidth ) \
  14.299 -      makeHighestDynArrayIndexBeAtLeast( _PRMasterEnv->measHistsInfo, idx ); \
  14.300 -      _PRMasterEnv->measHists[idx] =  \
  14.301 +      makeHighestDynArrayIndexBeAtLeast( _PRTopEnv->measHistsInfo, idx ); \
  14.302 +      _PRTopEnv->measHists[idx] =  \
  14.303                         makeFixedBinHist( numBins, startVal, binWidth, name );
  14.304  
  14.305  //==============================  Probes  ===================================
    15.1 --- a/Services_Offered_by_PR/Measurement_and_Stats/probes.c	Wed Sep 19 23:12:44 2012 -0700
    15.2 +++ b/Services_Offered_by_PR/Measurement_and_Stats/probes.c	Tue Oct 23 23:46:17 2012 -0700
    15.3 @@ -49,12 +49,12 @@
    15.4  IntervalProbe *
    15.5  create_generic_probe( char *nameStr, SlaveVP *animSlv )
    15.6   {
    15.7 -   PRSemReq reqData;
    15.8 +   PRServReq reqData;
    15.9  
   15.10     reqData.reqType  = make_probe;
   15.11     reqData.nameStr  = nameStr;
   15.12  
   15.13 -   PR_WL__send_PRSem_request( &reqData, animSlv );
   15.14 +   PR_WL__send_service_request( &reqData, animSlv );
   15.15  
   15.16     return animSlv->dataRetFromReq;
   15.17   }
   15.18 @@ -74,7 +74,7 @@
   15.19     newProbe->hist    = NULL;
   15.20     newProbe->schedChoiceWasRecorded = FALSE;
   15.21     newProbe->probeID =
   15.22 -             addToDynArray( newProbe, _PRMasterEnv->dynIntervalProbesInfo );
   15.23 +             addToDynArray( newProbe, _PRTopEnv->dynIntervalProbesInfo );
   15.24  
   15.25     return newProbe;
   15.26   }
   15.27 @@ -164,9 +164,9 @@
   15.28   { IntervalProbe *probe;
   15.29  
   15.30     PR_int__get_master_lock();
   15.31 -   probe = _PRMasterEnv->intervalProbes[ probeID ];
   15.32 +   probe = _PRTopEnv->intervalProbes[ probeID ];
   15.33  
   15.34 -   addValueIntoTable(probe->nameStr, probe, _PRMasterEnv->probeNameHashTbl);
   15.35 +   addValueIntoTable(probe->nameStr, probe, _PRTopEnv->probeNameHashTbl);
   15.36     PR_int__release_master_lock();
   15.37   }
   15.38  
   15.39 @@ -175,7 +175,7 @@
   15.40  PR_impl__get_probe_by_name( char *probeName, SlaveVP *animSlv )
   15.41   {
   15.42     //TODO: fix this To be in Master -- race condition
   15.43 -   return getValueFromTable( probeName, _PRMasterEnv->probeNameHashTbl );
   15.44 +   return getValueFromTable( probeName, _PRTopEnv->probeNameHashTbl );
   15.45   }
   15.46  
   15.47  
   15.48 @@ -186,7 +186,7 @@
   15.49  PR_impl__record_sched_choice_into_probe( int32 probeID, SlaveVP *animatingSlv )
   15.50   { IntervalProbe *probe;
   15.51   
   15.52 -   probe = _PRMasterEnv->intervalProbes[ probeID ];
   15.53 +   probe = _PRTopEnv->intervalProbes[ probeID ];
   15.54     probe->schedChoiceWasRecorded = TRUE;
   15.55     probe->coreNum = animatingSlv->coreAnimatedBy;
   15.56     probe->slaveID = animatingSlv->slaveID;
   15.57 @@ -201,7 +201,7 @@
   15.58   { IntervalProbe *probe;
   15.59  
   15.60           DEBUG__printf( dbgProbes, "record start of interval" )
   15.61 -   probe = _PRMasterEnv->intervalProbes[ probeID ];
   15.62 +   probe = _PRTopEnv->intervalProbes[ probeID ];
   15.63  
   15.64        //record *start* point as last thing, after lookup
   15.65  #ifdef PROBES__USE_TIME_OF_DAY_PROBES
   15.66 @@ -235,7 +235,7 @@
   15.67  
   15.68  #endif
   15.69     
   15.70 -   probe = _PRMasterEnv->intervalProbes[ probeID ];
   15.71 +   probe = _PRTopEnv->intervalProbes[ probeID ];
   15.72  
   15.73  #ifdef PROBES__USE_TIME_OF_DAY_PROBES
   15.74     if( probe->hist != NULL )
   15.75 @@ -272,12 +272,12 @@
   15.76     if( probe->endSecs == 0 ) //just a single point in time
   15.77      {
   15.78        printf( " time point: %.6f\n",
   15.79 -              probe->startSecs - _PRMasterEnv->createPtInSecs );
   15.80 +              probe->startSecs - _PRTopEnv->createPtInSecs );
   15.81      }
   15.82     else if( probe->hist == NULL ) //just an interval
   15.83      {
   15.84        printf( " startSecs: %.6f interval: %.6f\n", 
   15.85 -         (probe->startSecs - _PRMasterEnv->createPtInSecs), probe->interval);
   15.86 +         (probe->startSecs - _PRTopEnv->createPtInSecs), probe->interval);
   15.87      }
   15.88     else  //a full histogram of intervals
   15.89      {
   15.90 @@ -289,7 +289,7 @@
   15.91  PR_impl__print_stats_of_probe( IntervalProbe *probe )
   15.92   { 
   15.93  
   15.94 -//   probe = _PRMasterEnv->intervalProbes[ probeID ];
   15.95 +//   probe = _PRTopEnv->intervalProbes[ probeID ];
   15.96  
   15.97     print_probe_helper( probe );
   15.98   }
   15.99 @@ -298,7 +298,7 @@
  15.100  void
  15.101  PR_impl__print_stats_of_all_probes()
  15.102   {
  15.103 -   forAllInDynArrayDo( _PRMasterEnv->dynIntervalProbesInfo,
  15.104 +   forAllInDynArrayDo( _PRTopEnv->dynIntervalProbesInfo,
  15.105                            (DynArrayFnPtr) &PR_impl__print_stats_of_probe );
  15.106     fflush( stdout );
  15.107   }
    16.1 --- a/Services_Offered_by_PR/Measurement_and_Stats/probes.h	Wed Sep 19 23:12:44 2012 -0700
    16.2 +++ b/Services_Offered_by_PR/Measurement_and_Stats/probes.h	Tue Oct 23 23:46:17 2012 -0700
    16.3 @@ -121,15 +121,15 @@
    16.4  #ifdef PROBES__TURN_ON_STATS_PROBES
    16.5  
    16.6     #define PROBES__Create_Probe_Bookkeeping_Vars \
    16.7 -      _PRMasterEnv->dynIntervalProbesInfo = \
    16.8 -       makePrivDynArrayOfSize( (void***)&(_PRMasterEnv->intervalProbes), 200); \
    16.9 +      _PRTopEnv->dynIntervalProbesInfo = \
   16.10 +       makePrivDynArrayOfSize( (void***)&(_PRTopEnv->intervalProbes), 200); \
   16.11        \
   16.12 -      _PRMasterEnv->probeNameHashTbl = makeHashTable( 1000, &PR_int__free ); \
   16.13 +      _PRTopEnv->probeNameHashTbl = makeHashTable( 1000, &PR_int__free ); \
   16.14        \
   16.15        /*put creation time directly into master env, for fast retrieval*/ \
   16.16     struct timeval timeStamp; \
   16.17     gettimeofday( &(timeStamp), NULL); \
   16.18 -   _PRMasterEnv->createPtInSecs = \
   16.19 +   _PRTopEnv->createPtInSecs = \
   16.20                             timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0);
   16.21  
   16.22     #define PR_WL__record_time_point_into_new_probe( nameStr, animSlv ) \
    17.1 --- a/Services_Offered_by_PR/Memory_Handling/vmalloc.c	Wed Sep 19 23:12:44 2012 -0700
    17.2 +++ b/Services_Offered_by_PR/Memory_Handling/vmalloc.c	Tue Oct 23 23:46:17 2012 -0700
    17.3 @@ -252,7 +252,7 @@
    17.4   {     
    17.5           MEAS__Capture_Pre_Malloc_Point
    17.6     
    17.7 -   MallocArrays* freeLists = _PRMasterEnv->freeLists;
    17.8 +   MallocArrays* freeLists = _PRTopEnv->freeLists;
    17.9     MallocProlog* foundChunk;
   17.10     
   17.11     //Return a small chunk if the requested size is smaller than 128B
   17.12 @@ -308,7 +308,7 @@
   17.13      
   17.14           MEAS__Capture_Pre_Free_Point;
   17.15           
   17.16 -   MallocArrays* freeLists = _PRMasterEnv->freeLists;
   17.17 +   MallocArrays* freeLists = _PRTopEnv->freeLists;
   17.18     MallocProlog *chunkToFree = (MallocProlog*)ptrToFree - 1;
   17.19     uint32 containerIdx;
   17.20     
   17.21 @@ -366,8 +366,8 @@
   17.22  PR_ext__create_free_list()
   17.23  {     
   17.24     //Initialize containers for small chunks and fill with zeros
   17.25 -   _PRMasterEnv->freeLists = (MallocArrays*)malloc( sizeof(MallocArrays) );
   17.26 -   MallocArrays *freeLists = _PRMasterEnv->freeLists;
   17.27 +   _PRTopEnv->freeLists = (MallocArrays*)malloc( sizeof(MallocArrays) );
   17.28 +   MallocArrays *freeLists = _PRTopEnv->freeLists;
   17.29     
   17.30     freeLists->smallChunks = 
   17.31             (MallocProlog**)malloc(SMALL_CHUNK_COUNT*sizeof(MallocProlog*));
    18.1 --- a/Services_Offered_by_PR/Memory_Handling/vmalloc.h	Wed Sep 19 23:12:44 2012 -0700
    18.2 +++ b/Services_Offered_by_PR/Memory_Handling/vmalloc.h	Tue Oct 23 23:46:17 2012 -0700
    18.3 @@ -55,6 +55,7 @@
    18.4  void *
    18.5  PR_int__malloc( size_t sizeRequested );
    18.6  #define PR_PI__malloc  PR_int__malloc
    18.7 +#define PR_SS__malloc  PR_int__malloc
    18.8  
    18.9  void *
   18.10  PR_WL__malloc( int32  sizeRequested ); /*BUG: -- get master lock */
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/Services_Offered_by_PR/Services_Language/PRServ.c	Tue Oct 23 23:46:17 2012 -0700
    19.3 @@ -0,0 +1,517 @@
    19.4 +/*
    19.5 + * Copyright 2010  OpenSourceCodeStewardshipFoundation
    19.6 + *
    19.7 + * Licensed under BSD
    19.8 + */
    19.9 +
   19.10 +#include <stdio.h>
   19.11 +#include <stdlib.h>
   19.12 +#include <malloc.h>
   19.13 +
   19.14 +#include "Queue_impl/PrivateQueue.h"
   19.15 +#include "Hash_impl/PrivateHash.h"
   19.16 +
   19.17 +#include "../../PR.h"
   19.18 +#include "PRServ.h"
   19.19 +
   19.20 +//==========================================================================
   19.21 +void
   19.22 +PRServ__init_Helper();
   19.23 +//==========================================================================
   19.24 +
   19.25 +
   19.26 +
   19.27 +//===========================================================================
   19.28 +
   19.29 +
   19.30 +/*These are the library functions *called in the application*
   19.31 + * 
   19.32 + */
   19.33 +
   19.34 +
   19.35 +//===========================================================================
   19.36 +
   19.37 +int32
   19.38 +PRServ__giveMinWorkUnitCycles( float32 percentOverhead )
   19.39 + {
   19.40 +   return MIN_WORK_UNIT_CYCLES;
   19.41 + }
   19.42 +
   19.43 +int32
   19.44 +PRServ__giveIdealNumWorkUnits()
   19.45 + {
   19.46 +   return NUM_ANIM_SLOTS * NUM_CORES;
   19.47 + }
   19.48 +
   19.49 +int32
   19.50 +PRServ__give_number_of_cores_to_schedule_onto()
   19.51 + {
   19.52 +   return NUM_CORES;
   19.53 + }
   19.54 +
   19.55 +/*For now, use TSC -- later, make these two macros with assembly that first
   19.56 + * saves jump point, and second jumps back several times to get reliable time
   19.57 + */
   19.58 +void
   19.59 +PRServ__begin_primitive()
   19.60 + { PRServSemData *semData;
   19.61 +   
   19.62 +   semData = (PRServSemData *)PR_WL__give_sem_data( animSlv, PRServ_MAGIC_NUMBER);
   19.63 +
   19.64 +   saveLowTimeStampCountInto( semData->primitiveStartTime );
   19.65 + }
   19.66 +
   19.67 +/*Just quick and dirty for now -- make reliable later
   19.68 + * will want this to jump back several times -- to be sure cache is warm
   19.69 + * because don't want comm time included in calc-time measurement -- and
   19.70 + * also to throw out any "weird" values due to OS interrupt or TSC rollover
   19.71 + */
   19.72 +int32
   19.73 +PRServ__end_primitive_and_give_cycles( SlaveVP animSlv )
   19.74 + { int32 endTime, startTime;
   19.75 +   PRServSemData *semData;
   19.76 +   
   19.77 +   //TODO: fix by repeating time-measurement
   19.78 +   saveLowTimeStampCountInto( endTime );
   19.79 +   semData = (PRServSemData *)PR_WL__give_sem_data( animSlv, PRServ_MAGIC_NUMBER);
   19.80 +   startTime = semData->primitiveStartTime;
   19.81 +   return (endTime - startTime - 2*TSC_LOW_CYCLES);
   19.82 + }
   19.83 +
   19.84 +
   19.85 +
   19.86 +//===========================================================================
   19.87 +
   19.88 +SlaveVP *
   19.89 +PRServ__create_thread( TopLevelFnPtr fnPtr,   void *initData,
   19.90 +                        SlaveVP *creatingThd )
   19.91 + { 
   19.92 +   return PRServ__create_thread_w_ID_and_affinity( fnPtr, initData, NO_ID,
   19.93 +                                                        ANY_CORE, creatingThd );
   19.94 + }
   19.95 +
   19.96 +SlaveVP *
   19.97 +PRServ__create_thread_w_ID( TopLevelFnPtr fnPtr,   void *initData, int32 *thdID,
   19.98 +                         SlaveVP *creatingThd )
   19.99 + { 
  19.100 +   return PRServ__create_thread_w_ID_and_affinity( fnPtr, initData, thdID, 
  19.101 +                                                        ANY_CORE, creatingThd );
  19.102 + }
  19.103 +
  19.104 +
  19.105 +SlaveVP *
  19.106 +PRServ__create_thread_w_ID_and_affinity( TopLevelFnPtr fnPtr,   void *initData, 
  19.107 +                    int32 *thdID, int32 coreToAssignOnto, SlaveVP *creatingThd )
  19.108 + { PRServSemReq reqData;
  19.109 +
  19.110 +      //the semantic request data is on the stack and disappears when this
  19.111 +      // call returns -- it's guaranteed to remain in the VP's stack for as
  19.112 +      // long as the VP is suspended.
  19.113 +   reqData.reqType            = create_slave; //know type because in a PR create req
  19.114 +   reqData.coreToAssignOnto   = coreToAssignOnto;
  19.115 +   
  19.116 +   PR_WL__send_create_slaveVP_req( &reqData, fnPtr, initData, thdID, 
  19.117 +                                                creatingThd, PRServ_MAGIC_NUMBER );
  19.118 +   return creatingThd->dataRetFromReq;
  19.119 + }
  19.120 +
  19.121 +/*This is always the last thing done in the code animated by a thread VP.
  19.122 + * Normally, this would be the last line of the thread's top level function.
  19.123 + * But, if the thread exits from any point, it has to do so by calling
  19.124 + * this.
  19.125 + *
  19.126 + *It simply sends a dissipate request, which handles all the state cleanup.
  19.127 + */
  19.128 +void
  19.129 +PRServ__end_thread( SlaveVP *thdToEnd )
  19.130 + {    
  19.131 +   PR_WL__send_dissipate_req( thdToEnd, PRServ_MAGIC_NUMBER );
  19.132 + }
  19.133 +
  19.134 +
  19.135 +
  19.136 +//===========================================================================
  19.137 +
  19.138 +
  19.139 +//======================= task submit and end ==============================
  19.140 +/*
  19.141 + */
  19.142 +void
  19.143 +PRServ__submit_task( PRServTaskType *taskType, void *args, SlaveVP *animSlv)
  19.144 + { PRServSemReq  reqData;
  19.145 +
  19.146 +   reqData.reqType    = submit_task;
  19.147 +   
  19.148 +   reqData.taskType   = taskType;
  19.149 +   reqData.args       = args;
  19.150 +   reqData.callingSlv = animSlv;
  19.151 +   
  19.152 +      //Create task is a special form, so have to pass as parameters, the
  19.153 +      // top-level-fn of task and the data for that fn, plus lang's req,
  19.154 +      // animating slave, and lang's magic number
  19.155 +   PR_WL__send_create_task_req( taskType->fn, args, &reqData, NO_ID, animSlv, PRServ_MAGIC_NUMBER );
  19.156 + }
  19.157 +
  19.158 +void
  19.159 +PRServ__submit_task_with_ID( PRServTaskType *taskType, void *args, int32 *taskID, 
  19.160 +                          SlaveVP     *animSlv)
  19.161 + { PRServSemReq  reqData;
  19.162 + 
  19.163 +   reqData.reqType    = submit_task;
  19.164 +   
  19.165 +   reqData.taskType   = taskType;
  19.166 +   reqData.args       = args;
  19.167 +   reqData.callingSlv = animSlv;
  19.168 + 
  19.169 +   PR_WL__send_create_task_req( taskType->fn, args, &reqData, taskID, animSlv, PRServ_MAGIC_NUMBER );
  19.170 + }
  19.171 +
  19.172 +
  19.173 +/*This call is the last to happen in every task.  It causes the slave to
  19.174 + * suspend and get the next task out of the task-queue.  Notice there is no
  19.175 + * assigner here.. only one slave, no slave ReadyQ, and so on..
  19.176 + *Can either make the assigner take the next task out of the taskQ, or can
  19.177 + * leave all as it is, and make task-end take the next task.
  19.178 + *Note: this fits the case in the new PR for no-context tasks, so will use
  19.179 + * the built-in taskQ of new PR, and should be local and much faster.
  19.180 + * 
  19.181 + *The task-stub is saved in the animSlv, so the request handler will get it
  19.182 + * from there, along with the task-type which has arg types, and so on..
  19.183 + * 
  19.184 + * NOTE: if want, don't need to send the animating SlaveVP around.. 
  19.185 + * instead, can make a single slave per core, and coreCtrlr looks up the
  19.186 + * slave from having the core number.
  19.187 + * 
  19.188 + *But, to stay compatible with all the other PR languages, leave it in..
  19.189 + */
  19.190 +void
  19.191 +PRServ__end_task( SlaveVP *animSlv )
  19.192 + { PRServSemReq  reqData;
  19.193 +
  19.194 +   reqData.reqType      = end_task;
  19.195 +   reqData.callingSlv   = animSlv;
  19.196 +   
  19.197 +   PR_WL__send_end_task_request( &reqData, animSlv, PRServ_MAGIC_NUMBER );
  19.198 + }
  19.199 +
  19.200 +
  19.201 +/*Waits for all tasks that are direct children to end, then resumes calling
  19.202 + * task or thread
  19.203 + */
  19.204 +void
  19.205 +PRServ__taskwait(SlaveVP *animSlv)
  19.206 + {
  19.207 +    PRServSemReq  reqData;
  19.208 +
  19.209 +   reqData.reqType      = taskwait;
  19.210 +   reqData.callingSlv   = animSlv;
  19.211 +   
  19.212 +   PR_WL__send_sem_request( &reqData, animSlv, PRServ_MAGIC_NUMBER );
  19.213 + }
  19.214 +
  19.215 +
  19.216 +
  19.217 +//==========================  send and receive ============================
  19.218 +//
  19.219 +
  19.220 +inline int32 *
  19.221 +PRServ__give_self_taskID( SlaveVP *animSlv )
  19.222 + {
  19.223 +   return PR__give_task_ID( animSlv, PRServ_MAGIC_NUMBER );
  19.224 + }
  19.225 +
  19.226 +//================================ send ===================================
  19.227 +
  19.228 +void
  19.229 +PRServ__send_of_type_to( void *msg, const int32 type, int32 *receiverID,
  19.230 +                      SlaveVP *senderSlv )
  19.231 + { PRServSemReq  reqData;
  19.232 +
  19.233 +   reqData.reqType    = send_type_to;
  19.234 +   
  19.235 +   reqData.msg        = msg;
  19.236 +   reqData.msgType    = type;
  19.237 +   reqData.receiverID = receiverID;
  19.238 +   reqData.senderSlv  = senderSlv;
  19.239 +   
  19.240 +   reqData.nextReqInHashEntry = NULL;
  19.241 +
  19.242 +   PR_WL__send_sem_request( &reqData, senderSlv, PRServ_MAGIC_NUMBER );
  19.243 +
  19.244 +      //When come back from suspend, no longer own data reachable from msg
  19.245 + }
  19.246 +
  19.247 +void
  19.248 +PRServ__send_from_to( void *msg, int32 *senderID, int32 *receiverID, SlaveVP *senderSlv )
  19.249 + { PRServSemReq  reqData;
  19.250 +
  19.251 +   reqData.reqType     = send_from_to;
  19.252 +   
  19.253 +   reqData.msg         = msg;
  19.254 +   reqData.senderID    = senderID;
  19.255 +   reqData.receiverID  = receiverID;
  19.256 +   reqData.senderSlv   = senderSlv;
  19.257 +
  19.258 +   reqData.nextReqInHashEntry = NULL;
  19.259 +
  19.260 +   PR_WL__send_sem_request( &reqData, senderSlv, PRServ_MAGIC_NUMBER );
  19.261 + }
  19.262 +
  19.263 +
  19.264 +//================================ receive ================================
  19.265 +
  19.266 +/*The "type" version of send and receive creates a many-to-one relationship.
  19.267 + * The sender is anonymous, and many sends can stack up, waiting to be
  19.268 + * received.  The same receiver can also have send from-to's
  19.269 + * waiting for it, and those will be kept separate from the "type"
  19.270 + * messages.
  19.271 + */
  19.272 +void *
  19.273 +PRServ__receive_type_to( const int32 type, int32* receiverID, SlaveVP *receiverSlv )
  19.274 + {       DEBUG__printf1(dbgRqstHdlr,"WL: receive type to %d",receiverID[1] );
  19.275 +   PRServSemReq  reqData;
  19.276 +
  19.277 +   reqData.reqType     = receive_type_to;
  19.278 +   
  19.279 +   reqData.msgType     = type;
  19.280 +   reqData.receiverID  = receiverID;
  19.281 +   reqData.receiverSlv = receiverSlv;
  19.282 +   
  19.283 +   reqData.nextReqInHashEntry = NULL;
  19.284 +
  19.285 +   PR_WL__send_sem_request( &reqData, receiverSlv, PRServ_MAGIC_NUMBER );
  19.286 +   
  19.287 +   return receiverSlv->dataRetFromReq;
  19.288 + }
  19.289 +
  19.290 +
  19.291 +
  19.292 +/*Call this at the point a receiving task wants in-coming data.
  19.293 + * Use this from-to form when know senderID -- it makes a direct channel
  19.294 + * between sender and receiver.
  19.295 + */
  19.296 +void *
  19.297 +PRServ__receive_from_to( int32 *senderID, int32 *receiverID, SlaveVP *receiverSlv )
  19.298 + { 
  19.299 +   PRServSemReq  reqData;
  19.300 +
  19.301 +   reqData.reqType     = receive_from_to;
  19.302 +
  19.303 +   reqData.senderID    = senderID;
  19.304 +   reqData.receiverID  = receiverID;
  19.305 +   reqData.receiverSlv = receiverSlv;
  19.306 +
  19.307 +   reqData.nextReqInHashEntry = NULL;
  19.308 +      DEBUG__printf2(dbgRqstHdlr,"WL: receive from %d to: %d", reqData.senderID[1], reqData.receiverID[1]);
  19.309 +      
  19.310 +   PR_WL__send_sem_request( &reqData, receiverSlv, PRServ_MAGIC_NUMBER );
  19.311 +
  19.312 +   return receiverSlv->dataRetFromReq;
  19.313 + }
  19.314 +
  19.315 +
  19.316 +
  19.317 +
  19.318 +//==========================================================================
  19.319 +//
  19.320 +/*A function singleton is a function whose body executes exactly once, on a
  19.321 + * single core, no matter how many times the fuction is called and no
  19.322 + * matter how many cores or the timing of cores calling it.
  19.323 + *
  19.324 + *A data singleton is a ticket attached to data.  That ticket can be used
  19.325 + * to get the data through the function exactly once, no matter how many
  19.326 + * times the data is given to the function, and no matter the timing of
  19.327 + * trying to get the data through from different cores.
  19.328 + */
  19.329 +
  19.330 +/*asm function declarations*/
  19.331 +void asm_save_ret_to_singleton(PRServSingleton *singletonPtrAddr);
  19.332 +void asm_write_ret_from_singleton(PRServSingleton *singletonPtrAddr);
  19.333 +
  19.334 +/*Fn singleton uses ID as index into array of singleton structs held in the
  19.335 + * semantic environment.
  19.336 + */
  19.337 +void
  19.338 +PRServ__start_fn_singleton( int32 singletonID,   SlaveVP *animSlv )
  19.339 + {
  19.340 +   PRServSemReq  reqData;
  19.341 +
  19.342 +      //
  19.343 +   reqData.reqType     = singleton_fn_start;
  19.344 +   reqData.singletonID = singletonID;
  19.345 +
  19.346 +   PR_WL__send_sem_request( &reqData, animSlv, PRServ_MAGIC_NUMBER );
  19.347 +   if( animSlv->dataRetFromReq ) //will be 0 or addr of label in end singleton
  19.348 +    {
  19.349 +       PRServSemEnv *semEnv = PR_WL__give_sem_env_for( animSlv, PRServ_MAGIC_NUMBER );
  19.350 +       asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
  19.351 +    }
  19.352 + }
  19.353 +
  19.354 +/*Data singleton hands addr of loc holding a pointer to a singleton struct.
  19.355 + * The start_data_singleton makes the structure and puts its addr into the
  19.356 + * location.
  19.357 + */
  19.358 +void
  19.359 +PRServ__start_data_singleton( PRServSingleton **singletonAddr,  SlaveVP *animSlv )
  19.360 + {
  19.361 +   PRServSemReq  reqData;
  19.362 +
  19.363 +   if( *singletonAddr && (*singletonAddr)->hasFinished )
  19.364 +       goto JmpToEndSingleton;
  19.365 +   
  19.366 +   reqData.reqType          = singleton_data_start;
  19.367 +   reqData.singletonPtrAddr = singletonAddr;
  19.368 +
  19.369 +   PR_WL__send_sem_request( &reqData, animSlv, PRServ_MAGIC_NUMBER );
  19.370 +   if( animSlv->dataRetFromReq ) //either 0 or end singleton's return addr
  19.371 +    {    //Assembly code changes the return addr on the stack to the one
  19.372 +         // saved into the singleton by the end-singleton-fn
  19.373 +         //The return addr is at 0x4(%%ebp)
  19.374 +        JmpToEndSingleton:
  19.375 +          asm_write_ret_from_singleton(*singletonAddr);
  19.376 +    }
  19.377 +   //now, simply return
  19.378 +   //will exit either from the start singleton call or the end-singleton call
  19.379 + }
  19.380 +
  19.381 +/*Uses ID as index into array of flags.  If flag already set, resumes from
  19.382 + * end-label.  Else, sets flag and resumes normally.
  19.383 + *
  19.384 + *Note, this call cannot be inlined because the instr addr at the label
  19.385 + * inside is shared by all invocations of a given singleton ID.
  19.386 + */
  19.387 +void
  19.388 +PRServ__end_fn_singleton( int32 singletonID, SlaveVP *animSlv )
  19.389 + {
  19.390 +   PRServSemReq  reqData;
  19.391 +
  19.392 +      //don't need this addr until after at least one singleton has reached
  19.393 +      // this function
  19.394 +   PRServSemEnv *semEnv = PR_WL__give_sem_env_for( animSlv, PRServ_MAGIC_NUMBER );
  19.395 +   asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
  19.396 +
  19.397 +   reqData.reqType     = singleton_fn_end;
  19.398 +   reqData.singletonID = singletonID;
  19.399 +
  19.400 +   PR_WL__send_sem_request( &reqData, animSlv, PRServ_MAGIC_NUMBER );
  19.401 +
  19.402 +EndSingletonInstrAddr:
  19.403 +   return;
  19.404 + }
  19.405 +
  19.406 +void
  19.407 +PRServ__end_data_singleton(  PRServSingleton **singletonPtrAddr, SlaveVP *animSlv )
  19.408 + {
  19.409 +   PRServSemReq  reqData;
  19.410 +
  19.411 +      //don't need this addr until after singleton struct has reached
  19.412 +      // this function for first time
  19.413 +      //do assembly that saves the return addr of this fn call into the
  19.414 +      // data singleton -- that data-singleton can only be given to exactly
  19.415 +      // one instance in the code of this function.  However, can use this
  19.416 +      // function in different places for different data-singletons.
  19.417 +   asm_save_ret_to_singleton(*singletonPtrAddr);
  19.418 +
  19.419 +   reqData.reqType          = singleton_data_end;
  19.420 +   reqData.singletonPtrAddr = singletonPtrAddr;
  19.421 +
  19.422 +   PR_WL__send_sem_request( &reqData, animSlv, PRServ_MAGIC_NUMBER );
  19.423 + }
  19.424 +
  19.425 +/*This executes the function in the masterVP, so it executes in isolation
  19.426 + * from any other copies -- only one copy of the function can ever execute
  19.427 + * at a time.
  19.428 + *
  19.429 + *It suspends to the master, and the request handler takes the function
  19.430 + * pointer out of the request and calls it, then resumes the VP.
  19.431 + *Only very short functions should be called this way -- for longer-running
  19.432 + * isolation, use transaction-start and transaction-end, which run the code
  19.433 + * between as work-code.
  19.434 + */
  19.435 +void
  19.436 +PRServ__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
  19.437 +                                    void *data, SlaveVP *animSlv )
  19.438 + {
  19.439 +   PRServSemReq  reqData;
  19.440 +
  19.441 +      //
  19.442 +   reqData.reqType          = atomic;
  19.443 +   reqData.fnToExecInMaster = ptrToFnToExecInMaster;
  19.444 +   reqData.dataForFn        = data;
  19.445 +
  19.446 +   PR_WL__send_sem_request( &reqData, animSlv, PRServ_MAGIC_NUMBER );
  19.447 + }
  19.448 +
  19.449 +
  19.450 +/*This suspends to the master.
  19.451 + *First, it looks at the VP's data, to see the highest transactionID that VP
  19.452 + * already has entered.  If the current ID is not larger, it throws an
  19.453 + * exception stating a bug in the code.  Otherwise it puts the current ID
  19.454 + * there, and adds the ID to a linked list of IDs entered -- the list is
  19.455 + * used to check that exits are properly ordered.
  19.456 + *Next it is uses transactionID as index into an array of transaction
  19.457 + * structures.
  19.458 + *If the "VP_currently_executing" field is non-null, then put requesting VP
  19.459 + * into queue in the struct.  (At some point a holder will request
  19.460 + * end-transaction, which will take this VP from the queue and resume it.)
  19.461 + *If NULL, then write requesting into the field and resume.
  19.462 + */
  19.463 +void
  19.464 +PRServ__start_transaction( int32 transactionID, SlaveVP *animSlv )
  19.465 + {
  19.466 +   PRServSemReq  reqData;
  19.467 +
  19.468 +      //
  19.469 +   reqData.callingSlv  = animSlv;
  19.470 +   reqData.reqType     = trans_start;
  19.471 +   reqData.transID     = transactionID;
  19.472 +
  19.473 +   PR_WL__send_sem_request( &reqData, animSlv, PRServ_MAGIC_NUMBER );
  19.474 + }
  19.475 +
  19.476 +/*This suspends to the master, then uses transactionID as index into an
  19.477 + * array of transaction structures.
  19.478 + *It looks at VP_currently_executing to be sure it's same as requesting VP.
  19.479 + * If different, throws an exception, stating there's a bug in the code.
  19.480 + *Next it looks at the queue in the structure.
  19.481 + *If it's empty, it sets VP_currently_executing field to NULL and resumes.
  19.482 + *If something in, gets it, sets VP_currently_executing to that VP, then
  19.483 + * resumes both.
  19.484 + */
  19.485 +void
  19.486 +PRServ__end_transaction( int32 transactionID, SlaveVP *animSlv )
  19.487 + {
  19.488 +   PRServSemReq  reqData;
  19.489 +
  19.490 +      //
  19.491 +   reqData.callingSlv      = animSlv;
  19.492 +   reqData.reqType     = trans_end;
  19.493 +   reqData.transID     = transactionID;
  19.494 +
  19.495 +   PR_WL__send_sem_request( &reqData, animSlv, PRServ_MAGIC_NUMBER );
  19.496 + }
  19.497 +
  19.498 +//======================== Internal ==================================
  19.499 +/*
  19.500 + */
  19.501 +
  19.502 +SlaveVP *
  19.503 +PRServ__create_slave_with_affinity( TopLevelFnPtr fnPtr, void *initData,
  19.504 +                        SlaveVP *creatingSlv,  int32  coreToAssignOnto )
  19.505 + { PRServSemReq  reqData;
  19.506 +
  19.507 +      //the semantic request data is on the stack and disappears when this
  19.508 +      // call returns -- it's guaranteed to remain in the VP's stack for as
  19.509 +      // long as the VP is suspended.
  19.510 +   reqData.reqType            = create_slave_w_aff; //not used, May 2012
  19.511 +   reqData.coreToAssignOnto   = coreToAssignOnto;
  19.512 +   reqData.fnPtr              = fnPtr;
  19.513 +   reqData.initData           = initData;
  19.514 +   reqData.callingSlv         = creatingSlv;
  19.515 +
  19.516 +   PR_WL__send_create_slaveVP_req( &reqData, creatingSlv, PRServ_MAGIC_NUMBER );
  19.517 +
  19.518 +   return creatingSlv->dataRetFromReq;
  19.519 + }
  19.520 +
    20.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.2 +++ b/Services_Offered_by_PR/Services_Language/PRServ.h	Tue Oct 23 23:46:17 2012 -0700
    20.3 @@ -0,0 +1,296 @@
    20.4 +/*
    20.5 + *  Copyright 2009 OpenSourceStewardshipFoundation.org
    20.6 + *  Licensed under GNU General Public License version 2
    20.7 + *
    20.8 + * Author: seanhalle@yahoo.com
    20.9 + *
   20.10 + */
   20.11 +
   20.12 +#ifndef _PRServ_H
   20.13 +#define	_PRServ_H
   20.14 +
   20.15 +#include "Queue_impl/PrivateQueue.h"
   20.16 +#include "Hash_impl/PrivateHash.h"
   20.17 +#include "PR_impl/PR.h"
   20.18 +#include "Measurement/dependency.h"
   20.19 +
   20.20 +
   20.21 +//===========================================================================
   20.22 +   //uniquely identifies PRServ -- should be a jenkins char-hash of "PRServ" onto int32
   20.23 +#define PRServ_MAGIC_NUMBER 0000000002
   20.24 +
   20.25 +#define NUM_STRUCS_IN_SEM_ENV 1000
   20.26 +
   20.27 +
   20.28 +//===========================================================================
   20.29 +/*This header defines everything specific to the PRServ semantic plug-in
   20.30 + */
   20.31 +typedef struct _PRServSemReq    PRServSemReq;
   20.32 +//typedef struct _PRServTaskStub  PRServTaskStub;
   20.33 +//typedef void  (*PRServTaskFnPtr )   ( void *, SlaveVP *);
   20.34 +//typedef void  (*PtrToAtomicFn )  ( void * ); //executed atomically in master
   20.35 +//===========================================================================
   20.36 +
   20.37 +
   20.38 +//===========================================================================
   20.39 +
   20.40 +/*This is placed into semData, used for dependencies and wait construct*/
   20.41 +struct _PRServTaskStub
   20.42 + {
   20.43 + //====== The first fields must match PRLangMetaTask fields ======
   20.44 +   int32             langMagicNumber; //magic num must be 1st field of langMetaTask
   20.45 +   PRMetaTask       *protoMetaTask;   //back-link must always be 2nd field
   20.46 + //====== end PRLangMetaTask fields =========
   20.47 + 
   20.48 + };
   20.49 +
   20.50 +
   20.51 +enum PRServReqType
   20.52 + {
   20.53 +   submit_task = 1,
   20.54 +   end_task,
   20.55 +   create_slave,
   20.56 +   create_slave_w_aff,
   20.57 +   dissipate_slave,
   20.58 +   //===============================
   20.59 +   send_type_to,
   20.60 +   receive_type_to,
   20.61 +   send_from_to,
   20.62 +   receive_from_to,
   20.63 +   //===============================
   20.64 +   taskwait,
   20.65 +   malloc_req,
   20.66 +   free_req,
   20.67 +   singleton_fn_start,
   20.68 +   singleton_fn_end,
   20.69 +   singleton_data_start,
   20.70 +   singleton_data_end,
   20.71 +   atomic,
   20.72 +   trans_start,
   20.73 +   trans_end
   20.74 + };
   20.75 +
   20.76 +struct _PRServSemReq
   20.77 + { enum PRServReqType    reqType;
   20.78 +   SlaveVP           *callingSlv;
   20.79 +   PRServTaskType       *taskType;
   20.80 +   void              *args;
   20.81 +//   PRServTaskStub       *taskStub;  //not needed -- get via PR accessor from slv
   20.82 +   
   20.83 +   SlaveVP           *senderSlv;
   20.84 +   SlaveVP           *receiverSlv;
   20.85 +   int32             *senderID;
   20.86 +   int32             *receiverID;
   20.87 +   int32              msgType;
   20.88 +   void              *msg;
   20.89 +   PRServSemReq         *nextReqInHashEntry;
   20.90 +//In PRReq:   int32             *taskID;
   20.91 +   
   20.92 +   TopLevelFnPtr      fnPtr;
   20.93 +   void              *initData;
   20.94 +   int32              coreToAssignOnto;
   20.95 +
   20.96 +//These, below, should move to util language..
   20.97 +   int32              sizeToMalloc;
   20.98 +   void              *ptrToFree;
   20.99 +
  20.100 +   int32              singletonID;
  20.101 +   PRServSingleton     **singletonPtrAddr;
  20.102 +
  20.103 +   PtrToAtomicFn      fnToExecInMaster;
  20.104 +   void              *dataForFn;
  20.105 +
  20.106 +   int32              transID;
  20.107 + }
  20.108 +/* PRServSemReq */;
  20.109 +
  20.110 +
  20.111 +typedef struct
  20.112 + { PRSemEnv        *protoSemEnv;
  20.113 +   PrivQueueStruc  *slavesReadyToResumeQ; //Shared (slaves not pinned)
  20.114 +   PrivQueueStruc  *freeTaskSlvRecycleQ;    //Shared
  20.115 +   PrivQueueStruc  *taskReadyQ;           //Shared (tasks not pinned)
  20.116 +   HashTable       *argPtrHashTbl;
  20.117 +   HashTable       *commHashTbl;
  20.118 +   int32            nextCoreToGetNewSlv;
  20.119 +   int32            primitiveStartTime;
  20.120 +
  20.121 +                       //fix limit on num with dynArray
  20.122 +   PRServSingleton     fnSingletons[NUM_STRUCS_IN_SEM_ENV];
  20.123 +   PRServTrans         transactionStrucs[NUM_STRUCS_IN_SEM_ENV];
  20.124 +   
  20.125 +   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  20.126 +   ListOfArrays* unitList;
  20.127 +   ListOfArrays* ctlDependenciesList;
  20.128 +   ListOfArrays* commDependenciesList;
  20.129 +   NtoN** ntonGroups;
  20.130 +   PrivDynArrayInfo* ntonGroupsInfo;
  20.131 +   ListOfArrays* dynDependenciesList;
  20.132 +   Unit last_in_slot[NUM_CORES * NUM_ANIM_SLOTS];
  20.133 +   ListOfArrays* hwArcs;
  20.134 +   #endif
  20.135 +
  20.136 +   #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
  20.137 +   ListOfArrays* counterList[NUM_CORES];
  20.138 +   #endif
  20.139 + }
  20.140 +PRServSemEnv;
  20.141 +
  20.142 +
  20.143 +typedef struct _TransListElem TransListElem;
  20.144 +struct _TransListElem
  20.145 + {
  20.146 +   int32          transID;
  20.147 +   TransListElem *nextTrans;
  20.148 + };
  20.149 +//TransListElem
  20.150 + 
  20.151 +/* PR now handles what this used to be used for
  20.152 +enum PRServSlvType
  20.153 + { FreeTaskSlv = 1,
  20.154 +   SlotTaskSlv,
  20.155 +   ThreadSlv
  20.156 + };
  20.157 +*/
  20.158 + 
  20.159 +typedef struct
  20.160 + {
  20.161 +   int32            highestTransEntered;
  20.162 +   TransListElem   *lastTransEntered;
  20.163 +   int32            primitiveStartTime;
  20.164 +//   PRServTaskStub     *taskStub; //get from slave via PR accessor
  20.165 + }
  20.166 +PRServSemData;
  20.167 + 
  20.168 +//===========================================================================
  20.169 +
  20.170 +void
  20.171 +PRServ__create_seed_slave_and_do_work( TopLevelFnPtr fn, void *initData );
  20.172 +
  20.173 +int32
  20.174 +PRServ__giveMinWorkUnitCycles( float32 percentOverhead );
  20.175 +
  20.176 +void
  20.177 +PRServ__begin_primitive();
  20.178 +
  20.179 +int32
  20.180 +PRServ__end_primitive_and_give_cycles();
  20.181 +
  20.182 +int32
  20.183 +PRServ__giveIdealNumWorkUnits();
  20.184 +
  20.185 +int32
  20.186 +PRServ__give_number_of_cores_to_schedule_onto();
  20.187 +
  20.188 +//=======================
  20.189 +
  20.190 +void
  20.191 +PRServ__start( SlaveVP *seedSlv );
  20.192 +
  20.193 +void
  20.194 +PRServ__cleanup_after_shutdown();
  20.195 +
  20.196 +//=======================
  20.197 +
  20.198 +SlaveVP *
  20.199 +PRServ__create_thread( TopLevelFnPtr fnPtr,   void *initData,
  20.200 +                                                     SlaveVP *creatingThd );
  20.201 +
  20.202 +void
  20.203 +PRServ__end_thread( SlaveVP *thdToEnd );
  20.204 +
  20.205 +//=======================
  20.206 +
  20.207 +#define PRServ__malloc( numBytes, callingSlave ) PR_App__malloc( numBytes, callingSlave)
  20.208 +
  20.209 +#define PRServ__free(ptrToFree, callingSlave ) PR_App__free( ptrToFree, callingSlave )
  20.210 +
  20.211 +
  20.212 +//=======================
  20.213 +void
  20.214 +PRServ__submit_task( PRServTaskType *taskType, void *args, SlaveVP *animSlv);
  20.215 +
  20.216 +inline int32 *
  20.217 +PRServ__create_taskID_of_size( int32 numInts, SlaveVP *animSlv );
  20.218 +
  20.219 +void
  20.220 +PRServ__submit_task_with_ID( PRServTaskType *taskType, void *args, int32 *taskID, 
  20.221 +                          SlaveVP     *animSlv);
  20.222 +
  20.223 +void
  20.224 +PRServ__end_task( SlaveVP *animSlv );
  20.225 +
  20.226 +//=========================
  20.227 +void
  20.228 +PRServ__taskwait(SlaveVP *animSlv);
  20.229 +
  20.230 +
  20.231 +inline int32 *
  20.232 +PRServ__give_self_taskID( SlaveVP *animSlv );
  20.233 +
  20.234 +void
  20.235 +PRServ__send_of_type_to( void *msg, const int32 type, int32 *receiverID,
  20.236 +                      SlaveVP *senderSlv );
  20.237 +
  20.238 +void
  20.239 +PRServ__send_from_to( void *msg, int32 *senderID, int32 *receiverID, SlaveVP *senderSlv );
  20.240 +
  20.241 +void *
  20.242 +PRServ__receive_type_to( const int32 type, int32* receiverID, SlaveVP *receiverSlv );
  20.243 +
  20.244 +void *
  20.245 +PRServ__receive_from_to( int32 *senderID, int32 *receiverID, SlaveVP *receiverSlv );
  20.246 +
  20.247 +//======================= Concurrency Stuff ======================
  20.248 +void
  20.249 +PRServ__start_fn_singleton( int32 singletonID, SlaveVP *animSlv );
  20.250 +
  20.251 +void
  20.252 +PRServ__end_fn_singleton( int32 singletonID, SlaveVP *animSlv );
  20.253 +
  20.254 +void
  20.255 +PRServ__start_data_singleton( PRServSingleton **singeltonAddr, SlaveVP *animSlv );
  20.256 +
  20.257 +void
  20.258 +PRServ__end_data_singleton( PRServSingleton **singletonAddr, SlaveVP *animSlv );
  20.259 +
  20.260 +void
  20.261 +PRServ__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
  20.262 +                                    void *data, SlaveVP *animSlv );
  20.263 +
  20.264 +void
  20.265 +PRServ__start_transaction( int32 transactionID, SlaveVP *animSlv );
  20.266 +
  20.267 +void
  20.268 +PRServ__end_transaction( int32 transactionID, SlaveVP *animSlv );
  20.269 +
  20.270 +
  20.271 +//=========================  Internal use only  =============================
  20.272 +void
  20.273 +PRServ__Request_Handler( SlaveVP *requestingSlv, void *_semEnv );
  20.274 +
  20.275 +SlaveVP *
  20.276 +PRServ__assign_work_to_slot( void *_semEnv, AnimSlot *slot );
  20.277 +
  20.278 +SlaveVP*
  20.279 +PRServ__create_slave_helper( TopLevelFnPtr fnPtr, void *initData,
  20.280 +                          PRServSemEnv *semEnv,    int32 coreToAssignOnto );
  20.281 +
  20.282 +PRMetaTask *
  20.283 +PR_int__create_generic_slave_meta_task( void *initData );
  20.284 +
  20.285 +
  20.286 +SlaveVP *
  20.287 +PRServ__create_slave_with( TopLevelFnPtr fnPtr, void *initData,
  20.288 +                          SlaveVP *creatingSlv );
  20.289 +
  20.290 +SlaveVP *
  20.291 +PRServ__create_slave_with_affinity( TopLevelFnPtr fnPtr,    void *initData,
  20.292 +                            SlaveVP *creatingSlv, int32 coreToAssignOnto);
  20.293 +
  20.294 +//=====================  Measurement of Lang Overheads  =====================
  20.295 +#include "Measurement/PRServ_Measurement.h"
  20.296 +
  20.297 +//===========================================================================
  20.298 +#endif	/* _PRServ_H */
  20.299 +
    21.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.2 +++ b/Services_Offered_by_PR/Services_Language/PRServ_PluginFns.c	Tue Oct 23 23:46:17 2012 -0700
    21.3 @@ -0,0 +1,211 @@
    21.4 +/*
    21.5 + * Copyright 2010  OpenSourceCodeStewardshipFoundation
    21.6 + *
    21.7 + * Licensed under BSD
    21.8 + */
    21.9 +
   21.10 +#include <stdio.h>
   21.11 +#include <stdlib.h>
   21.12 +
   21.13 +#include "Queue_impl/PrivateQueue.h"
   21.14 +#include "PRServ.h"
   21.15 +#include "PRServ_Request_Handlers.h"
   21.16 +
   21.17 +//=========================== Local Fn Prototypes ===========================
   21.18 +void
   21.19 +resume_slaveVP( SlaveVP *slave, PRServSemEnv *semEnv );
   21.20 +
   21.21 +inline void
   21.22 +handleSemReq( PRReqst *req, SlaveVP *requestingSlv, PRServSemEnv *semEnv );
   21.23 +
   21.24 +inline void
   21.25 +handleDissipate(                SlaveVP *requestingSlv, PRServSemEnv *semEnv );
   21.26 +
   21.27 +inline void
   21.28 +handleCreate(    PRReqst *req, SlaveVP *requestingSlv, PRServSemEnv *semEnv );
   21.29 +
   21.30 +//============================== Assigner ==================================
   21.31 +//
   21.32 +/*The assigner is complicated by having both tasks and explicitly created
   21.33 + * VPs, and by tasks being able to suspend.
   21.34 + *It can't use an explicit slave to animate a task because of stack
   21.35 + * pollution. So, it has to keep the two kinds separate.
   21.36 + * 
   21.37 + *Q: one assigner for both tasks and slaves, or separate?
   21.38 + * 
   21.39 + *Simplest way for the assigner logic is with a Q for extra empty task
   21.40 + * slaves, and another Q for slaves of both types that are ready to resume.
   21.41 + *
   21.42 + *Keep a current task slave for each anim slot. The request handler manages
   21.43 + * it by pulling from the extraTaskSlvQ when a task suspends, or else
   21.44 + * creating a new task slave if taskSlvQ empty. 
   21.45 + *Assigner only assigns a task to the current task slave for the slot.
   21.46 + *If no more tasks, then takes a ready to resume slave, if also none of them
   21.47 + * then dissipates extra task slaves (one per invocation).
   21.48 + *Shutdown condition is: must have no suspended tasks, and no suspended
   21.49 + * explicit slaves and no more tasks in taskQ.  Will only have the masters
   21.50 + * plus a current task slave for each slot.. detects this condition. 
   21.51 + * 
   21.52 + *Having the two types of slave is part of having communications directly
   21.53 + * between tasks, and tasks to explicit slaves, which requires the ability
   21.54 + * to suspend both kinds, but also to keep explicit slave stacks clean from
   21.55 + * the junk tasks are allowed to leave behind.
   21.56 + */
   21.57 +SlaveVP *
   21.58 +PRServ__assign_work_to_slot( void *_semEnv, AnimSlot *slot )
   21.59 + { SlaveVP     *returnSlv;
   21.60 +   PRServSemEnv   *semEnv;
   21.61 +   int32        coreNum, slotNum;
   21.62 +   PRMetaTask  *returnMetaTask = NULL, *newTaskStub;
   21.63 +  
   21.64 +   coreNum = slot->coreSlotIsOn;
   21.65 +   slotNum = slot->slotIdx;
   21.66 +   
   21.67 +   semEnv  = (PRServSemEnv *)_semEnv;
   21.68 +   
   21.69 +      //Check for suspended slaves that are ready to resume
   21.70 +   returnSlv = readPrivQ( semEnv->slavesReadyToResumeQ );
   21.71 +   if( returnSlv != NULL )  //Yes, have a slave, so return it.
   21.72 +    { returnSlv->coreAnimatedBy   = coreNum;
   21.73 +      returnMetaTask = returnSlv->metaTask;
   21.74 +      goto ReturnTheMetaTask;
   21.75 +    }
   21.76 +   
   21.77 +   newTaskStub = readPrivQ( semEnv->taskReadyQ );
   21.78 +   if( newTaskStub != NULL )
   21.79 +    { returnMetaTask = newTaskStub->protoMetaTask;
   21.80 +      goto ReturnTheMetaTask;
   21.81 +    }
   21.82 +
   21.83 +ReturnTheMetaTask:  //doing gotos to here should help with holistic..
   21.84 +
   21.85 +   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   21.86 +   //This no longer works -- should be moved into PR in master
   21.87 +   //This assumes the task has already been assigned to a slave, which happens
   21.88 +   // inside Master..
   21.89 +   if( returnMetaTask == NULL )
   21.90 +    { returnSlv = semEnv->process->idleSlv[coreNum][slotNum]; 
   21.91 +    
   21.92 +         //things that would normally happen in resume(), but these VPs
   21.93 +         // never go there
   21.94 +      returnSlv->numTimesAssignedToASlot++;
   21.95 +      Unit newU;
   21.96 +      newU.vp = returnSlv->slaveID;
   21.97 +      newU.task = returnSlv->numTimesAssignedToASlot;
   21.98 +      addToListOfArrays(Unit,newU,semEnv->unitList);
   21.99 +
  21.100 +      if (returnSlv->numTimesAssignedToASlot > 1)
  21.101 +       { Dependency newD;
  21.102 +         newD.from_vp = returnSlv->slaveID;
  21.103 +         newD.from_task = returnSlv->numTimesAssignedToASlot - 1;
  21.104 +         newD.to_vp = returnSlv->slaveID;
  21.105 +         newD.to_task = returnSlv->numTimesAssignedToASlot;
  21.106 +         addToListOfArrays(Dependency, newD, semEnv->ctlDependenciesList);  
  21.107 +       }
  21.108 +      returnMetaTask = returnSlv->metaTask;
  21.109 +    }
  21.110 +   else //returnSlv != NULL
  21.111 +    { //assignSlv->numTimesAssigned++;
  21.112 +      Unit prev_in_slot = 
  21.113 +         semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum];
  21.114 +      if(prev_in_slot.vp != 0)
  21.115 +       { Dependency newD;
  21.116 +         newD.from_vp = prev_in_slot.vp;
  21.117 +         newD.from_task = prev_in_slot.task;
  21.118 +         newD.to_vp = returnSlv->slaveID;
  21.119 +         newD.to_task = returnSlv->numTimesAssignedToASlot;
  21.120 +         addToListOfArrays(Dependency,newD,semEnv->hwArcs);   
  21.121 +       }
  21.122 +      prev_in_slot.vp = returnSlv->slaveID;
  21.123 +      prev_in_slot.task = returnSlv->numTimesAssignedToASlot;
  21.124 +      semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum] =
  21.125 +         prev_in_slot;        
  21.126 +    }
  21.127 +   #endif
  21.128 +   return( returnMetaTask );
  21.129 + }
  21.130 +
  21.131 +
  21.132 +//===========================  Request Handler  ============================
  21.133 +//
  21.134 +/*
  21.135 + * (Not inline because invoked indirectly via a pointer)
  21.136 + */
  21.137 +
  21.138 +void
  21.139 +handleSemReq( PRReqst *req, SlaveVP *reqSlv, PRServSemEnv *semEnv )
  21.140 + { PRServSemReq *semReq;
  21.141 +
  21.142 +   semReq = PR_PI__take_sem_reqst_from(req);
  21.143 +   if( semReq == NULL ) return;
  21.144 +   switch( semReq->reqType )  //sem handlers are all in other file
  21.145 +    {
  21.146 +      case send_type_to:    handleSendTypeTo(   semReq,         semEnv);
  21.147 +         break;
  21.148 +      case send_from_to:    handleSendFromTo(   semReq,         semEnv);
  21.149 +         break;
  21.150 +      case receive_type_to: handleReceiveTypeTo(semReq,         semEnv);
  21.151 +         break;
  21.152 +      case receive_from_to: handleReceiveFromTo(semReq,         semEnv);
  21.153 +         break;
  21.154 +      case taskwait:        handleTaskwait(     semReq, reqSlv, semEnv);
  21.155 +           break;
  21.156 +         
  21.157 +      //====================================================================
  21.158 +      case malloc_req:      handleMalloc(       semReq, reqSlv, semEnv);
  21.159 +         break;
  21.160 +      case free_req:        handleFree(         semReq, reqSlv, semEnv);
  21.161 +         break;
  21.162 +      case singleton_fn_start:  handleStartFnSingleton(semReq, reqSlv, semEnv);
  21.163 +         break;
  21.164 +      case singleton_fn_end:    handleEndFnSingleton(  semReq, reqSlv, semEnv);
  21.165 +         break;
  21.166 +      case singleton_data_start:handleStartDataSingleton(semReq,reqSlv,semEnv);
  21.167 +         break;
  21.168 +      case singleton_data_end:  handleEndDataSingleton(semReq, reqSlv, semEnv);
  21.169 +         break;
  21.170 +      case atomic:          handleAtomic(       semReq, reqSlv, semEnv);
  21.171 +         break;
  21.172 +      case trans_start:     handleTransStart(   semReq, reqSlv, semEnv);
  21.173 +         break;
  21.174 +      case trans_end:       handleTransEnd(     semReq, reqSlv, semEnv);
  21.175 +         break;
  21.176 +    }
  21.177 + }
  21.178 +
  21.179 +
  21.180 +
  21.181 +
  21.182 +//=========================== Helper ==============================
  21.183 +void
  21.184 +resume_slaveVP( SlaveVP *slave, PRServSemEnv *semEnv )
  21.185 + {
  21.186 +      //both suspended tasks and suspended explicit slaves resumed with this
  21.187 +   writePrivQ( slave, semEnv->slavesReadyToResumeQ );
  21.188 +   if( semEnv->protoSemEnv->hasWork != TRUE ) 
  21.189 +       semEnv->protoSemEnv->hasWork = TRUE;
  21.190 +   
  21.191 +   #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
  21.192 +/*
  21.193 +   int lastRecordIdx = slave->counter_history_array_info->numInArray -1;
  21.194 +   CounterRecord* lastRecord = slave->counter_history[lastRecordIdx];
  21.195 +   saveLowTimeStampCountInto(lastRecord->unblocked_timestamp);
  21.196 +*/
  21.197 +   #endif
  21.198 +   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  21.199 +   slave->numTimesAssignedToASlot++; //Somewhere here!
  21.200 +   Unit newU;
  21.201 +   newU.vp = slave->slaveID;
  21.202 +   newU.task = slave->numTimesAssignedToASlot;
  21.203 +   addToListOfArrays(Unit,newU,semEnv->unitList);
  21.204 +   
  21.205 +   if (slave->numTimesAssignedToASlot > 1)
  21.206 +    { Dependency newD;
  21.207 +      newD.from_vp = slave->slaveID;
  21.208 +      newD.from_task = slave->numTimesAssignedToASlot - 1;
  21.209 +      newD.to_vp = slave->slaveID;
  21.210 +      newD.to_task = slave->numTimesAssignedToASlot;
  21.211 +      addToListOfArrays(Dependency, newD ,semEnv->ctlDependenciesList);  
  21.212 +    }
  21.213 +   #endif
  21.214 + }
    22.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.2 +++ b/Services_Offered_by_PR/Services_Language/PRServ_Request_Handlers.c	Tue Oct 23 23:46:17 2012 -0700
    22.3 @@ -0,0 +1,1285 @@
    22.4 +/*
    22.5 + * Copyright 2010  OpenSourceCodeStewardshipFoundation
    22.6 + *
    22.7 + * Licensed under BSD
    22.8 + */
    22.9 +
   22.10 +#include <stdio.h>
   22.11 +#include <stdlib.h>
   22.12 +
   22.13 +#include "PR_impl/PR.h"
   22.14 +#include "Queue_impl/PrivateQueue.h"
   22.15 +#include "Hash_impl/PrivateHash.h"
   22.16 +#include "PRServ.h"
   22.17 +#include "PRServ_Request_Handlers.h"
   22.18 +
   22.19 +
   22.20 +
   22.21 +
   22.22 +//=========================== Local Fn Prototypes ===========================
   22.23 +void
   22.24 +resume_slaveVP( SlaveVP *slave, PRServSemEnv *semEnv );
   22.25 +
   22.26 +
   22.27 +
   22.28 +//==========================================================================
   22.29 +//                           Helpers
   22.30 +//
   22.31 +
   22.32 +/*Only clone the elements of req used in these reqst handlers
   22.33 + */
   22.34 +PRServSemReq *
   22.35 +cloneReq( PRServSemReq *semReq )
   22.36 + { PRServSemReq *clonedReq;
   22.37 +
   22.38 +   clonedReq             = PR_PI__malloc( sizeof(PRServSemReq) );
   22.39 +   clonedReq->reqType    = semReq->reqType;
   22.40 +   clonedReq->senderSlv  = semReq->senderSlv;
   22.41 +   clonedReq->receiverSlv= semReq->receiverSlv;
   22.42 +   clonedReq->msg        = semReq->msg;
   22.43 +   clonedReq->nextReqInHashEntry = NULL;
   22.44 +   
   22.45 +   return clonedReq;
   22.46 + }
   22.47 +
   22.48 +
   22.49 +
   22.50 +HashEntry *
   22.51 +giveEntryElseInsertReqst32( int32 *key, PRServSemReq *semReq,
   22.52 +                            HashTable   *commHashTbl )
   22.53 + { HashEntry    *entry;
   22.54 +   PRServSemReq    *waitingReq;
   22.55 +
   22.56 +   entry = getEntryFromTable32( key, commHashTbl );
   22.57 +   if( entry == NULL )
   22.58 +    {    //no waiting sends or receives, so add this request and exit
   22.59 +         // note: have to clone the request because it's on stack of sender
   22.60 +      addValueIntoTable32( key, cloneReq( semReq ), commHashTbl );
   22.61 +      return NULL;
   22.62 +    }
   22.63 +   waitingReq = (PRServSemReq *)entry->content;
   22.64 +   if( waitingReq == NULL )  //might happen when last waiting gets paired
   22.65 +    {    //no waiting sends or receives, so add this request and exit
   22.66 +      entry->content = semReq;
   22.67 +      return NULL;
   22.68 +    }
   22.69 +   return entry;
   22.70 + }
   22.71 +
   22.72 +      
   22.73 +inline PRServPointerEntry *
   22.74 +create_pointer_entry( )
   22.75 + { PRServPointerEntry *newEntry;
   22.76 +   
   22.77 +   newEntry = PR_PI__malloc( sizeof(PRServPointerEntry) );
   22.78 +   newEntry->hasEnabledNonFinishedWriter = FALSE;
   22.79 +   newEntry->numEnabledNonDoneReaders    = 0;
   22.80 +   newEntry->waitersQ                    = makePrivQ();
   22.81 +      
   22.82 +   return newEntry;
   22.83 + }
   22.84 +
   22.85 +/*malloc's space and initializes fields -- and COPIES the arg values
   22.86 + * to new space
   22.87 + */
   22.88 +inline PRServTaskStub *
   22.89 +create_task_stub( PRServTaskType *taskType, void **args )
   22.90 + { void **newArgs;
   22.91 +   PRServTaskStub* newStub = PR_int__malloc( sizeof(PRMetaTask) + taskType->sizeOfArgs );
   22.92 +   newStub->numBlockingProp = taskType->numCtldArgs;
   22.93 +   newStub->taskType   = taskType;
   22.94 +   newStub->ptrEntries = 
   22.95 +      PR_int__malloc( taskType->numCtldArgs * sizeof(PRServPointerEntry *) );
   22.96 +   newArgs = (void **)( (uint8 *)newStub + sizeof(PRMetaTask) );
   22.97 +   newStub->args = newArgs;
   22.98 +   newStub->numLiveChildTasks   = 0;
   22.99 +   newStub->numLiveChildThreads = 0;
  22.100 +   newStub->isEnded = FALSE;
  22.101 +   
  22.102 +      //Copy the arg-pointers.. can be more arguments than just the ones 
  22.103 +      // that StarSs uses to control ordering of task execution.
  22.104 +   memcpy( newArgs, args, taskType->sizeOfArgs );
  22.105 +   
  22.106 +   return newStub;
  22.107 + }
  22.108 +
  22.109 +inline PRServTaskStubCarrier *
  22.110 +create_task_carrier( PRServTaskStub *taskStub, int32 argNum, int32 rdOrWrite )
  22.111 + { PRServTaskStubCarrier *newCarrier;
  22.112 + 
  22.113 +   newCarrier = PR_PI__malloc( sizeof(PRServTaskStubCarrier) );
  22.114 +   newCarrier->taskStub = taskStub;
  22.115 +   newCarrier->argNum   = argNum;
  22.116 +   newCarrier->isReader = rdOrWrite == READER;
  22.117 + }
  22.118 +
  22.119 +
  22.120 +
  22.121 +//===========================  ==============================
  22.122 +
  22.123 +/*Application invokes this via wrapper library, when it explicitly creates a
  22.124 + * thread with the "PRServ__create_thread()" command.
  22.125 + * 
  22.126 + *Slave creation is a special form, so PR does handling before calling this.
  22.127 + * It does creation of the new slave, and hands it to this handler.  
  22.128 + *This handler is registered with PR during PRServ__start().
  22.129 + * 
  22.130 + *So, here, create a task Stub that contains a marker stating this is a thread. 
  22.131 + * Then, attach the task stub to the slave's meta Task via a PR command.
  22.132 + * 
  22.133 + *When slave dissipates, PR will call the registered recycler for the task stub.
  22.134 + */
  22.135 +inline void
  22.136 +handleCreateThd( PRReqst *req, SlaveVP *requestingSlv, SlaveVP *newSlv, PRServSemEnv *semEnv )
  22.137 + { PRServSemReq  *semReq;
  22.138 +   PRServTaskStub *taskStub, *parentTaskStub;
  22.139 +   
  22.140 +   semReq = PR_PI__take_sem_reqst_from( req );
  22.141 +
  22.142 +   parentTaskStub = PR_PI__give_lang_meta_task( requestingSlv );
  22.143 +   parentTaskStub->numLiveChildThreads += 1;
  22.144 +   
  22.145 +   taskStub = create_thread_task_stub(); //only used for wait info
  22.146 +   taskStub->parentTaskStub = parentTaskStub;
  22.147 +
  22.148 +      //note, semantic data will be initialized by separate, registered 
  22.149 +      // initializer, at the point it is accessed the first time.   
  22.150 +
  22.151 +   //================= Assign the new thread to a core ===================
  22.152 +   #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
  22.153 +   newSlv->coreAnimatedBy = 0;
  22.154 +
  22.155 +   #else
  22.156 +      //Assigning slaves to cores is part of SSR code..
  22.157 +   int32 coreToAssignOnto = semReq->coreToAssignOnto;
  22.158 +   if(coreToAssignOnto < 0 || coreToAssignOnto >= NUM_CORES )
  22.159 +    {    //out-of-range, so round-robin assignment
  22.160 +      newSlv->coreAnimatedBy = semEnv->nextCoreToGetNewSlv;
  22.161 +
  22.162 +      if( semEnv->nextCoreToGetNewSlv >= NUM_CORES - 1 )
  22.163 +          semEnv->nextCoreToGetNewSlv  = 0;
  22.164 +      else
  22.165 +          semEnv->nextCoreToGetNewSlv += 1;
  22.166 +    }
  22.167 +   else //core num in-range, so use it
  22.168 +    { newSlv->coreAnimatedBy = coreToAssignOnto;
  22.169 +    }
  22.170 +   #endif
  22.171 +   //========================================================================
  22.172 +   
  22.173 +   
  22.174 +
  22.175 +         DEBUG__printf2(dbgRqstHdlr,"Create from: %d, new VP: %d",
  22.176 +                                    requestingSlv->slaveID, newSlv->slaveID)
  22.177 +
  22.178 +   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  22.179 +   Dependency newD;
  22.180 +   newD.from_vp = requestingSlv->slaveID;
  22.181 +   newD.from_task = requestingSlv->numTimesAssignedToASlot;
  22.182 +   newD.to_vp = newSlv->slaveID;
  22.183 +   newD.to_task = 1;
  22.184 +   addToListOfArrays(Dependency,newD,semEnv->commDependenciesList);   
  22.185 +   #endif
  22.186 +
  22.187 +      //For PRServ, caller needs ptr to created thread returned to it
  22.188 +   requestingSlv->dataRetFromReq = newSlv;
  22.189 +   resume_slaveVP(requestingSlv , semEnv );
  22.190 +   resume_slaveVP( newSlv,        semEnv );
  22.191 + }
  22.192 +
  22.193 +/*Initialize semantic data struct..  this initializer doesn't need any input,
  22.194 + * but some languages may need something from inside the request that was sent
  22.195 + * to create a slave..  in that case, just make initializer do the malloc then
  22.196 + * use the PR_PI__give_sem_data  inside the create handler, and fill in the
  22.197 + * semData values there.
  22.198 + */
  22.199 +void * createInitialSemanticData( )
  22.200 + { PRServSemData *semData;
  22.201 + 
  22.202 +   semData = PR_PI__malloc( sizeof(PRServSemData) );
  22.203 +   
  22.204 +   semData->highestTransEntered = -1;
  22.205 +   semData->lastTransEntered    = NULL;
  22.206 +   return semData;
  22.207 + }
  22.208 +
  22.209 +/*SlaveVP dissipate -- this is NOT task-end!, only call this to end explicitly
  22.210 + * created threads
  22.211 + */
  22.212 +inline void
  22.213 +handleDissipate( SlaveVP *requestingSlv, PRServSemEnv *semEnv )
  22.214 + { PRServSemData  *semData;
  22.215 +   PRServTaskStub  *parentTaskStub, *ownTaskStub;
  22.216 + 
  22.217 +         DEBUG__printf1(dbgRqstHdlr,"Dissipate request from processor %d",
  22.218 +                                                     requestingSlv->slaveID)
  22.219 +             
  22.220 +   ownTaskStub    = PR_PI__give_lang_meta_task( requestingSlv, PRServ_MAGIC_NUMBER );
  22.221 +   parentTaskStub = ownTaskStub->parentTaskStub;
  22.222 +   parentTaskStub->numLiveChildThreads -= 1;  //parent wasn't freed, even if ended
  22.223 +   
  22.224 +      //if all children ended, then free this task's stub
  22.225 +      // else, keep stub around, and last child will free it (below)
  22.226 +   if( ownTaskStub->numLiveChildTasks   == 0 &&
  22.227 +       ownTaskStub->numLiveChildThreads == 0 )
  22.228 +      free_task_stub( ownTaskStub );
  22.229 +   else
  22.230 +      ownTaskStub->isEnded = TRUE; //for children to see when they end
  22.231 +
  22.232 +      //Now, check on parents waiting on child threads to end
  22.233 +   if( parentTaskStub->isWaitingForChildThreadsToEnd &&
  22.234 +       parentTaskStub->numLiveChildThreads == 0 )
  22.235 +    { parentTaskStub->isWaitingForChildThreadsToEnd = FALSE;
  22.236 +      if( parentTaskStub->isWaitingForChildTasksToEnd )
  22.237 +        return; //still waiting on tasks (should be impossible)
  22.238 +      else //parent free to resume
  22.239 +        resume_slaveVP( PR_PI__give_slave_assigned_to(parentTaskStub), semEnv );
  22.240 +    }
  22.241 +   
  22.242 +      //check if this is last child of ended parent (note, not possible to
  22.243 +      // have more than one level of ancestor waiting to be freed)
  22.244 +   if( parentTaskStub->isEnded &&
  22.245 +       parentTaskStub->numLiveChildTasks   == 0 && 
  22.246 +       parentTaskStub->numLiveChildThreads == 0 )
  22.247 +    { free_task_stub( parentTaskStub ); //just stub, semData already freed
  22.248 +    }
  22.249 +
  22.250 + FreeSlaveStateAndReturn:
  22.251 +      //Used to free the semData and requesting slave's base state, but
  22.252 +      // now PR does those things, so nothing more to do..
  22.253 +//PR handles this:   PR_PI__free( semData );
  22.254 +//PR handles this:   PR_PI__dissipate_slaveVP( requestingSlv );
  22.255 +   return; 
  22.256 + }
  22.257 +
  22.258 +/*Register this with PR, during PRServ start
  22.259 + *
  22.260 + *At some point, may change PR so that it recycles semData, in which case this
  22.261 + * only gets called when a process shuts down..  at that point, PR will call
  22.262 + * dissipate on all the slaves it has in the recycle Q.
  22.263 + */
  22.264 +void
  22.265 +freePRServSemData( void *_semData )
  22.266 + { //
  22.267 +   PR_PI__free( _semData );
  22.268 + }
  22.269 +
  22.270 +void resetPRServSemData( void *_semData ) 
  22.271 + { PRServSemData *semData = (PRServSemData *)_semData;
  22.272 +   
  22.273 +   semData->highestTransEntered = -1;
  22.274 +   semData->lastTransEntered    = NULL;
  22.275 + }
  22.276 +
  22.277 +//==========================================================================
  22.278 +//
  22.279 +//
  22.280 +/*Submit Task
  22.281 + *
  22.282 + *PR creates a PRMetaTask and passes it in.  This handler adds language-
  22.283 + * specific stuff to it.  The language-specific stuff is linked to the
  22.284 + * PRMetaTask, but if the task is suspended for any reason, the lang-specific
  22.285 + * part is moved to the semData of the slave that is animating the task.
  22.286 + *So, while the PRMetaTask is inside the creating language's semantic
  22.287 + * env, waiting to be assigned to a slave for animation, the lang-specific
  22.288 + * task info is accessed from the PRMetaTask.  But once the task suspends,
  22.289 + * that lang-specific task info transfers to the slave's semData.  All lang
  22.290 + * constructs that want to access it must get it from the semData.
  22.291 + *However, taskEnd still accesses the lang-specific task info from the 
  22.292 + * PRMetaTask, whether it suspended or not..  and the task code can access
  22.293 + * data to be used within the application behavior via 
  22.294 + * PR__give_task_info( animatingSlave ).
  22.295 + *  
  22.296 + *Uses a hash table to match the arg-pointers to each other. So, an
  22.297 + * argument-pointer is one-to-one with a hash-table entry.
  22.298 + * 
  22.299 + *If overlapping region detection is enabled, then a hash entry is one
  22.300 + * link in a ring of all entries that overlap each other.  For example,
  22.301 + * say region A shared common addresses with region B, but the pointers
  22.302 + * to them are different, then the hash entries for the two would be
  22.303 + * linked in a ring.  When a pointer is processed, all the pointers in
  22.304 + * the ring are processed (Doesn't differentiate independent siblings
  22.305 + * from parent-child or conjoined twins overlap..)
  22.306 + * NOT ENABLED AS OF MAY 25 2012
  22.307 + * 
  22.308 + *A hash entry has a queue of tasks that are waiting to access the
  22.309 + * pointed-to  region.  The queue goes in the order of creation of
  22.310 + * the tasks.  Each entry in the queue has a pointer to the task-stub
  22.311 + * and whether the task reads-only vs writes to the hash-entry's region.
  22.312 + * 
  22.313 + *A hash entry also has a count of the enabled but not yet finished readers
  22.314 + * of the region. It also has a flag that says whether a writer has been
  22.315 + * enabled and is not yet finished.
  22.316 + * 
  22.317 + *There are two kinds of events that access a hash entry: creation of a
  22.318 + * task and end of a task.
  22.319 + *
  22.320 + * 
  22.321 + * ==========================  creation  ========================== 
  22.322 + * 
  22.323 + *At creation, make a task-stub.  Set the count of blocking propendents
  22.324 + * to the number of controlled arguments (a task can have
  22.325 + * arguments that are not controlled by the language, like simple integer
  22.326 + * inputs from the sequential portion. Note that all controlled arguments
  22.327 + * are pointers, and marked as controlled in the application code).
  22.328 + * 
  22.329 + *The controlled arguments are then processed one by one.
  22.330 + *Processing an argument means getting the hash of the pointer.  Then,
  22.331 + * looking up the hash entry.  (If none, create one).
  22.332 + *With the hash entry:
  22.333 + *
  22.334 + *If the arg is a reader, and the entry does not have an enabled
  22.335 + * non-finished writer, and the queue is empty (could be prev readers,
  22.336 + * then a writer that got queued and now new readers that have to also be
  22.337 + * queued).
  22.338 + *The reader is free.  So, decrement the blocking-propendent count in
  22.339 + * the task-stub. If the count is zero, then put the task-stub into the
  22.340 + * readyQ.
  22.341 + *At the same time, increment the hash-entry's count of enabled and
  22.342 + * non-finished readers. 
  22.343 + * 
  22.344 + *Otherwise, the reader is put into the hash-entry's Q of waiters
  22.345 + * 
  22.346 + *If the arg is a writer, plus the entry does not have a current writer,
  22.347 + * plus the number of enabled non-finished readers is zero, plus the Q is
   22.348 + * empty, then the writer is free.  Mark the entry as having an
  22.349 + * enabled and non-finished writer.  Decrement the blocking-propendent
  22.350 + * count in the writer's task-stub. If the count is zero, then put the
  22.351 + * task-stub into the readyQ.
  22.352 + * 
  22.353 + *Otherwise, put the writer into the entry's Q of waiters.
  22.354 + * 
  22.355 + *No matter what, if the hash entry was chained, put it at the start of
  22.356 + * the chain.  (Means no-longer-used pointers accumulate at end of chain,
  22.357 + * decide garbage collection of no-longer-used pointers later)
  22.358 + *
  22.359 + */
  22.360 +inline 
  22.361 +void *
  22.362 +handleSubmitTask( PRServSemReq *semReq, PRServSemEnv *semEnv )
  22.363 + { uint32           key[3];
  22.364 +   HashEntry       *rawHashEntry; //has char *, but use with uint32 *
  22.365 +   PRServPointerEntry *ptrEntry; //contents of hash table entry for an arg pointer
  22.366 +   void           **args;
  22.367 +   PRServTaskStub     *taskStub, *parentTaskStub;
  22.368 +   PRServTaskType     *taskType;
  22.369 +   PRServTaskStubCarrier *taskCarrier;
  22.370 +   
  22.371 +   HashTable *
  22.372 +   argPtrHashTbl = semEnv->argPtrHashTbl;
  22.373 +   
  22.374 + 
  22.375 +   /* ==========================  creation  ========================== 
  22.376 +    *Make a task-stub.  Set the count of blocking propendents
  22.377 +    * to the number of controlled arguments (a task can have
  22.378 +    * arguments that are not controlled by the language, like simple integer
  22.379 +    * inputs from the sequential portion. Note that all controlled arguments
  22.380 +    * are pointers, and marked as controlled in the application code).
  22.381 +    */
  22.382 +   args     = semReq->args;
  22.383 +   taskType = semReq->taskType; //this is PRServ task type struct
  22.384 +   taskStub = create_task_stub( taskType, args );//copies arg ptrs
  22.385 +   
  22.386 +   taskStub->numBlockingProp = taskType->numCtldArgs;
  22.387 +   //PR does this (metaTask contains taskID): taskStub->taskID = semReq->taskID;
  22.388 +      
  22.389 +   parentTaskStub = (PRServTaskStub *)PR_PI__give_lang_meta_task(semReq->callingSlv, PRServ_MAGIC_NUMBER);
  22.390 +   taskStub->parentTaskStub = parentTaskStub; 
  22.391 +   parentTaskStub->numLiveChildTasks += 1;
  22.392 +   
  22.393 +         //DEBUG__printf3(dbgRqstHdlr,"Submit req from slaveID: %d, from task: %d, for task: %d", semReq->callingSlv->slaveID, parentSemData->taskStub->taskID[1], taskStub->taskID[1])
  22.394 +         DEBUG__printf2(dbgRqstHdlr,"Submit req from slaveID: %d, for task: %d", semReq->callingSlv->slaveID, taskStub->taskID[1])
  22.395 +          
  22.396 +   /*=============== Process args =================
  22.397 +    *The controlled arguments are processed one by one.
  22.398 +    *Processing an argument means getting the hash of the pointer.  Then,
  22.399 +    * looking up the hash entry.  (If none, create one).
  22.400 +    */
  22.401 +   int32 argNum;
  22.402 +   for( argNum = 0; argNum < taskType->numCtldArgs; argNum++ )
  22.403 +    { 
  22.404 +      key[0] = 2; //two 32b values in key
  22.405 +      *( (uint64*)&key[1]) = (uint64)args[argNum];  //write 64b into two 32b
  22.406 +
  22.407 +      /*If the hash entry was chained, put it at the
  22.408 +       * start of the chain.  (Means no-longer-used pointers accumulate
  22.409 +       * at end of chain, decide garbage collection later) */
  22.410 +      rawHashEntry = getEntryFromTable32( key, argPtrHashTbl );
  22.411 +      if( rawHashEntry == NULL )
  22.412 +       {    //adding a value auto-creates the hash-entry
  22.413 +         ptrEntry = create_pointer_entry();
  22.414 +         rawHashEntry = addValueIntoTable32( key, ptrEntry, argPtrHashTbl );
  22.415 +       }
  22.416 +      else
  22.417 +       { ptrEntry = (PRServPointerEntry *)rawHashEntry->content;
  22.418 +         if( ptrEntry == NULL )
  22.419 +          { ptrEntry = create_pointer_entry();
  22.420 +            rawHashEntry = addValueIntoTable32(key, ptrEntry, argPtrHashTbl);
  22.421 +          }
  22.422 +       }
  22.423 +      taskStub->ptrEntries[argNum] = ptrEntry;
  22.424 +      
  22.425 +      /*Have the hash entry.
  22.426 +       *If the arg is a reader and the entry does not have an enabled
  22.427 +       * non-finished writer, and the queue is empty. */
  22.428 +      if( taskType->argTypes[argNum] == READER )
  22.429 +       { if( !ptrEntry->hasEnabledNonFinishedWriter && 
  22.430 +             isEmptyPrivQ( ptrEntry->waitersQ ) )
  22.431 +          { /*The reader is free.  So, decrement the blocking-propendent
  22.432 +             * count in the task-stub. If the count is zero, then put the
  22.433 +             * task-stub into the readyQ.  At the same time, increment
  22.434 +             * the hash-entry's count of enabled and non-finished readers.*/
  22.435 +            taskStub->numBlockingProp -= 1;
  22.436 +            if( taskStub->numBlockingProp == 0 )
  22.437 +             { writePrivQ( taskStub, semEnv->taskReadyQ );
  22.438 +               if( semEnv->protoSemEnv->hasWork != TRUE ) 
  22.439 +                   semEnv->protoSemEnv->hasWork = TRUE;
  22.440 +             }
  22.441 +            ptrEntry->numEnabledNonDoneReaders += 1;
  22.442 +          }
  22.443 +         else
  22.444 +          { /*Otherwise, the reader is put into the hash-entry's Q of
  22.445 +             * waiters*/
  22.446 +            taskCarrier = create_task_carrier( taskStub, argNum, READER );
  22.447 +            writePrivQ( taskCarrier, ptrEntry->waitersQ );
  22.448 +          }
  22.449 +       }
  22.450 +      else //arg is a writer
  22.451 +       { /*the arg is a writer, plus the entry does not have a current
  22.452 +          * writer, plus the number of enabled non-finished readers is
  22.453 +          * zero, (the Q must be empty, else bug!) then the writer is free*/
  22.454 +         if( !ptrEntry->hasEnabledNonFinishedWriter &&
  22.455 +              ptrEntry->numEnabledNonDoneReaders == 0 )
  22.456 +          { /*Mark the entry has having a enabled and non-finished writer.
  22.457 +              * Decrement the blocking-propenden count in the writer's
  22.458 +              * task-stub. If the count is zero, then put the task-stub
  22.459 +              * into the readyQ.*/
  22.460 +            taskStub->numBlockingProp -= 1;
  22.461 +            if( taskStub->numBlockingProp == 0 )
  22.462 +             { writePrivQ( taskStub, semEnv->taskReadyQ );
  22.463 +               if( semEnv->protoSemEnv->hasWork != TRUE ) 
  22.464 +                   semEnv->protoSemEnv->hasWork = TRUE;
  22.465 +             }
  22.466 +            ptrEntry->hasEnabledNonFinishedWriter = TRUE;
  22.467 +          }
  22.468 +         else
  22.469 +          {/*Otherwise, put the writer into the entry's Q of waiters.*/
  22.470 +            taskCarrier = create_task_carrier( taskStub, argNum, WRITER );
  22.471 +            writePrivQ( taskCarrier, ptrEntry->waitersQ );            
  22.472 +          }
  22.473 +       }
  22.474 +    } //for argNum
  22.475 +   
  22.476 +      //resume the parent, creator
  22.477 +   resume_slaveVP( semReq->callingSlv, semEnv );
  22.478 +
  22.479 +   return;
  22.480 + }
  22.481 +
  22.482 +
  22.483 +/* ========================== end of task ===========================
  22.484 + * 
  22.485 + *At the end of a task,
  22.486 + *The task's controlled arguments are processed one by one.
  22.487 + *Processing an argument means getting the hash of the pointer.  Then,
  22.488 + * looking up the hash entry (and putting the entry at the start of the
  22.489 + * chain, if there was a chain).
  22.490 + *With the hash entry:
  22.491 + *
  22.492 + *If the arg is a reader, then decrement the enabled and non-finished
  22.493 + * reader-count in the hash-entry. If the count becomes zero, then take
  22.494 + * the next entry from the Q. It should be a writer, or else there's a
  22.495 + * bug in this algorithm.
  22.496 + *Set the hash-entry to have an enabled non-finished writer.  Decrement
  22.497 + * the blocking-propendent-count of the writer's task-stub.  If the count
  22.498 + * has reached zero, then put the task-stub into the readyQ.
  22.499 + * 
  22.500 + *If the arg is a writer, then clear the enabled non-finished writer flag
  22.501 + * of the hash-entry. Take the next entry from the waiters Q. 
  22.502 + *If it is a writer, then turn the flag back on.  Decrement the writer's
  22.503 + * blocking-propendent-count in its task-stub.  If it becomes zero, then
  22.504 + * put the task-stub into the readyQ.
  22.505 + *
  22.506 + *If waiter is a reader, then do a loop, getting all waiting readers.
  22.507 + * For each, increment the hash-entry's count of enabled
  22.508 + * non-finished readers.  Decrement the blocking propendents count of the
  22.509 + * reader's task-stub.  If it reaches zero, then put the task-stub into the
  22.510 + * readyQ.
  22.511 + *Repeat until encounter a writer -- put that writer back into the Q.
  22.512 + * 
  22.513 + *May 2012 -- not keeping track of how many references to a given ptrEntry
  22.514 + * exist, so no way to garbage collect..
  22.515 + *TODO: Might be safe to delete an entry when task ends and waiterQ empty
  22.516 + * and no readers and no writers..
  22.517 + */
  22.518 +inline void
  22.519 +handleEndTask( void *langMetaTask, PRServSemReq *semReq, PRServSemEnv *semEnv )
  22.520 + { PRServPointerEntry  *ptrEntry; //contents of hash table entry for an arg pointer
  22.521 +   void            **args;
  22.522 +   PRServSemData       *endingSlvSemData;
  22.523 +   PRServTaskStub      *endingTaskStub, *waitingTaskStub, *parentStub;
  22.524 +   PRServTaskType      *endingTaskType;
  22.525 +   PRServTaskStubCarrier *waitingTaskCarrier;
  22.526 +   PRServPointerEntry **ptrEntries;
  22.527 +         
  22.528 + 
  22.529 +//   endingTaskStub   = (PRServTaskStub *)PR_PI__give_lang_spec_task_info( semReq->callingSlv );
  22.530 +   
  22.531 +   endingTaskStub   = (PRServTaskStub *)langMetaTask;
  22.532 +   args             = endingTaskStub->args;
  22.533 +   endingTaskType   = endingTaskStub->taskType;
  22.534 +   ptrEntries       = endingTaskStub->ptrEntries; //saved in stub when create
  22.535 +   
  22.536 +         DEBUG__printf2(dbgRqstHdlr,"EndTask req from slaveID: %d, task: %d",semReq->callingSlv->slaveID, endingTaskStub->taskID[1])
  22.537 +          
  22.538 +      //"wait" functionality: Check if parent was waiting on this task
  22.539 +   parentStub = endingTaskStub->parentTaskStub;
  22.540 +   parentStub->numLiveChildTasks -= 1;
  22.541 +   if( parentStub->isWaitingForChildTasksToEnd && 
  22.542 +       parentStub->numLiveChildTasks == 0)
  22.543 +    {
  22.544 +      parentStub->isWaitingForChildTasksToEnd = FALSE;
  22.545 +      resume_slaveVP( PR_PI__give_slave_assigned_to(parentStub), semEnv );
  22.546 +    }
  22.547 +   
  22.548 +      //Check if parent ended, and this was last descendent, then free it
  22.549 +   if( parentStub->isEnded && parentStub->numLiveChildTasks == 0 )
  22.550 +    { free_task_stub( parentStub );
  22.551 +    }
  22.552 +   
  22.553 +   
  22.554 +      //Now, update state of dependents and start ready tasks
  22.555 +   /*The task's controlled arguments are processed one by one.
  22.556 +    *Processing an argument means getting arg-pointer's entry.
  22.557 +    */
  22.558 +   int32 argNum;
  22.559 +   for( argNum = 0; argNum < endingTaskType->numCtldArgs; argNum++ )
  22.560 +    {       
  22.561 +      ptrEntry = ptrEntries[argNum];
  22.562 +      //check if the ending task was reader of this arg
  22.563 +      if( endingTaskType->argTypes[argNum] == READER )
  22.564 +       { //then decrement the enabled and non-finished reader-count in
  22.565 +         // the hash-entry. 
  22.566 +         ptrEntry->numEnabledNonDoneReaders -= 1;
  22.567 +         
  22.568 +         //If the count becomes zero, then take the next entry from the Q. 
  22.569 +         //It should be a writer, or else there's a bug in this algorithm.
  22.570 +         if( ptrEntry->numEnabledNonDoneReaders == 0 )
  22.571 +          { waitingTaskCarrier = readPrivQ( ptrEntry->waitersQ );
  22.572 +            if( waitingTaskCarrier == NULL ) 
  22.573 +             { //TODO: looks safe to delete the ptr entry at this point 
  22.574 +               continue; //next iter of loop
  22.575 +             }
  22.576 +            if( waitingTaskCarrier->isReader ) 
  22.577 +               PR_App__throw_exception("READER waiting", NULL, NULL);
  22.578 +                   
  22.579 +            waitingTaskStub = waitingTaskCarrier->taskStub;
  22.580 +            
  22.581 +            //Set the hash-entry to have an enabled non-finished writer.
  22.582 +            ptrEntry->hasEnabledNonFinishedWriter = TRUE;
  22.583 +            
  22.584 +            // Decrement the blocking-propendent-count of the writer's
  22.585 +            // task-stub.  If the count has reached zero, then put the
  22.586 +            // task-stub into the readyQ.
  22.587 +            waitingTaskStub->numBlockingProp -= 1;
  22.588 +            if( waitingTaskStub->numBlockingProp == 0 )
  22.589 +             { writePrivQ( waitingTaskStub, semEnv->taskReadyQ );
  22.590 +               if( semEnv->protoSemEnv->hasWork != TRUE ) 
  22.591 +                   semEnv->protoSemEnv->hasWork = TRUE;
  22.592 +             }
  22.593 +          }
  22.594 +       }
  22.595 +      else //the ending task is a writer of this arg 
  22.596 +       { //clear the enabled non-finished writer flag of the hash-entry.
  22.597 +         ptrEntry->hasEnabledNonFinishedWriter = FALSE;
  22.598 +         
  22.599 +         //Take the next waiter from the hash-entry's Q.
  22.600 +         waitingTaskCarrier = readPrivQ( ptrEntry->waitersQ );
  22.601 +         if( waitingTaskCarrier == NULL )
  22.602 +          { //TODO: looks safe to delete ptr entry at this point
  22.603 +            continue; //go to next iter of loop, done here.
  22.604 +          }
  22.605 +         waitingTaskStub = waitingTaskCarrier->taskStub;
  22.606 +         
  22.607 +         //If task is a writer of this hash-entry's pointer
  22.608 +         if( !waitingTaskCarrier->isReader ) 
  22.609 +          { // then turn the flag back on.
  22.610 +            ptrEntry->hasEnabledNonFinishedWriter = TRUE;
  22.611 +            //Decrement the writer's blocking-propendent-count in task-stub
  22.612 +            // If it becomes zero, then put the task-stub into the readyQ.
  22.613 +            waitingTaskStub->numBlockingProp -= 1;
  22.614 +            if( waitingTaskStub->numBlockingProp == 0 )
  22.615 +             { writePrivQ( waitingTaskStub, semEnv->taskReadyQ );
  22.616 +                if( semEnv->protoSemEnv->hasWork != TRUE ) 
  22.617 +                    semEnv->protoSemEnv->hasWork = TRUE;
  22.618 +             }
  22.619 +          }
  22.620 +         else
  22.621 +          { //Waiting task is a reader, so do a loop, of all waiting readers
  22.622 +            // until encounter a writer or waitersQ is empty
  22.623 +            while( TRUE ) //The checks guarantee have a waiting reader
  22.624 +             { //Increment the hash-entry's count of enabled non-finished
  22.625 +               // readers.
  22.626 +               ptrEntry->numEnabledNonDoneReaders += 1;
  22.627 +
  22.628 +               //Decrement the blocking propendents count of the reader's
  22.629 +               // task-stub.  If it reaches zero, then put the task-stub
  22.630 +               // into the readyQ.
  22.631 +               waitingTaskStub->numBlockingProp -= 1;
  22.632 +               if( waitingTaskStub->numBlockingProp == 0 )
  22.633 +                { writePrivQ( waitingTaskStub, semEnv->taskReadyQ );
  22.634 +                   if( semEnv->protoSemEnv->hasWork != TRUE ) 
  22.635 +                       semEnv->protoSemEnv->hasWork = TRUE;
  22.636 +                }
  22.637 +               //Get next waiting task
  22.638 +               waitingTaskCarrier = peekPrivQ( ptrEntry->waitersQ );
  22.639 +               if( waitingTaskCarrier == NULL )    break; //no more waiting readers
  22.640 +               if( !waitingTaskCarrier->isReader ) break; //no more waiting readers
  22.641 +               waitingTaskCarrier = readPrivQ( ptrEntry->waitersQ );               
  22.642 +               waitingTaskStub = waitingTaskCarrier->taskStub;
  22.643 +             }//while waiter is a reader
  22.644 +          }//if-else, first waiting task is a reader
  22.645 +       }//if-else, check of ending task, whether writer or reader
  22.646 +    }//for argnum in ending task
  22.647 +   
  22.648 +   
  22.649 +      //done ending the task, if still has live children, then keep stub around
  22.650 +      // else, free the stub and args copy
  22.651 +   if( endingTaskStub->numLiveChildTasks   == 0 &&
  22.652 +       endingTaskStub->numLiveChildThreads == 0 )
  22.653 +    { free_task_stub( endingTaskStub ); 
  22.654 +    }
  22.655 +   
  22.656 +   return; 
  22.657 + }
  22.658 +
  22.659 +
  22.660 +inline void
  22.661 +free_task_stub( PRServTaskStub *stubToFree )
  22.662 + { if(stubToFree->ptrEntries != NULL ) //a thread stub has NULL entry
  22.663 +    { PR_PI__free( stubToFree->ptrEntries );
  22.664 +    }
  22.665 +   PR_PI__free( stubToFree );
  22.666 + }
  22.667 +
  22.668 +//========================== Task Comm handlers ===========================
  22.669 +
  22.670 +
  22.671 +
  22.672 +//============================  Send Handlers ==============================
  22.673 +/*Send of Type -- The semantic request has the receiving task ID and Type
  22.674 + *
  22.675 + *Messages of a given Type have to be kept separate..  so need a separate
  22.676 + * entry in the hash table for each pair: receiverID, Type
  22.677 + *
  22.678 + *Also, if same sender sends multiple before any get received, then need to
  22.679 + * stack the sends up -- even if a send waits until it's paired, several
  22.680 + * separate tasks can send to the same receiver, and doing hash on the
  22.681 + * receive task, so they will stack up.
  22.682 + */
  22.683 +inline void
  22.684 +handleSendTypeTo( PRServSemReq *semReq, PRServSemEnv *semEnv )
  22.685 + { SlaveVP    *senderSlv, *receiverSlv;
  22.686 +   int32      *senderID, *receiverID;
  22.687 +   int32      *key, keySz, receiverIDNumInt;
  22.688 +   PRServSemReq  *waitingReq;
  22.689 +   HashEntry  *entry;
  22.690 +   HashTable  *commHashTbl = semEnv->commHashTbl;
  22.691 +   
  22.692 +   receiverID  = semReq->receiverID; //For "send", know both send & recv procrs
  22.693 +   senderSlv   = semReq->senderSlv;
  22.694 +
  22.695 +         DEBUG__printf2(dbgRqstHdlr,"SendType req from sender slaveID: %d, recTask: %d", senderSlv->slaveID, receiverID[1])
  22.696 +          
  22.697 +         
  22.698 +   receiverIDNumInt = receiverID[0] + 1; //pos 0 doesn't include itself
  22.699 +   keySz = receiverIDNumInt * sizeof(int32) + 2 * sizeof(int32);
  22.700 +   key = PR_PI__malloc( keySz );
  22.701 +   key[0] = receiverIDNumInt + 1; //loc 0 is num int32 in key
  22.702 +   memcpy( &key[1], receiverID, receiverIDNumInt * sizeof(int32) );
  22.703 +   key[ 1 + receiverIDNumInt ] = semReq->msgType; 
  22.704 +   
  22.705 +   entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );
  22.706 +   if( entry == NULL ) //was just inserted, means task has to wait
  22.707 +    { return;
  22.708 +    }
  22.709 +
  22.710 +      //if here, found a waiting request with same key
  22.711 +   waitingReq = (PRServSemReq *)entry->content;
  22.712 +
  22.713 +      //At this point, know have waiting request(s) -- either sends or recv
  22.714 +      //Note, can only have max of one receive waiting, and cannot have both
  22.715 +      // sends and receives waiting (they would have paired off)
  22.716 +      // but can have multiple sends from diff sending VPs, all same msg-type
  22.717 +   if( waitingReq->reqType == send_type_to )
  22.718 +    {    //waiting request is another send, so stack this up on list
  22.719 +         // but first clone the sending request so it persists.
  22.720 +      PRServSemReq *clonedReq = cloneReq( semReq );
  22.721 +      clonedReq-> nextReqInHashEntry = waitingReq->nextReqInHashEntry;
  22.722 +      waitingReq->nextReqInHashEntry = clonedReq;
  22.723 +         DEBUG__printf2( dbgRqstHdlr, "linked requests: %p, %p ", clonedReq,\
  22.724 +                                                                 waitingReq )
  22.725 +      return;
  22.726 +    }
  22.727 +   else
  22.728 +    {    
  22.729 +       #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  22.730 +        Dependency newD;
  22.731 +        newD.from_vp = senderID->slaveID;
  22.732 +        newD.from_task = senderID->numTimesAssignedToASlot;
  22.733 +        newD.to_vp = receiverID->slaveID;
  22.734 +        newD.to_task = receiverID->numTimesAssignedToASlot +1;
  22.735 +        //(newD,semEnv->commDependenciesList);  
  22.736 +        addToListOfArrays(Dependency,newD,semEnv->dynDependenciesList);  
  22.737 +                int32 groupId = semReq->msgType;
  22.738 +        if(semEnv->ntonGroupsInfo->numInArray <= groupId){
  22.739 +            makeHighestDynArrayIndexBeAtLeast(semEnv->ntonGroupsInfo, groupId);
  22.740 +        }
  22.741 +        if(semEnv->ntonGroups[groupId] == NULL){
  22.742 +            semEnv->ntonGroups[groupId] = new_NtoN(groupId);
  22.743 +        }
  22.744 +        Unit u;
  22.745 +        u.vp = senderID->slaveID;
  22.746 +        u.task = senderID->numTimesAssignedToASlot;
  22.747 +        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->senders);
  22.748 +        u.vp = receiverID->slaveID;
  22.749 +        u.task = receiverID->numTimesAssignedToASlot +1;
  22.750 +        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->receivers);
  22.751 +       #endif
  22.752 +
  22.753 +         //set receiver slave, from the waiting request
  22.754 +      receiverSlv = waitingReq->receiverSlv;
  22.755 +      
  22.756 +         //waiting request is a receive_type_to, so it pairs to this send
  22.757 +         //First, remove the waiting receive request from the entry
  22.758 +      entry->content = waitingReq->nextReqInHashEntry;
  22.759 +      PR_PI__free( waitingReq ); //Don't use contents -- so free it
  22.760 +      
  22.761 +      if( entry->content == NULL )
  22.762 +       {    //TODO: mod hash table to double-link, so can delete entry from
  22.763 +            // table without hashing the key and looking it up again
  22.764 +         deleteEntryFromTable32( (uint32*)entry->key, commHashTbl );  //frees hashEntry
  22.765 +       }
  22.766 +      
  22.767 +         //attach msg that's in this send request to receiving task's Slv
  22.768 +         // when comes back from suspend will have msg in dataRetFromReq
  22.769 +      receiverSlv->dataRetFromReq = semReq->msg;
  22.770 +
  22.771 +         //bring both processors back from suspend
  22.772 +      resume_slaveVP( senderSlv,   semEnv );
  22.773 +      resume_slaveVP( receiverSlv, semEnv );
  22.774 +
  22.775 +      return;
  22.776 +    }
  22.777 + }
  22.778 +
  22.779 +
  22.780 +/*If Send or Receive are called within a task, it causes the task to suspend,
  22.781 + * which converts the slave animating it to a free slave and suspends that slave.
  22.782 + *Which means that send and receive operate upon slaves, no matter whether they
  22.783 + * were called from within a task or a slave.
  22.784 + *  
  22.785 + *Looks like can make single handler for both kinds of send..
  22.786 + */
  22.787 +//TODO: combine both send handlers into single handler
  22.788 +inline void
  22.789 +handleSendFromTo( PRServSemReq *semReq, PRServSemEnv *semEnv)
  22.790 + { SlaveVP     *senderSlv, *receiverSlv;
  22.791 +   int32       *senderID, *receiverID;
  22.792 +   int32       *key, keySz, receiverIDNumInt, senderIDNumInt;
  22.793 +   PRServSemReq   *waitingReq;
  22.794 +   HashEntry   *entry;
  22.795 +   HashTable   *commHashTbl = semEnv->commHashTbl;
  22.796 +
  22.797 +         DEBUG__printf2(dbgRqstHdlr,"SendFromTo req from task %d to %d",
  22.798 +                        semReq->senderID[1],semReq->receiverID[1])
  22.799 +   
  22.800 +   receiverID  = semReq->receiverID; //For "send", know both send & recv procrs
  22.801 +   senderID    = semReq->senderID;
  22.802 +   senderSlv   = semReq->senderSlv;
  22.803 +
  22.804 +   
  22.805 +   receiverIDNumInt = receiverID[0] + 1; //include the count in the key
  22.806 +   senderIDNumInt   = senderID[0] + 1;
  22.807 +   keySz = (receiverIDNumInt + senderIDNumInt) * sizeof(int32) + sizeof(int32);
  22.808 +   key   = PR_PI__malloc( keySz );
  22.809 +   key[0] = receiverIDNumInt + senderIDNumInt;
  22.810 +   memcpy( &key[1], receiverID, receiverIDNumInt * sizeof(int32) );
  22.811 +   memcpy( &key[1 + receiverIDNumInt], senderID, senderIDNumInt * sizeof(int32) );
  22.812 +
  22.813 +   entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );
  22.814 +   if( entry == NULL ) //was just inserted, means task has to wait
  22.815 +    { return;
  22.816 +    }
  22.817 +
  22.818 +   waitingReq = (PRServSemReq *)entry->content;
  22.819 +
  22.820 +      //At this point, know have waiting request(s) -- either sends or recv
  22.821 +   if( waitingReq->reqType == send_from_to )
  22.822 +    { printf("\n ERROR: shouldn't be two send-from-tos waiting \n");
  22.823 +    }
  22.824 +   else
  22.825 +    {    //waiting request is a receive, so it completes pair with this send
  22.826 +      #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  22.827 +        Dependency newD;
  22.828 +        newD.from_vp = sendPr->slaveID;
  22.829 +        newD.from_task = sendPr->numTimesAssignedToASlot;
  22.830 +        newD.to_vp = receivePr->slaveID;
  22.831 +        newD.to_task = receivePr->numTimesAssignedToASlot +1;
  22.832 +        //addToListOfArraysDependency(newD,semEnv->commDependenciesList);  
  22.833 +        addToListOfArrays(Dependency,newD,semEnv->commDependenciesList);   
  22.834 +      #endif 
  22.835 +
  22.836 +         //set receiver slave, from the waiting request
  22.837 +      receiverSlv = waitingReq->receiverSlv;
  22.838 +       
  22.839 +         //First, remove the waiting receive request from the entry
  22.840 +      entry->content = waitingReq->nextReqInHashEntry;
  22.841 +      PR_PI__free( waitingReq ); //Don't use contents -- so free it
  22.842 +      
  22.843 +         //can only be one waiting req for "from-to" semantics
  22.844 +      if( entry->content != NULL )
  22.845 +       {
  22.846 +         printf("\nERROR in handleSendFromTo\n");
  22.847 +       }
  22.848 +      deleteEntryFromTable32( (uint32*)entry->key, commHashTbl );  //frees HashEntry
  22.849 +
  22.850 +         //attach msg that's in this send request to receiving procr
  22.851 +         // when comes back from suspend, will have msg in dataRetFromReq
  22.852 +      receiverSlv->dataRetFromReq = semReq->msg;
  22.853 +
  22.854 +         //bring both processors back from suspend
  22.855 +      resume_slaveVP( senderSlv,   semEnv );
  22.856 +      resume_slaveVP( receiverSlv, semEnv );
  22.857 +            
  22.858 +      return;
  22.859 +    }
  22.860 + }
  22.861 +
  22.862 +
  22.863 +
  22.864 +//==============================  Receives  ===========================
  22.865 +//
  22.866 +
  22.867 +
  22.868 +inline void
  22.869 +handleReceiveTypeTo( PRServSemReq *semReq, PRServSemEnv *semEnv)
  22.870 + { SlaveVP    *senderSlv, *receiverSlv;
  22.871 +   int32      *receiverID;
  22.872 +   int32      *key, keySz, receiverIDNumInt;
  22.873 +   PRServSemReq  *waitingReq;
  22.874 +   HashEntry  *entry;
  22.875 +   HashTable  *commHashTbl = semEnv->commHashTbl;
  22.876 +   
  22.877 +         DEBUG__printf2(dbgRqstHdlr,"ReceiveType req to ID: %d type: %d",semReq->receiverID[1], semReq->msgType)
  22.878 + 
  22.879 +   receiverID  = semReq->receiverID; //For "send", know both send & recv procrs
  22.880 +   receiverSlv = semReq->receiverSlv;
  22.881 +   
  22.882 +
  22.883 +      //key is the receiverID plus the type -- have to copy them into key
  22.884 +   receiverIDNumInt = receiverID[0] + 1; //pos 0 doesn't include itself
  22.885 +   keySz = receiverIDNumInt * sizeof(int32) + 2 * sizeof(int32);
  22.886 +   key = PR_PI__malloc( keySz );
  22.887 +   key[0] = receiverIDNumInt + 1; //loc 0 is num int32s in key
  22.888 +   memcpy( &key[1], receiverID, receiverIDNumInt * sizeof(int32) );
  22.889 +   key[ 1 + receiverIDNumInt ] = semReq->msgType; 
  22.890 +
  22.891 +   entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );//clones
  22.892 +   if( entry == NULL ) //was just inserted, means task has to wait
  22.893 +    { return;
  22.894 +    }
  22.895 +
  22.896 +   waitingReq = (PRServSemReq *)entry->content;  //previously cloned by insert
  22.897 +
  22.898 +      //At this point, know have waiting request(s) -- should be send(s)
  22.899 +   if( waitingReq->reqType == send_type_to )
  22.900 +    {    
  22.901 +         //set sending slave  from the request
  22.902 +      senderSlv = waitingReq->senderSlv;
  22.903 +      
  22.904 +         //waiting request is a send, so pair it with this receive
  22.905 +         //first, remove the waiting send request from the list in entry
  22.906 +      entry->content = waitingReq->nextReqInHashEntry;
  22.907 +      if( entry->content == NULL )
  22.908 +       { deleteEntryFromTable32( (uint32*)entry->key, commHashTbl );  //frees HashEntry
  22.909 +       }
  22.910 +      
  22.911 +         //attach msg that's in the send request to receiving procr
  22.912 +         // when comes back from suspend, will have msg in dataRetFromReq
  22.913 +      receiverSlv->dataRetFromReq = waitingReq->msg;
  22.914 +
  22.915 +         //bring both processors back from suspend
  22.916 +      PR_PI__free( waitingReq );
  22.917 +
  22.918 +       #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  22.919 +        Dependency newD;
  22.920 +        newD.from_vp = sendPr->slaveID;
  22.921 +        newD.from_task = sendPr->numTimesAssignedToASlot;
  22.922 +        newD.to_vp = receivePr->slaveID;
  22.923 +        newD.to_task = receivePr->numTimesAssignedToASlot +1;
  22.924 +        //addToListOfArraysDependency(newD,semEnv->commDependenciesList);  
  22.925 +        addToListOfArrays(Dependency,newD,semEnv->dynDependenciesList); 
  22.926 +        int32 groupId = semReq->msgType;
  22.927 +        if(semEnv->ntonGroupsInfo->numInArray <= groupId){
  22.928 +            makeHighestDynArrayIndexBeAtLeast(semEnv->ntonGroupsInfo, groupId);
  22.929 +        }
  22.930 +        if(semEnv->ntonGroups[groupId] == NULL){
  22.931 +            semEnv->ntonGroups[groupId] = new_NtoN(groupId);
  22.932 +        }
  22.933 +        Unit u;
  22.934 +        u.vp = sendPr->slaveID;
  22.935 +        u.task = sendPr->numTimesAssignedToASlot;
  22.936 +        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->senders);
  22.937 +        u.vp = receivePr->slaveID;
  22.938 +        u.task = receivePr->numTimesAssignedToASlot +1;
  22.939 +        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->receivers);
  22.940 +       #endif
  22.941 +      
  22.942 +      resume_slaveVP( senderSlv,   semEnv );
  22.943 +      resume_slaveVP( receiverSlv, semEnv );
  22.944 +
  22.945 +      return;
  22.946 +    }
  22.947 +   printf("\nLang Impl Error: Should never be two waiting receives!\n");
  22.948 + }
  22.949 +
  22.950 +
  22.951 +/*
  22.952 + */
  22.953 +inline void
  22.954 +handleReceiveFromTo( PRServSemReq *semReq, PRServSemEnv *semEnv)
  22.955 + { SlaveVP     *senderSlv, *receiverSlv;
  22.956 +   int32       *senderID,  *receiverID;
  22.957 +   int32       *key, keySz, receiverIDNumInt, senderIDNumInt;
  22.958 +   PRServSemReq   *waitingReq;
  22.959 +   HashEntry   *entry;
  22.960 +   HashTable   *commHashTbl = semEnv->commHashTbl;
  22.961 +
  22.962 +         DEBUG__printf2(dbgRqstHdlr,"RecFromTo req from ID: %d to ID: %d",semReq->senderID[1],semReq->receiverID[1])
  22.963 +   
  22.964 +   receiverID  = semReq->receiverID; //For "send", know both send & recv procrs
  22.965 +   senderID    = semReq->senderID;
  22.966 +   receiverSlv = semReq->receiverSlv;
  22.967 +   
  22.968 +   receiverIDNumInt = receiverID[0] + 1; //pos 0 doesn't include itself
  22.969 +   senderIDNumInt   = senderID[0] + 1;
  22.970 +   keySz = (receiverIDNumInt + senderIDNumInt) * sizeof(int32) + sizeof(int32);
  22.971 +   key = PR_PI__malloc( keySz );
  22.972 +   key[0] = receiverIDNumInt + senderIDNumInt; //loc 0 is num int32s in key
  22.973 +   memcpy( &key[1], receiverID, receiverIDNumInt * sizeof(int32) );
  22.974 +   memcpy( &key[1 + receiverIDNumInt], senderID, senderIDNumInt * sizeof(int32));
  22.975 +
  22.976 +   entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );
  22.977 +   if( entry == NULL ) //was just inserted, means task has to wait
  22.978 +    { return;
  22.979 +    }
  22.980 +
  22.981 +   waitingReq = (PRServSemReq *)entry->content;
  22.982 +
  22.983 +      //At this point, know have a request to rendez-vous -- should be send
  22.984 +   if( waitingReq->reqType == send_from_to )
  22.985 +    {    //waiting request is a send, so pair it with this receive
  22.986 +      #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  22.987 +        Dependency newD;
  22.988 +        newD.from_vp = sendPr->slaveID;
  22.989 +        newD.from_task = sendPr->numTimesAssignedToASlot;
  22.990 +        newD.to_vp = receivePr->slaveID;
  22.991 +        newD.to_task = receivePr->numTimesAssignedToASlot +1;
  22.992 +        //addToListOfArraysDependency(newD,semEnv->commDependenciesList);  
  22.993 +        addToListOfArrays(Dependency,newD,semEnv->commDependenciesList);    
  22.994 +      #endif
  22.995 +      
  22.996 +         //have receiver slave, now set sender slave
  22.997 +      senderSlv = waitingReq->senderSlv;
  22.998 +      
  22.999 +         //For from-to, should only ever be a single reqst waiting tobe paird
 22.1000 +      entry->content = waitingReq->nextReqInHashEntry;
 22.1001 +      if( entry->content != NULL ) printf("\nERROR in handleRecvFromTo\n");
 22.1002 +      deleteEntryFromTable32( (uint32*)entry->key, commHashTbl );  //frees entry too
 22.1003 +
 22.1004 +         //attach msg that's in the send request to receiving procr
 22.1005 +         // when comes back from suspend, will have msg in dataRetFromReq
 22.1006 +      receiverSlv->dataRetFromReq = waitingReq->msg;
 22.1007 +
 22.1008 +         //bring both processors back from suspend
 22.1009 +      PR_PI__free( waitingReq );
 22.1010 +
 22.1011 +      resume_slaveVP( senderSlv,   semEnv );
 22.1012 +      resume_slaveVP( receiverSlv, semEnv );
 22.1013 +
 22.1014 +      return;
 22.1015 +    }
 22.1016 +   printf("\nLang Impl Error: Should never be two waiting receives!\n");
 22.1017 + }
 22.1018 +
 22.1019 +
 22.1020 +/*Waits for all tasks that are direct children to end, then resumes calling
 22.1021 + * task or thread
 22.1022 + */
 22.1023 +inline void
 22.1024 +handleTaskwait( PRServSemReq *semReq, SlaveVP *requestingSlv, PRServSemEnv *semEnv)
 22.1025 + { PRServTaskStub* taskStub;
 22.1026 + 
 22.1027 +            DEBUG__printf1(dbgRqstHdlr,"Taskwait request from processor %d",
 22.1028 +                                                      requestingSlv->slaveID)
 22.1029 +    
 22.1030 +   taskStub = (PRServTaskStub *)PR_PI__give_lang_meta_task( requestingSlv, PRServ_MAGIC_NUMBER);
 22.1031 +   
 22.1032 +   if( taskStub->numLiveChildTasks == 0 )
 22.1033 +    {    //nobody to wait for, resume
 22.1034 +      resume_slaveVP( requestingSlv, semEnv );
 22.1035 +    }
 22.1036 +   else  //have to wait, mark waiting
 22.1037 +    {        
 22.1038 +      taskStub->isWaitingForChildTasksToEnd = TRUE;
 22.1039 +    }    
 22.1040 + }
 22.1041 +
 22.1042 +
 22.1043 +//==========================================================================
 22.1044 +/*
 22.1045 + */
 22.1046 +void
 22.1047 +handleMalloc( PRServSemReq *semReq, SlaveVP *requestingSlv, PRServSemEnv *semEnv )
 22.1048 + { void *ptr;
 22.1049 + 
 22.1050 +      DEBUG__printf1(dbgRqstHdlr,"Malloc request from processor %d",requestingSlv->slaveID)
 22.1051 +
 22.1052 +   ptr = PR_PI__malloc( semReq->sizeToMalloc );
 22.1053 +   requestingSlv->dataRetFromReq = ptr;
 22.1054 +   resume_slaveVP( requestingSlv, semEnv );
 22.1055 + }
 22.1056 +
 22.1057 +/*
 22.1058 + */
 22.1059 +void
 22.1060 +handleFree( PRServSemReq *semReq, SlaveVP *requestingSlv, PRServSemEnv *semEnv )
 22.1061 + {
 22.1062 +         DEBUG__printf1(dbgRqstHdlr,"Free request from processor %d",requestingSlv->slaveID)
 22.1063 +   PR_PI__free( semReq->ptrToFree );
 22.1064 +   resume_slaveVP( requestingSlv, semEnv );
 22.1065 + }
 22.1066 +
 22.1067 +
 22.1068 +//===========================================================================
 22.1069 +//
 22.1070 +/*Uses ID as index into array of flags.  If flag already set, resumes from
 22.1071 + * end-label.  Else, sets flag and resumes normally.
 22.1072 + */
 22.1073 +void inline
 22.1074 +handleStartSingleton_helper( PRServSingleton *singleton, SlaveVP *reqstingSlv,
 22.1075 +                             PRServSemEnv    *semEnv )
 22.1076 + {
 22.1077 +   if( singleton->hasFinished )
 22.1078 +    {    //the code that sets the flag to true first sets the end instr addr
 22.1079 +      reqstingSlv->dataRetFromReq = singleton->endInstrAddr;
 22.1080 +      resume_slaveVP( reqstingSlv, semEnv );
 22.1081 +      return;
 22.1082 +    }
 22.1083 +   else if( singleton->hasBeenStarted )
 22.1084 +    {    //singleton is in-progress in a diff slave, so wait for it to finish
 22.1085 +      writePrivQ(reqstingSlv, singleton->waitQ );
 22.1086 +      return;
 22.1087 +    }
 22.1088 +   else
 22.1089 +    {    //hasn't been started, so this is the first attempt at the singleton
 22.1090 +      singleton->hasBeenStarted = TRUE;
 22.1091 +      reqstingSlv->dataRetFromReq = 0x0;
 22.1092 +      resume_slaveVP( reqstingSlv, semEnv );
 22.1093 +      return;
 22.1094 +    }
 22.1095 + }
 22.1096 +void inline
 22.1097 +handleStartFnSingleton( PRServSemReq *semReq, SlaveVP *requestingSlv,
 22.1098 +                      PRServSemEnv *semEnv )
 22.1099 + { PRServSingleton *singleton;
 22.1100 +         DEBUG__printf1(dbgRqstHdlr,"StartFnSingleton request from processor %d",requestingSlv->slaveID)
 22.1101 +
 22.1102 +   singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
 22.1103 +   handleStartSingleton_helper( singleton, requestingSlv, semEnv );
 22.1104 + }
 22.1105 +void inline
 22.1106 +handleStartDataSingleton( PRServSemReq *semReq, SlaveVP *requestingSlv,
 22.1107 +                      PRServSemEnv *semEnv )
 22.1108 + { PRServSingleton *singleton;
 22.1109 +
 22.1110 +         DEBUG__printf1(dbgRqstHdlr,"StartDataSingleton request from processor %d",requestingSlv->slaveID)
 22.1111 +   if( *(semReq->singletonPtrAddr) == NULL )
 22.1112 +    { singleton                 = PR_PI__malloc( sizeof(PRServSingleton) );
 22.1113 +      singleton->waitQ          = makePRQ();
 22.1114 +      singleton->endInstrAddr   = 0x0;
 22.1115 +      singleton->hasBeenStarted = FALSE;
 22.1116 +      singleton->hasFinished    = FALSE;
 22.1117 +      *(semReq->singletonPtrAddr)  = singleton;
 22.1118 +    }
 22.1119 +   else
 22.1120 +      singleton = *(semReq->singletonPtrAddr);
 22.1121 +   handleStartSingleton_helper( singleton, requestingSlv, semEnv );
 22.1122 + }
 22.1123 +
 22.1124 +
 22.1125 +void inline
 22.1126 +handleEndSingleton_helper( PRServSingleton *singleton, SlaveVP *requestingSlv,
 22.1127 +                           PRServSemEnv    *semEnv )
 22.1128 + { PrivQueueStruc *waitQ;
 22.1129 +   int32           numWaiting, i;
 22.1130 +   SlaveVP      *resumingSlv;
 22.1131 +
 22.1132 +   if( singleton->hasFinished )
 22.1133 +    { //by definition, only one slave should ever be able to run end singleton
 22.1134 +      // so if this is true, is an error
 22.1135 +      ERROR1( "singleton code ran twice", requestingSlv );
 22.1136 +    }
 22.1137 +
 22.1138 +   singleton->hasFinished = TRUE;
 22.1139 +   waitQ = singleton->waitQ;
 22.1140 +   numWaiting = numInPrivQ( waitQ );
 22.1141 +   for( i = 0; i < numWaiting; i++ )
 22.1142 +    {    //they will resume inside start singleton, then jmp to end singleton
 22.1143 +      resumingSlv = readPrivQ( waitQ );
 22.1144 +      resumingSlv->dataRetFromReq = singleton->endInstrAddr;
 22.1145 +      resume_slaveVP( resumingSlv, semEnv );
 22.1146 +    }
 22.1147 +
 22.1148 +   resume_slaveVP( requestingSlv, semEnv );
 22.1149 +
 22.1150 +}
 22.1151 +void inline
 22.1152 +handleEndFnSingleton( PRServSemReq *semReq, SlaveVP *requestingSlv,
 22.1153 +                        PRServSemEnv *semEnv )
 22.1154 + {
 22.1155 +   PRServSingleton   *singleton;
 22.1156 +
 22.1157 +         DEBUG__printf1(dbgRqstHdlr,"EndFnSingleton request from processor %d",requestingSlv->slaveID)
 22.1158 +   
 22.1159 +   singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
 22.1160 +   handleEndSingleton_helper( singleton, requestingSlv, semEnv );
 22.1161 +  }
 22.1162 +void inline
 22.1163 +handleEndDataSingleton( PRServSemReq *semReq, SlaveVP *requestingSlv,
 22.1164 +                        PRServSemEnv *semEnv )
 22.1165 + {
 22.1166 +   PRServSingleton   *singleton;
 22.1167 +
 22.1168 +         DEBUG__printf1(dbgRqstHdlr,"EndDataSingleton request from processor %d",requestingSlv->slaveID)
 22.1169 +   
 22.1170 +   singleton = *(semReq->singletonPtrAddr);
 22.1171 +   handleEndSingleton_helper( singleton, requestingSlv, semEnv );
 22.1172 +  }
 22.1173 +
 22.1174 +
 22.1175 +/*This executes the function in the masterVP, take the function
 22.1176 + * pointer out of the request and call it, then resume the VP.
 22.1177 + */
 22.1178 +void
 22.1179 +handleAtomic( PRServSemReq *semReq, SlaveVP *requestingSlv, PRServSemEnv *semEnv )
 22.1180 + {
 22.1181 +         DEBUG__printf1(dbgRqstHdlr,"Atomic request from processor %d",requestingSlv->slaveID)
 22.1182 +   semReq->fnToExecInMaster( semReq->dataForFn );
 22.1183 +   resume_slaveVP( requestingSlv, semEnv );
 22.1184 + }
 22.1185 +
 22.1186 +/*First, it looks at the VP's semantic data, to see the highest transactionID
 22.1187 + * that VP
 22.1188 + * already has entered.  If the current ID is not larger, it throws an
 22.1189 + * exception stating a bug in the code.
 22.1190 + *Otherwise it puts the current ID
 22.1191 + * there, and adds the ID to a linked list of IDs entered -- the list is
 22.1192 + * used to check that exits are properly ordered.
 22.1193 + *Next it is uses transactionID as index into an array of transaction
 22.1194 + * structures.
 22.1195 + *If the "VP_currently_executing" field is non-null, then put requesting VP
 22.1196 + * into queue in the struct.  (At some point a holder will request
 22.1197 + * end-transaction, which will take this VP from the queue and resume it.)
 22.1198 + *If NULL, then write requesting into the field and resume.
 22.1199 + */
 22.1200 +void
 22.1201 +handleTransStart( PRServSemReq *semReq, SlaveVP *requestingSlv,
 22.1202 +                  PRServSemEnv *semEnv )
 22.1203 + { PRServSemData *semData;
 22.1204 +   TransListElem *nextTransElem;
 22.1205 +
 22.1206 +         DEBUG__printf1(dbgRqstHdlr,"TransStart request from processor %d",requestingSlv->slaveID)
 22.1207 +   
 22.1208 +      //check ordering of entering transactions is correct
 22.1209 +   semData = requestingSlv->semanticData;
 22.1210 +   if( semData->highestTransEntered > semReq->transID )
 22.1211 +    {    //throw PR exception, which shuts down PR.
 22.1212 +      PR_PI__throw_exception( "transID smaller than prev", requestingSlv, NULL);
 22.1213 +    }
 22.1214 +      //add this trans ID to the list of transactions entered -- check when
 22.1215 +      // end a transaction
 22.1216 +   semData->highestTransEntered = semReq->transID;
 22.1217 +   nextTransElem = PR_PI__malloc( sizeof(TransListElem) );
 22.1218 +   nextTransElem->transID = semReq->transID;
 22.1219 +   nextTransElem->nextTrans = semData->lastTransEntered;
 22.1220 +   semData->lastTransEntered = nextTransElem;
 22.1221 +
 22.1222 +      //get the structure for this transaction ID
 22.1223 +   PRServTrans *
 22.1224 +   transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);
 22.1225 +
 22.1226 +   if( transStruc->VPCurrentlyExecuting == NULL )
 22.1227 +    {
 22.1228 +      transStruc->VPCurrentlyExecuting = requestingSlv;
 22.1229 +      resume_slaveVP( requestingSlv, semEnv );
 22.1230 +    }
 22.1231 +   else
 22.1232 +    {    //note, might make future things cleaner if save request with VP and
 22.1233 +         // add this trans ID to the linked list when gets out of queue.
 22.1234 +         // but don't need for now, and lazy..
 22.1235 +      writePrivQ( requestingSlv, transStruc->waitingVPQ );
 22.1236 +    }
 22.1237 + }
 22.1238 +
 22.1239 +
 22.1240 +/*Use the trans ID to get the transaction structure from the array.
 22.1241 + *Look at VP_currently_executing to be sure it's same as requesting VP.
 22.1242 + * If different, throw an exception, stating there's a bug in the code.
 22.1243 + *Next, take the first element off the list of entered transactions.
 22.1244 + * Check to be sure the ending transaction is the same ID as the next on
 22.1245 + * the list.  If not, incorrectly nested so throw an exception.
 22.1246 + *
 22.1247 + *Next, get from the queue in the structure.
 22.1248 + *If it's empty, set VP_currently_executing field to NULL and resume
 22.1249 + * requesting VP.
 22.1250 + *If get somethine, set VP_currently_executing to the VP from the queue, then
 22.1251 + * resume both.
 22.1252 + */
 22.1253 +void
 22.1254 +handleTransEnd(PRServSemReq *semReq, SlaveVP *requestingSlv, PRServSemEnv *semEnv)
 22.1255 + { PRServSemData    *semData;
 22.1256 +   SlaveVP     *waitingSlv;
 22.1257 +   PRServTrans      *transStruc;
 22.1258 +   TransListElem *lastTrans;
 22.1259 +   
 22.1260 +         DEBUG__printf1(dbgRqstHdlr,"TransEnd request from processor %d",requestingSlv->slaveID)
 22.1261 +   
 22.1262 +   transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);
 22.1263 +
 22.1264 +      //make sure transaction ended in same VP as started it.
 22.1265 +   if( transStruc->VPCurrentlyExecuting != requestingSlv )
 22.1266 +    {
 22.1267 +      PR_PI__throw_exception( "trans ended in diff VP", requestingSlv, NULL );
 22.1268 +    }
 22.1269 +
 22.1270 +      //make sure nesting is correct -- last ID entered should == this ID
 22.1271 +   semData = requestingSlv->semanticData;
 22.1272 +   lastTrans = semData->lastTransEntered;
 22.1273 +   if( lastTrans->transID != semReq->transID )
 22.1274 +    {
 22.1275 +      PR_PI__throw_exception( "trans incorrectly nested", requestingSlv, NULL );
 22.1276 +    }
 22.1277 +
 22.1278 +   semData->lastTransEntered = semData->lastTransEntered->nextTrans;
 22.1279 +
 22.1280 +
 22.1281 +   waitingSlv = readPrivQ( transStruc->waitingVPQ );
 22.1282 +   transStruc->VPCurrentlyExecuting = waitingSlv;
 22.1283 +
 22.1284 +   if( waitingSlv != NULL )
 22.1285 +      resume_slaveVP( waitingSlv, semEnv );
 22.1286 +
 22.1287 +   resume_slaveVP( requestingSlv, semEnv );
 22.1288 + }
    23.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.2 +++ b/Services_Offered_by_PR/Services_Language/PRServ_Request_Handlers.h	Tue Oct 23 23:46:17 2012 -0700
    23.3 @@ -0,0 +1,60 @@
/*
 *  Copyright 2009 OpenSourceStewardshipFoundation.org
 *  Licensed under GNU General Public License version 2
 *
 * Author: seanhalle@yahoo.com
 *
 */

#ifndef _PRServ_REQ_H
#define	_PRServ_REQ_H

#include "PRServ.h"

/*This header defines everything specific to the PRServ semantic plug-in
 *
 *Declares the master-side request handlers of the PRServ langlet.  The
 * task-lifecycle and communication handlers locate the slave(s) involved
 * inside the request itself; the remaining handlers act on behalf of the
 * explicitly passed requesting slave.
 */

   //---- task lifecycle & communication (slaves carried in the request) ----
inline void
handleSubmitTask( PRServSemReq *semReq, PRServSemEnv *semEnv);
inline void
handleEndTask( PRServSemReq *semReq, PRServSemEnv *semEnv);
inline void
handleSendTypeTo( PRServSemReq *semReq, PRServSemEnv *semEnv);
inline void
handleSendFromTo( PRServSemReq *semReq, PRServSemEnv *semEnv);
inline void
handleReceiveTypeTo( PRServSemReq *semReq, PRServSemEnv *semEnv);
inline void
handleReceiveFromTo( PRServSemReq *semReq, PRServSemEnv *semEnv);
inline void
handleTaskwait(PRServSemReq *semReq, SlaveVP *requestingSlv, PRServSemEnv *semEnv);

   //---- memory services performed in the master ----
inline void
handleMalloc( PRServSemReq *semReq, SlaveVP *requestingSlv, PRServSemEnv *semEnv);
inline void
handleFree( PRServSemReq *semReq, SlaveVP *requestingSlv, PRServSemEnv *semEnv );

   //---- transactions, atomic sections, and singletons ----
inline void
handleTransEnd(PRServSemReq *semReq, SlaveVP *requestingSlv, PRServSemEnv*semEnv);
inline void
handleTransStart( PRServSemReq *semReq, SlaveVP *requestingSlv,
                  PRServSemEnv *semEnv );
inline void
handleAtomic( PRServSemReq *semReq, SlaveVP *requestingSlv, PRServSemEnv *semEnv);
inline void
handleStartFnSingleton( PRServSemReq *semReq, SlaveVP *reqstingSlv,
                      PRServSemEnv *semEnv );
inline void
handleEndFnSingleton( PRServSemReq *semReq, SlaveVP *requestingSlv,
                    PRServSemEnv *semEnv );
inline void
handleStartDataSingleton( PRServSemReq *semReq, SlaveVP *reqstingSlv,
                      PRServSemEnv *semEnv );
inline void
handleEndDataSingleton( PRServSemReq *semReq, SlaveVP *requestingSlv,
                    PRServSemEnv *semEnv );

   //---- meta-task cleanup ----
inline void
free_task_stub( PRMetaTask *stubToFree );


#endif	/* _PRServ_REQ_H */

    24.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.2 +++ b/Services_Offered_by_PR/Services_Language/PRServ_SS.c	Tue Oct 23 23:46:17 2012 -0700
    24.3 @@ -0,0 +1,235 @@
    24.4 +/*
    24.5 + * Copyright 2010  OpenSourceCodeStewardshipFoundation
    24.6 + *
    24.7 + * Licensed under BSD
    24.8 + */
    24.9 +
   24.10 +#include <stdio.h>
   24.11 +#include <stdlib.h>
   24.12 +#include <malloc.h>
   24.13 +
   24.14 +#include "Queue_impl/PrivateQueue.h"
   24.15 +#include "Hash_impl/PrivateHash.h"
   24.16 +
   24.17 +#include "PRServ.h"
   24.18 +#include "Measurement/PRServ_Counter_Recording.h"
   24.19 +
   24.20 +//==========================================================================
   24.21 +
   24.22 +
   24.23 +
   24.24 +//===========================================================================
   24.25 +
   24.26 +/*
   24.27 + */
   24.28 +void
   24.29 +PRServ__start( SlaveVP *seedSlv )
   24.30 + { PRServSemEnv       *semEnv;
   24.31 +   int32            i, coreNum, slotNum;
   24.32 +   PRServSemData      *semData;
   24.33 +   PRServTaskStub     *threadTaskStub, *parentTaskStub;
   24.34 +   
   24.35 +   semEnv = PR_WL__malloc( sizeof(PRServSemEnv) );
   24.36 +   
   24.37 +   PR_SS__register_langlets_semEnv( semEnv, seedSlv, PRServ_MAGIC_NUMBER );
   24.38 +   
   24.39 +      //seed slave is a thread slave, so make a thread's task stub for it
   24.40 +      // and then make another to stand for the seed's parent task.  Make
   24.41 +      // the parent be already ended, and have one child (the seed).  This
   24.42 +      // will make the dissipate handler do the right thing when the seed
   24.43 +      // is dissipated.
   24.44 +   threadTaskStub = create_thread_task_stub( initData );
   24.45 +   parentTaskStub = create_thread_task_stub( NULL );
   24.46 +   parentTaskStub->isEnded = TRUE;
   24.47 +   parentTaskStub->numLiveChildThreads = 1; //so dissipate works for seed
   24.48 +   threadTaskStub->parentTaskStub = parentTaskStub;
   24.49 +   
   24.50 +   PR_SS__set_langMetaTask_for_seedSlv( threadTaskStub, seedSlv );
   24.51 +   
   24.52 +      //Hook up the semantic layer's plug-ins to the Master virt procr
   24.53 +   PR_SS__register_create_task_handler(     &createTaskHandler, seedVP, PRServ_MAGIC_NUMBER );
   24.54 +   PR_SS__register_end_task_handler(        &endTaskHandler, seedVP, PRServ_MAGIC_NUMBER );
   24.55 +   PR_SS__register_create_slave_handler(    &createThreadHandler, seedVP, PRServ_MAGIC_NUMBER );
   24.56 +   PR_SS__register_dissipate_slave_handler( &endThreadHandler, seedVP, PRServ_MAGIC_NUMBER );
   24.57 +   PR_SS__register_request_handler(         &PRServ__Request_Handler, seedVP, PRServ_MAGIC_NUMBER );
   24.58 +   PR_SS__register_assigner(                &PRServ__assign_work_to_slot, seedVP, PRServ_MAGIC_NUMBER );
   24.59 +    RequestHandler createInitialSemDataFn;
   24.60 +   RequestHandler resetSemDataFn;
   24.61 +  
   24.62 +   #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
   24.63 +   _PRTopEnv->counterHandler = &PRServ__counter_handler;
   24.64 +   PRServ__init_counter_data_structs();
   24.65 +   #endif
   24.66 +
   24.67 +
   24.68 +      //create the ready queues, hash tables used for matching and so forth
   24.69 +   semEnv->slavesReadyToResumeQ = makePRQ();
   24.70 +   semEnv->taskReadyQ           = makePRQ();
   24.71 +   
   24.72 +   semEnv->argPtrHashTbl  = makeHashTable32( 16, &PR_int__free );
   24.73 +   semEnv->commHashTbl    = makeHashTable32( 16, &PR_int__free );
   24.74 +   
   24.75 +   semEnv->nextCoreToGetNewSlv = 0;
   24.76 +   
   24.77 +
   24.78 +   //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
   24.79 +   //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
   24.80 +   //semanticEnv->transactionStrucs = makeDynArrayInfo( );
   24.81 +   for( i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++ )
   24.82 +    {
   24.83 +      semEnv->fnSingletons[i].endInstrAddr      = NULL;
   24.84 +      semEnv->fnSingletons[i].hasBeenStarted    = FALSE;
   24.85 +      semEnv->fnSingletons[i].hasFinished       = FALSE;
   24.86 +      semEnv->fnSingletons[i].waitQ             = makePRQ();
   24.87 +      semEnv->transactionStrucs[i].waitingVPQ   = makePRQ();
   24.88 +    }
   24.89 +
   24.90 +
   24.91 +   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   24.92 +   semEnv->unitList = makeListOfArrays(sizeof(Unit),128);
   24.93 +   semEnv->ctlDependenciesList = makeListOfArrays(sizeof(Dependency),128);
   24.94 +   semEnv->commDependenciesList = makeListOfArrays(sizeof(Dependency),128);
   24.95 +   semEnv->dynDependenciesList = makeListOfArrays(sizeof(Dependency),128);
   24.96 +   semEnv->ntonGroupsInfo = makePrivDynArrayOfSize((void***)&(semEnv->ntonGroups),8);
   24.97 +   
   24.98 +   semEnv->hwArcs = makeListOfArrays(sizeof(Dependency),128);
   24.99 +   memset(semEnv->last_in_slot,0,sizeof(NUM_CORES * NUM_ANIM_SLOTS * sizeof(Unit)));
  24.100 +   #endif
  24.101 + }
  24.102 +
  24.103 +
  24.104 +/*Frees any memory allocated by PRServ__init() then calls PR_int__shutdown
  24.105 + */
  24.106 +void
  24.107 +PRServ__cleanup_after_shutdown()
  24.108 + { PRServSemEnv *semanticEnv;
  24.109 +   
  24.110 +   semanticEnv = _PRTopEnv->semanticEnv;
  24.111 +
  24.112 +   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  24.113 +   //UCC
  24.114 +   FILE* output;
  24.115 +   int n;
  24.116 +   char filename[255];    
  24.117 +    for(n=0;n<255;n++)
  24.118 +    {
  24.119 +        sprintf(filename, "./counters/UCC.%d",n);
  24.120 +        output = fopen(filename,"r");
  24.121 +        if(output)
  24.122 +        {
  24.123 +            fclose(output);
  24.124 +        }else{
  24.125 +            break;
  24.126 +        }
  24.127 +    }
  24.128 +   if(n<255){
  24.129 +    printf("Saving UCC to File: %s ...\n", filename);
  24.130 +    output = fopen(filename,"w+");
  24.131 +    if(output!=NULL){
  24.132 +        set_dependency_file(output);
  24.133 +        //fprintf(output,"digraph Dependencies {\n");
  24.134 +        //set_dot_file(output);
  24.135 +        //FIXME:  first line still depends on counters being enabled, replace w/ unit struct!
  24.136 +        //forAllInDynArrayDo(_PRTopEnv->counter_history_array_info, &print_dot_node_info );
  24.137 +        forAllInListOfArraysDo(semanticEnv->unitList, &print_unit_to_file);
  24.138 +        forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
  24.139 +        forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
  24.140 +        forAllInDynArrayDo(semanticEnv->ntonGroupsInfo,&print_nton_to_file);
  24.141 +        //fprintf(output,"}\n");
  24.142 +        fflush(output);
  24.143 +
  24.144 +    } else
  24.145 +        printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
  24.146 +   } else {
  24.147 +       printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
  24.148 +   }
  24.149 +   //Loop Graph
  24.150 +   for(n=0;n<255;n++)
  24.151 +    {
  24.152 +        sprintf(filename, "./counters/LoopGraph.%d",n);
  24.153 +        output = fopen(filename,"r");
  24.154 +        if(output)
  24.155 +        {
  24.156 +            fclose(output);
  24.157 +        }else{
  24.158 +            break;
  24.159 +        }
  24.160 +    }
  24.161 +   if(n<255){
  24.162 +    printf("Saving LoopGraph to File: %s ...\n", filename);
  24.163 +    output = fopen(filename,"w+");
  24.164 +    if(output!=NULL){
  24.165 +        set_dependency_file(output);
  24.166 +        //fprintf(output,"digraph Dependencies {\n");
  24.167 +        //set_dot_file(output);
  24.168 +        //FIXME:  first line still depends on counters being enabled, replace w/ unit struct!
  24.169 +        //forAllInDynArrayDo(_PRTopEnv->counter_history_array_info, &print_dot_node_info );
  24.170 +        forAllInListOfArraysDo( semanticEnv->unitList, &print_unit_to_file );
  24.171 +        forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
  24.172 +        forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
  24.173 +        forAllInListOfArraysDo( semanticEnv->dynDependenciesList, &print_dyn_dependency_to_file );
  24.174 +        forAllInListOfArraysDo( semanticEnv->hwArcs, &print_hw_dependency_to_file );
  24.175 +        //fprintf(output,"}\n");
  24.176 +        fflush(output);
  24.177 +
  24.178 +    } else
  24.179 +        printf("Opening LoopGraph file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
  24.180 +   } else {
  24.181 +       printf("Could not open LoopGraph file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
  24.182 +   }
  24.183 +   
  24.184 +   
  24.185 +   freeListOfArrays(semanticEnv->unitList);
  24.186 +   freeListOfArrays(semanticEnv->commDependenciesList);
  24.187 +   freeListOfArrays(semanticEnv->ctlDependenciesList);
  24.188 +   freeListOfArrays(semanticEnv->dynDependenciesList);
  24.189 +   
  24.190 +   #endif
  24.191 +#ifdef HOLISTIC__TURN_ON_PERF_COUNTERS    
  24.192 +    for(n=0;n<255;n++)
  24.193 +    {
  24.194 +        sprintf(filename, "./counters/Counters.%d.csv",n);
  24.195 +        output = fopen(filename,"r");
  24.196 +        if(output)
  24.197 +        {
  24.198 +            fclose(output);
  24.199 +        }else{
  24.200 +            break;
  24.201 +        }
  24.202 +    }
  24.203 +    if(n<255){
  24.204 +    printf("Saving Counter measurements to File: %s ...\n", filename);
  24.205 +    output = fopen(filename,"w+");
  24.206 +    if(output!=NULL){
  24.207 +        set_counter_file(output);
  24.208 +        int i;
  24.209 +        for(i=0;i<NUM_CORES;i++){
  24.210 +            forAllInListOfArraysDo( semanticEnv->counterList[i], &print_counter_events_to_file );
  24.211 +            fflush(output);
  24.212 +        }
  24.213 +
  24.214 +    } else
  24.215 +        printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
  24.216 +   } else {
  24.217 +       printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
  24.218 +   }
  24.219 +    
  24.220 +#endif
  24.221 +/* It's all allocated inside PR's big chunk -- that's about to be freed, so
  24.222 + *  nothing to do here
  24.223 +   
  24.224 +
  24.225 +   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
  24.226 +    {
  24.227 +      PR_int__free( semanticEnv->readyVPQs[coreIdx]->startOfData );
  24.228 +      PR_int__free( semanticEnv->readyVPQs[coreIdx] );
  24.229 +    }
  24.230 +   PR_int__free( semanticEnv->readyVPQs );
  24.231 +   
  24.232 +   freeHashTable( semanticEnv->commHashTbl );
  24.233 +   PR_int__free( _PRTopEnv->semanticEnv );
  24.234 + */
  24.235 +   PR_SS__cleanup_at_end_of_shutdown();
  24.236 + }
  24.237 +
  24.238 +