changeset 3:468b8638ff92

Works -- first working version, includes slave pruning and shutdown detection
author Sean Halle <seanhalle@yahoo.com>
date Wed, 06 Jun 2012 17:55:36 -0700
parents f2ed1c379fe7
children 13af59ed7ea5
files Measurement/VSs_Counter_Recording.c VSs.c VSs.h VSs_PluginFns.c VSs_Request_Handlers.c
diffstat 5 files changed, 375 insertions(+), 280 deletions(-)
     1.1 --- a/Measurement/VSs_Counter_Recording.c	Wed May 30 15:02:38 2012 -0700
     1.2 +++ b/Measurement/VSs_Counter_Recording.c	Wed Jun 06 17:55:36 2012 -0700
     1.3 @@ -5,7 +5,7 @@
     1.4  
     1.5  #include "VSs_Counter_Recording.h"
     1.6  #include "VMS_impl/VMS.h"
     1.7 -#include "VSs.h"
     1.8 +#include "VSs_impl/VSs.h"
     1.9  
    1.10  #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
    1.11  
     2.1 --- a/VSs.c	Wed May 30 15:02:38 2012 -0700
     2.2 +++ b/VSs.c	Wed Jun 06 17:55:36 2012 -0700
     2.3 @@ -12,7 +12,7 @@
     2.4  #include "Hash_impl/PrivateHash.h"
     2.5  
     2.6  #include "VSs.h"
     2.7 -#include "VSs_Counter_Recording.h"
     2.8 +#include "Measurement/VSs_Counter_Recording.h"
     2.9  
    2.10  //==========================================================================
    2.11  
    2.12 @@ -74,7 +74,7 @@
    2.13  void
    2.14  VSs__create_seed_slave_and_do_work( TopLevelFnPtr fnPtr, void *initData )
    2.15   { VSsSemEnv *semEnv;
    2.16 -   SlaveVP *seedPr;
    2.17 +   SlaveVP *seedSlv;
    2.18  
    2.19     VSs__init();      //normal multi-thd
    2.20     
    2.21 @@ -82,10 +82,13 @@
    2.22  
    2.23        //VSs starts with one processor, which is put into initial environ,
    2.24        // and which then calls create() to create more, thereby expanding work
    2.25 -   seedPr = VSs__create_slave_helper( fnPtr, initData,
    2.26 -                                      semEnv, semEnv->nextCoreToGetNewPr++ );
    2.27 +   seedSlv = VSs__create_slave_helper( fnPtr, initData,
    2.28 +                                      semEnv, semEnv->nextCoreToGetNewSlv++ );
    2.29 +   
    2.30 +      //seedVP doesn't do tasks
    2.31 +   ((VSsSemData *)seedSlv->semanticData)->needsTaskAssigned = FALSE;
    2.32  
    2.33 -   resume_slaveVP( seedPr, semEnv );
    2.34 +   resume_slaveVP( seedSlv, semEnv );
    2.35     
    2.36     VMS_SS__start_the_work_then_wait_until_done();      //normal multi-thd
    2.37  
    2.38 @@ -184,13 +187,17 @@
    2.39     #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
    2.40     VSs__init_counter_data_structs();
    2.41     #endif
    2.42 +
    2.43     semanticEnv->shutdownInitiated = FALSE;
    2.44 -   for(i=0;i<NUM_CORES;++i){
    2.45 -       for(j=0;j<NUM_ANIM_SLOTS;++j){
    2.46 -           semanticEnv->idlePr[i][j] = VMS_int__create_slaveVP(&idle_fn,NULL);
    2.47 -           semanticEnv->idlePr[i][j]->coreAnimatedBy = i;
    2.48 +   semanticEnv->coreIsDone = VMS_int__malloc( NUM_CORES * sizeof( bool32 ) );
    2.49 +   for( i = 0; i < NUM_CORES; ++i )
    2.50 +    { semanticEnv->coreIsDone[i] = FALSE;
    2.51 +      for( j = 0; j < NUM_ANIM_SLOTS; ++j )
    2.52 +       {
    2.53 +         semanticEnv->idleSlv[i][j] = VMS_int__create_slaveVP(&idle_fn,NULL);
    2.54 +         semanticEnv->idleSlv[i][j]->coreAnimatedBy = i;
    2.55         }
    2.56 -   }
    2.57 +    }
    2.58  
    2.59     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    2.60     semanticEnv->unitList = makeListOfArrays(sizeof(Unit),128);
    2.61 @@ -203,10 +210,7 @@
    2.62     memset(semanticEnv->last_in_slot,0,sizeof(NUM_CORES * NUM_ANIM_SLOTS * sizeof(Unit)));
    2.63     #endif
    2.64  
    2.65 -      //create the ready queue, hash tables used for pairing send to receive
    2.66 -      // and so forth
    2.67 -      //TODO: add hash tables for pairing sends with receives, and
    2.68 -      // initialize the data ownership system
    2.69 +      //create the ready queue, hash tables used for matching and so forth
    2.70     readyVPQs = VMS_int__malloc( NUM_CORES * sizeof(PrivQueueStruc *) );
    2.71  
    2.72     for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    2.73 @@ -216,10 +220,12 @@
    2.74     
    2.75     semanticEnv->readyVPQs = readyVPQs;
    2.76     
    2.77 -   semanticEnv->nextCoreToGetNewPr = 0;
    2.78 +   semanticEnv->taskReadyQ = makeVMSQ();
    2.79 +   
    2.80 +   semanticEnv->nextCoreToGetNewSlv = 0;
    2.81     semanticEnv->numSlaveVP = 0;
    2.82     
    2.83 -   semanticEnv->argPtrHashTbl  = makeHashTable( 1<<16, &VMS_int__free );//start big
    2.84 +   semanticEnv->argPtrHashTbl  = makeHashTable32( 16, &VMS_int__free );
    2.85  
    2.86     //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
    2.87     //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
    2.88 @@ -376,7 +382,7 @@
    2.89   */
    2.90  SlaveVP *
    2.91  VSs__create_slave_with( TopLevelFnPtr fnPtr,   void *initData,
    2.92 -                        SlaveVP *creatingPr )
    2.93 +                        SlaveVP *creatingSlv )
    2.94   { VSsSemReq reqData;
    2.95  
    2.96        //the semantic request data is on the stack and disappears when this
    2.97 @@ -386,30 +392,30 @@
    2.98     reqData.coreToAssignOnto = -1; //means round-robin assign
    2.99     reqData.fnPtr              = fnPtr;
   2.100     reqData.initData           = initData;
   2.101 -   reqData.callingSlv             = creatingPr;
   2.102 +   reqData.callingSlv             = creatingSlv;
   2.103  
   2.104 -   VMS_WL__send_create_slaveVP_req( &reqData, creatingPr );
   2.105 +   VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
   2.106  
   2.107 -   return creatingPr->dataRetFromReq;
   2.108 +   return creatingSlv->dataRetFromReq;
   2.109   }
   2.110  
   2.111  SlaveVP *
   2.112  VSs__create_slave_with_affinity( TopLevelFnPtr fnPtr, void *initData,
   2.113 -                        SlaveVP *creatingPr,  int32  coreToAssignOnto )
   2.114 +                        SlaveVP *creatingSlv,  int32  coreToAssignOnto )
   2.115   { VSsSemReq  reqData;
   2.116  
   2.117        //the semantic request data is on the stack and disappears when this
   2.118        // call returns -- it's guaranteed to remain in the VP's stack for as
   2.119        // long as the VP is suspended.
   2.120 -   reqData.reqType            = create_slave;
   2.121 +   reqData.reqType            = create_slave_w_aff; //not used, May 2012
   2.122     reqData.coreToAssignOnto   = coreToAssignOnto;
   2.123     reqData.fnPtr              = fnPtr;
   2.124     reqData.initData           = initData;
   2.125 -   reqData.callingSlv         = creatingPr;
   2.126 +   reqData.callingSlv         = creatingSlv;
   2.127  
   2.128 -   VMS_WL__send_create_slaveVP_req( &reqData, creatingPr );
   2.129 +   VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
   2.130  
   2.131 -   return creatingPr->dataRetFromReq;
   2.132 +   return creatingSlv->dataRetFromReq;
   2.133   }
   2.134  
   2.135  
   2.136 @@ -438,7 +444,7 @@
   2.137    
   2.138   
   2.139     VMS_WL__send_sem_request( &reqData, animSlv );
   2.140 -   return animSlv->dataRetFromReq;
   2.141 +   return (int32)animSlv->dataRetFromReq;
   2.142   }
   2.143  
   2.144  /*NOTE: if want, don't need to send the animating SlaveVP around.. 
   2.145 @@ -488,7 +494,7 @@
   2.146   * semantic environment.
   2.147   */
   2.148  void
   2.149 -VSs__start_fn_singleton( int32 singletonID,   SlaveVP *animPr )
   2.150 +VSs__start_fn_singleton( int32 singletonID,   SlaveVP *animSlv )
   2.151   {
   2.152     VSsSemReq  reqData;
   2.153  
   2.154 @@ -496,10 +502,10 @@
   2.155     reqData.reqType     = singleton_fn_start;
   2.156     reqData.singletonID = singletonID;
   2.157  
   2.158 -   VMS_WL__send_sem_request( &reqData, animPr );
   2.159 -   if( animPr->dataRetFromReq ) //will be 0 or addr of label in end singleton
   2.160 +   VMS_WL__send_sem_request( &reqData, animSlv );
   2.161 +   if( animSlv->dataRetFromReq ) //will be 0 or addr of label in end singleton
   2.162      {
   2.163 -       VSsSemEnv *semEnv = VMS_int__give_sem_env_for( animPr );
   2.164 +       VSsSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv );
   2.165         asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
   2.166      }
   2.167   }
   2.168 @@ -509,7 +515,7 @@
   2.169   * location.
   2.170   */
   2.171  void
   2.172 -VSs__start_data_singleton( VSsSingleton **singletonAddr,  SlaveVP *animPr )
   2.173 +VSs__start_data_singleton( VSsSingleton **singletonAddr,  SlaveVP *animSlv )
   2.174   {
   2.175     VSsSemReq  reqData;
   2.176  
   2.177 @@ -519,8 +525,8 @@
   2.178     reqData.reqType          = singleton_data_start;
   2.179     reqData.singletonPtrAddr = singletonAddr;
   2.180  
   2.181 -   VMS_WL__send_sem_request( &reqData, animPr );
   2.182 -   if( animPr->dataRetFromReq ) //either 0 or end singleton's return addr
   2.183 +   VMS_WL__send_sem_request( &reqData, animSlv );
   2.184 +   if( animSlv->dataRetFromReq ) //either 0 or end singleton's return addr
   2.185      {    //Assembly code changes the return addr on the stack to the one
   2.186           // saved into the singleton by the end-singleton-fn
   2.187           //The return addr is at 0x4(%%ebp)
   2.188 @@ -538,26 +544,26 @@
   2.189   * inside is shared by all invocations of a given singleton ID.
   2.190   */
   2.191  void
   2.192 -VSs__end_fn_singleton( int32 singletonID, SlaveVP *animPr )
   2.193 +VSs__end_fn_singleton( int32 singletonID, SlaveVP *animSlv )
   2.194   {
   2.195     VSsSemReq  reqData;
   2.196  
   2.197        //don't need this addr until after at least one singleton has reached
   2.198        // this function
   2.199 -   VSsSemEnv *semEnv = VMS_int__give_sem_env_for( animPr );
   2.200 +   VSsSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv );
   2.201     asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
   2.202  
   2.203     reqData.reqType     = singleton_fn_end;
   2.204     reqData.singletonID = singletonID;
   2.205  
   2.206 -   VMS_WL__send_sem_request( &reqData, animPr );
   2.207 +   VMS_WL__send_sem_request( &reqData, animSlv );
   2.208  
   2.209  EndSingletonInstrAddr:
   2.210     return;
   2.211   }
   2.212  
   2.213  void
   2.214 -VSs__end_data_singleton(  VSsSingleton **singletonPtrAddr, SlaveVP *animPr )
   2.215 +VSs__end_data_singleton(  VSsSingleton **singletonPtrAddr, SlaveVP *animSlv )
   2.216   {
   2.217     VSsSemReq  reqData;
   2.218  
   2.219 @@ -575,7 +581,7 @@
   2.220     reqData.reqType          = singleton_data_end;
   2.221     reqData.singletonPtrAddr = singletonPtrAddr;
   2.222  
   2.223 -   VMS_WL__send_sem_request( &reqData, animPr );
   2.224 +   VMS_WL__send_sem_request( &reqData, animSlv );
   2.225   }
   2.226  
   2.227  /*This executes the function in the masterVP, so it executes in isolation
   2.228 @@ -590,7 +596,7 @@
   2.229   */
   2.230  void
   2.231  VSs__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
   2.232 -                                    void *data, SlaveVP *animPr )
   2.233 +                                    void *data, SlaveVP *animSlv )
   2.234   {
   2.235     VSsSemReq  reqData;
   2.236  
   2.237 @@ -599,7 +605,7 @@
   2.238     reqData.fnToExecInMaster = ptrToFnToExecInMaster;
   2.239     reqData.dataForFn        = data;
   2.240  
   2.241 -   VMS_WL__send_sem_request( &reqData, animPr );
   2.242 +   VMS_WL__send_sem_request( &reqData, animSlv );
   2.243   }
   2.244  
   2.245  
   2.246 @@ -617,16 +623,16 @@
   2.247   *If NULL, then write requesting into the field and resume.
   2.248   */
   2.249  void
   2.250 -VSs__start_transaction( int32 transactionID, SlaveVP *animPr )
   2.251 +VSs__start_transaction( int32 transactionID, SlaveVP *animSlv )
   2.252   {
   2.253     VSsSemReq  reqData;
   2.254  
   2.255        //
   2.256 -   reqData.callingSlv      = animPr;
   2.257 +   reqData.callingSlv      = animSlv;
   2.258     reqData.reqType     = trans_start;
   2.259     reqData.transID     = transactionID;
   2.260  
   2.261 -   VMS_WL__send_sem_request( &reqData, animPr );
   2.262 +   VMS_WL__send_sem_request( &reqData, animSlv );
   2.263   }
   2.264  
   2.265  /*This suspends to the master, then uses transactionID as index into an
   2.266 @@ -639,14 +645,14 @@
   2.267   * resumes both.
   2.268   */
   2.269  void
   2.270 -VSs__end_transaction( int32 transactionID, SlaveVP *animPr )
   2.271 +VSs__end_transaction( int32 transactionID, SlaveVP *animSlv )
   2.272   {
   2.273     VSsSemReq  reqData;
   2.274  
   2.275        //
   2.276 -   reqData.callingSlv      = animPr;
   2.277 +   reqData.callingSlv      = animSlv;
   2.278     reqData.reqType     = trans_end;
   2.279     reqData.transID     = transactionID;
   2.280  
   2.281 -   VMS_WL__send_sem_request( &reqData, animPr );
   2.282 +   VMS_WL__send_sem_request( &reqData, animSlv );
   2.283   }
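
Note on the VSs.c init changes above: the new coreIsDone array (together with numCoresDone and shutdownInitiated, declared in VSs.h below) is what lets the assigner decide that no core has a ready task and no slave that could create one, and shut the run down. The following is a minimal sketch of that bookkeeping, not the plugin's actual code: NUM_CORES is fixed, bool32 is reduced to int, and the assigner's extra-slave check is left to the caller.

#include <stdlib.h>

#define NUM_CORES 4
typedef int bool32;
enum { FALSE = 0, TRUE = 1 };

typedef struct
 { bool32 *coreIsDone;
   int     numCoresDone;
   bool32  shutdownInitiated;
 }
ShutdownState;

void
init_shutdown_state( ShutdownState *s )          /*stand-in for the VSs__init work*/
 { int i;
   s->coreIsDone = (bool32 *)malloc( NUM_CORES * sizeof(bool32) );
   for( i = 0; i < NUM_CORES; ++i ) s->coreIsDone[i] = FALSE;
   s->numCoresDone      = 0;
   s->shutdownInitiated = FALSE;
 }

/*Called when a core finds neither a ready task nor a slave able to make one*/
void
mark_core_done( ShutdownState *s, int coreNum )
 { if( s->coreIsDone[coreNum] == FALSE )
    { s->coreIsDone[coreNum]  = TRUE;
      s->numCoresDone        += 1;
      if( s->numCoresDone == NUM_CORES )
         s->shutdownInitiated = TRUE;            /*no core has work or can create more*/
    }
 }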
     3.1 --- a/VSs.h	Wed May 30 15:02:38 2012 -0700
     3.2 +++ b/VSs.h	Wed Jun 06 17:55:36 2012 -0700
     3.3 @@ -12,7 +12,7 @@
     3.4  #include "Queue_impl/PrivateQueue.h"
     3.5  #include "Hash_impl/PrivateHash.h"
     3.6  #include "VMS_impl/VMS.h"
     3.7 -#include "dependency.h"
     3.8 +#include "Measurement/dependency.h"
     3.9  
    3.10  
    3.11  //===========================================================================
    3.12 @@ -29,13 +29,13 @@
    3.13  /*This header defines everything specific to the VSs semantic plug-in
    3.14   */
    3.15  typedef struct _VSsSemReq   VSsSemReq;
    3.16 -typedef void  (*VSsTaskFnPtr )   ( void * ); //executed atomically in master
    3.17 +typedef void  (*VSsTaskFnPtr )   ( void *, SlaveVP *);
    3.18  typedef void  (*PtrToAtomicFn )  ( void * ); //executed atomically in master
    3.19  //===========================================================================
    3.20  
    3.21  #define IN    1
    3.22  #define OUT   2
    3.23 -#define INOUT 3
    3.24 +#define INOUT 2
    3.25  
    3.26  #define READER  1
    3.27  #define WRITER  2
    3.28 @@ -54,10 +54,19 @@
    3.29  
    3.30  typedef struct
    3.31   {
    3.32 +   bool32       hasEnabledNonFinishedWriter;
    3.33 +   int32        numEnabledNonDoneReaders;
    3.34 +   PrivQueueStruc *waitersQ;
    3.35 + }
    3.36 +VSsPointerEntry;
    3.37 +
    3.38 +typedef struct
    3.39 + {
    3.40     void       **args; //ctld args must come first, as ptrs
    3.41     VSsTaskType *taskType;
    3.42     int32        numBlockingProp;
    3.43     SlaveVP     *slaveAssignedTo;
    3.44 +   VSsPointerEntry  **ptrEntries;
    3.45   }
    3.46  VSsTaskStub;
    3.47  
    3.48 @@ -69,14 +78,6 @@
    3.49   }
    3.50  VSsTaskStubCarrier;
    3.51  
    3.52 -typedef struct
    3.53 - {
    3.54 -   bool32       hasEnabledNonFinishedWriter;
    3.55 -   int32        numEnabledNonDoneReaders;
    3.56 -   PrivQStruct *waitersQ;
    3.57 - }
    3.58 -VSsPointerEntry;
    3.59 -
    3.60  
    3.61  typedef struct
    3.62   {
    3.63 @@ -157,12 +158,15 @@
    3.64     PrivQueueStruc  *taskReadyQ;  //Q: shared or local?
    3.65     HashTable       *argPtrHashTbl;
    3.66     int32            numSlaveVP;
    3.67 -   int32            nextCoreToGetNewPr;
    3.68 +   int32            nextCoreToGetNewSlv;
    3.69     int32            primitiveStartTime;
    3.70  
    3.71                         //fix limit on num with dynArray
    3.72     VSsSingleton     fnSingletons[NUM_STRUCS_IN_SEM_ENV];
    3.73     VSsTrans         transactionStrucs[NUM_STRUCS_IN_SEM_ENV];
    3.74 +
    3.75 +   bool32          *coreIsDone;
    3.76 +   int32            numCoresDone;
    3.77     
    3.78     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    3.79     ListOfArrays* unitList;
    3.80 @@ -178,7 +182,7 @@
    3.81     #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
    3.82     ListOfArrays* counterList[NUM_CORES];
    3.83     #endif
    3.84 -   SlaveVP* idlePr[NUM_CORES][NUM_ANIM_SLOTS];
    3.85 +   SlaveVP* idleSlv[NUM_CORES][NUM_ANIM_SLOTS];
    3.86     int shutdownInitiated;
    3.87   }
    3.88  VSsSemEnv;
    3.89 @@ -237,7 +241,7 @@
    3.90  
    3.91    SlaveVP *
    3.92  VSs__create_slave_with_affinity( TopLevelFnPtr fnPtr,    void *initData,
    3.93 -                            SlaveVP *creatingPr, int32 coreToAssignOnto);
    3.94 +                            SlaveVP *creatingSlv, int32 coreToAssignOnto);
    3.95  
    3.96  void
    3.97  VSs__dissipate_slave( SlaveVP *slaveToDissipate );
    3.98 @@ -251,7 +255,7 @@
    3.99  
   3.100  //=======================
   3.101  int32
   3.102 -VSs__submit_task( VSsTaskType *taskType, void **args, SlaveVP *animSlv);
   3.103 +VSs__submit_task( VSsTaskType *taskType, void *args, SlaveVP *animSlv);
   3.104  
   3.105  
   3.106  void
   3.107 @@ -284,7 +288,7 @@
   3.108  
   3.109  //=========================  Internal use only  =============================
   3.110  void
   3.111 -VSs__Request_Handler( SlaveVP *requestingPr, void *_semEnv );
   3.112 +VSs__Request_Handler( SlaveVP *requestingSlv, void *_semEnv );
   3.113  
   3.114  SlaveVP *
   3.115  VSs__assign_slaveVP_to_slot( void *_semEnv, AnimSlot *slot );
   3.116 @@ -294,7 +298,7 @@
   3.117                            VSsSemEnv *semEnv,    int32 coreToAssignOnto );
   3.118  
   3.119  //=====================  Measurement of Lang Overheads  =====================
   3.120 -#include "VSs_Measurement.h"
   3.121 +#include "Measurement/VSs_Measurement.h"
   3.122  
   3.123  //===========================================================================
   3.124  #endif	/* _VSs_H */
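
Note on the moved VSsPointerEntry struct above: its fields record, per argument pointer, whether an enabled writer is still unfinished, how many enabled readers are unfinished, and which submitted tasks are waiting. A minimal sketch of the readiness test those fields imply is below; the reader branch follows the check visible in handleSubmitTask in VSs_Request_Handlers.c, while the writer branch is inferred from the usual StarSs rule (no enabled unfinished writer, no enabled unfinished readers, empty waiter queue) because that condition falls outside the visible hunk. Types are simplified stand-ins.

#define READER 1
#define WRITER 2

typedef struct
 { int hasEnabledNonFinishedWriter;
   int numEnabledNonDoneReaders;
   int waitersQIsEmpty;             /*stands in for isEmptyPrivQ( waitersQ )*/
 }
PtrEntrySketch;

/*Nonzero: the argument is free now, so decrement the task's numBlockingProp.
 *Zero: enqueue a carrier for this task on the entry's waitersQ instead.*/
int
arg_is_free_now( PtrEntrySketch *e, int argType )
 { if( argType == READER )
      return !e->hasEnabledNonFinishedWriter && e->waitersQIsEmpty;
   else   /*WRITER -- assumed rule, see note above*/
      return !e->hasEnabledNonFinishedWriter &&
             e->numEnabledNonDoneReaders == 0 &&
             e->waitersQIsEmpty;
 }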
     4.1 --- a/VSs_PluginFns.c	Wed May 30 15:02:38 2012 -0700
     4.2 +++ b/VSs_PluginFns.c	Wed Jun 06 17:55:36 2012 -0700
     4.3 @@ -16,13 +16,13 @@
     4.4  resume_slaveVP( SlaveVP *slave, VSsSemEnv *semEnv );
     4.5  
     4.6  void
     4.7 -handleSemReq( VMSReqst *req, SlaveVP *requestingPr, VSsSemEnv *semEnv );
     4.8 +handleSemReq( VMSReqst *req, SlaveVP *requestingSlv, VSsSemEnv *semEnv );
     4.9  
    4.10  void
    4.11 -handleDissipate(                SlaveVP *requestingPr, VSsSemEnv *semEnv );
    4.12 +handleDissipate(                SlaveVP *requestingSlv, VSsSemEnv *semEnv );
    4.13  
    4.14  void
    4.15 -handleCreate(    VMSReqst *req, SlaveVP *requestingPr, VSsSemEnv *semEnv );
    4.16 +handleCreate(    VMSReqst *req, SlaveVP *requestingSlv, VSsSemEnv *semEnv );
    4.17  
    4.18  //============================== Assigner ==================================
    4.19  //
    4.20 @@ -34,9 +34,10 @@
    4.21   */
    4.22  SlaveVP *
    4.23  VSs__assign_slaveVP_to_slot( void *_semEnv, AnimSlot *slot )
    4.24 - { SlaveVP   *assignPr;
    4.25 -   VSsSemEnv *semEnv;
    4.26 -   int32      coreNum, slotNum;
    4.27 + { SlaveVP    *assignSlv;
    4.28 +   VSsSemEnv  *semEnv;
    4.29 +   VSsSemData *semData;
    4.30 +   int32       coreNum, slotNum;
    4.31     
    4.32     coreNum = slot->coreSlotIsOn;
    4.33     slotNum = slot->slotIdx;
    4.34 @@ -54,76 +55,118 @@
    4.35      * of slaves, and take one from pool when a task suspends.
    4.36      */
    4.37     //TODO: fix false sharing in array
    4.38 -   assignPr = readPrivQ( semEnv->readyVPQs[coreNum] );
    4.39 -   if( assignPr == NULL )
    4.40 -    { //if there are tasks ready to go, then make a new slave to animate
    4.41 -      // This only happens when all available slaves are blocked by
    4.42 -      // constructs like send, or mutex, and so on..
    4.43 -      VMS_PI__throw_exception( "no slaves in readyQ", NULL, NULL );
    4.44 +   assignSlv = readPrivQ( semEnv->readyVPQs[coreNum] );
    4.45 +   if( assignSlv == NULL )
    4.46 +    {    //make a new slave to animate
    4.47 +         //This happens for the first task on the core and when all available
    4.48 +         // slaves are blocked by constructs like send, or mutex, and so on..
    4.49 +      assignSlv = VSs__create_slave_helper( NULL, NULL, semEnv, coreNum );
    4.50      }
    4.51 -   if( assignPr != NULL ) //could still be NULL, if no tasks avail
    4.52 -    {
    4.53 -      if( ((VSsSemData *)assignPr->semanticData)->needsTaskAssigned )
    4.54 -       { VSsTaskStub *
    4.55 -         newTaskStub = readQ( semEnv->taskReadyQ );
    4.56 -         if( newTaskStub == NULL )
    4.57 -          { //No task, so slave unused, so put it back and return "no-slave"
    4.58 -            writeQ( assignPr, semEnv->readyVPQs[coreNum] );
    4.59 -            return NULL;
    4.60 +   semData = (VSsSemData *)assignSlv->semanticData;
    4.61 +      //slave could be resuming a task in progress, check for this
    4.62 +   if( semData->needsTaskAssigned )
    4.63 +    {    //no, not resuming, needs a task..
    4.64 +      VSsTaskStub *newTaskStub;
    4.65 +      SlaveVP *extraSlv;
    4.66 +      newTaskStub = readPrivQ( semEnv->taskReadyQ );
    4.67 +      if( newTaskStub == NULL )
    4.68 +       { //No task, so slave unused, so put it back and return "no-slave"
    4.69 +         //But first check if have extra free slaves
    4.70 +         extraSlv = readPrivQ( semEnv->readyVPQs[coreNum] );
    4.71 +         if( extraSlv == NULL )
    4.72 +          {    //means no tasks and no slave on this core can generate more
    4.73 +            //TODO: false sharing
    4.74 +            if( semEnv->coreIsDone[coreNum] == FALSE)
    4.75 +             { semEnv->numCoresDone += 1;
    4.76 +               semEnv->coreIsDone[coreNum] = TRUE;
    4.77 +               #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
    4.78 +               semEnv->shutdownInitiated = TRUE;
    4.79 +               #else
    4.80 +               if( semEnv->numCoresDone == NUM_CORES )
    4.81 +                {    //means no cores have work, and none can generate more
    4.82 +                  semEnv->shutdownInitiated = TRUE;
    4.83 +                }
    4.84 +               #endif
    4.85 +             }
    4.86 +               //put slave back into Q and return NULL
    4.87 +            writePrivQ( assignSlv, semEnv->readyVPQs[coreNum] );
    4.88 +            assignSlv = NULL;
    4.89 +               //except if shutdown has been initiated by this or other core
    4.90 +            if(semEnv->shutdownInitiated) 
    4.91 +             { assignSlv = VMS_SS__create_shutdown_slave();
    4.92 +             }
    4.93            }
    4.94 -         //point slave to the task's function, and mark slave as having task
    4.95 -         VMS_int__reset_slaveVP_to_TopLvlFn( assignPr, 
    4.96 +         else //extra slave exists, but no tasks for either slave
    4.97 +          { if(((VSsSemData *)extraSlv->semanticData)->needsTaskAssigned == TRUE)
    4.98 +             {    //means have two slaves need tasks -- redundant, kill one
    4.99 +               handleDissipate( extraSlv, semEnv );
   4.100 +                  //then put other back into Q and return NULL
   4.101 +               writePrivQ( assignSlv, semEnv->readyVPQs[coreNum] );
   4.102 +               assignSlv = NULL;
   4.103 +             }
   4.104 +            else
   4.105 +             {    //extra slave has work -- so take it instead
   4.106 +               writePrivQ( assignSlv, semEnv->readyVPQs[coreNum] );
   4.107 +               assignSlv = extraSlv;
   4.108 +               //semData = (VSsSemData *)assignSlv->semanticData; Don't use
   4.109 +             }
   4.110 +          }
   4.111 +       }
   4.112 +      else //have a new task for the slave.
   4.113 +       { //point slave to task's function, and mark slave as having task
   4.114 +         VMS_int__reset_slaveVP_to_TopLvlFn( assignSlv, 
   4.115                               newTaskStub->taskType->fn, newTaskStub->args );
   4.116 -         ((VSsSemData *)assignPr->semanticData)->taskStub = newTaskStub;
   4.117 -         newTaskStub->slaveAssignedTo = assignPr;
   4.118 -         ((VSsSemData *)assignPr->semanticData)->needsTaskAssigned = FALSE;
   4.119 +         semData->taskStub = newTaskStub;
   4.120 +         newTaskStub->slaveAssignedTo = assignSlv;
   4.121 +         semData->needsTaskAssigned = FALSE;
   4.122         }
   4.123 +    } //outcome: 1)slave didn't need a new task 2)slave just pointed at one
   4.124 +      //         3)no tasks, so slave NULL
   4.125 +   
   4.126     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   4.127 +   if( assignSlv == NULL )
   4.128 +    { assignSlv = semEnv->idleSlv[coreNum][slotNum]; 
   4.129 +      if(semEnv->shutdownInitiated) 
   4.130 +       { assignSlv = VMS_SS__create_shutdown_slave();
   4.131 +       }
   4.132 +         //things that would normally happen in resume(), but these VPs
   4.133 +         // never go there
   4.134 +      assignSlv->assignCount++; //Somewhere here!
   4.135 +      Unit newu;
   4.136 +      newu.vp = assignSlv->slaveID;
   4.137 +      newu.task = assignSlv->assignCount;
   4.138 +      addToListOfArrays(Unit,newu,semEnv->unitList);
   4.139 +
   4.140 +      if (assignSlv->assignCount > 1)
   4.141 +       { Dependency newd;
   4.142 +         newd.from_vp = assignSlv->slaveID;
   4.143 +         newd.from_task = assignSlv->assignCount - 1;
   4.144 +         newd.to_vp = assignSlv->slaveID;
   4.145 +         newd.to_task = assignSlv->assignCount;
   4.146 +         addToListOfArrays(Dependency, newd ,semEnv->ctlDependenciesList);  
   4.147 +       }
   4.148      }
   4.149 -      //Note, using a non-blocking queue -- it returns NULL if queue empty
   4.150 -   else //assignPr is indeed NULL
   4.151 -    { assignPr = semEnv->idlePr[coreNum][slotNum]; 
   4.152 -      if(semEnv->shutdownInitiated) 
   4.153 -       { assignPr = VMS_SS__create_shutdown_slave();
   4.154 -       }
   4.155 -      //things that would normally happen in resume(), but these VPs
   4.156 -      // never go there
   4.157 -         assignPr->assignCount++; //Somewhere here!
   4.158 -         Unit newu;
   4.159 -         newu.vp = assignPr->slaveID;
   4.160 -         newu.task = assignPr->assignCount;
   4.161 -         addToListOfArrays(Unit,newu,semEnv->unitList);
   4.162 -
   4.163 -         if (assignPr->assignCount > 1)
   4.164 -          { Dependency newd;
   4.165 -            newd.from_vp = assignPr->slaveID;
   4.166 -            newd.from_task = assignPr->assignCount - 1;
   4.167 -            newd.to_vp = assignPr->slaveID;
   4.168 -            newd.to_task = assignPr->assignCount;
   4.169 -            addToListOfArrays(Dependency, newd ,semEnv->ctlDependenciesList);  
   4.170 -          }
   4.171 -      #endif
   4.172 -    }
   4.173 +   #endif
   4.174     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   4.175 -   if( assignPr != NULL )
   4.176 -    { //assignPr->numTimesAssigned++;
   4.177 +   if( assignSlv != NULL )
   4.178 +    { //assignSlv->numTimesAssigned++;
   4.179        Unit prev_in_slot = 
   4.180           semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum];
   4.181        if(prev_in_slot.vp != 0)
   4.182         { Dependency newd;
   4.183           newd.from_vp = prev_in_slot.vp;
   4.184           newd.from_task = prev_in_slot.task;
   4.185 -         newd.to_vp = assignPr->slaveID;
   4.186 -         newd.to_task = assignPr->assignCount;
   4.187 +         newd.to_vp = assignSlv->slaveID;
   4.188 +         newd.to_task = assignSlv->assignCount;
   4.189           addToListOfArrays(Dependency,newd,semEnv->hwArcs);   
   4.190         }
   4.191 -      prev_in_slot.vp = assignPr->slaveID;
   4.192 -      prev_in_slot.task = assignPr->assignCount;
   4.193 +      prev_in_slot.vp = assignSlv->slaveID;
   4.194 +      prev_in_slot.task = assignSlv->assignCount;
   4.195        semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum] =
   4.196           prev_in_slot;        
   4.197      }
   4.198     #endif
   4.199 -   return( assignPr );
   4.200 +   return( assignSlv );
   4.201   }
   4.202  
   4.203  
   4.204 @@ -132,38 +175,38 @@
   4.205  /*
   4.206   */
   4.207  void
   4.208 -VSs__Request_Handler( SlaveVP *requestingPr, void *_semEnv )
   4.209 +VSs__Request_Handler( SlaveVP *requestingSlv, void *_semEnv )
   4.210   { VSsSemEnv *semEnv;
   4.211     VMSReqst  *req;
   4.212     
   4.213     semEnv = (VSsSemEnv *)_semEnv;
   4.214  
   4.215 -   req    = VMS_PI__take_next_request_out_of( requestingPr );
   4.216 +   req    = VMS_PI__take_next_request_out_of( requestingSlv );
   4.217  
   4.218     while( req != NULL )
   4.219      {
   4.220        switch( req->reqType )
   4.221 -       { case semantic:     handleSemReq(         req, requestingPr, semEnv);
   4.222 +       { case semantic:     handleSemReq(         req, requestingSlv, semEnv);
   4.223              break;
   4.224 -         case createReq:    handleCreate(         req, requestingPr, semEnv);
   4.225 +         case createReq:    handleCreate(         req, requestingSlv, semEnv);
   4.226              break;
   4.227 -         case dissipate:    handleDissipate(      req, requestingPr, semEnv);
   4.228 +         case dissipate:    handleDissipate(           requestingSlv, semEnv);
   4.229              break;
   4.230 -         case VMSSemantic:  VMS_PI__handle_VMSSemReq(req, requestingPr, semEnv,
   4.231 +         case VMSSemantic:  VMS_PI__handle_VMSSemReq(req, requestingSlv, semEnv,
   4.232                                               (ResumeSlvFnPtr) &resume_slaveVP);
   4.233              break;
   4.234           default:
   4.235              break;
   4.236         }
   4.237        
   4.238 -      req = VMS_PI__take_next_request_out_of( requestingPr );
   4.239 +      req = VMS_PI__take_next_request_out_of( requestingSlv );
   4.240      } //while( req != NULL )
   4.241  
   4.242   }
   4.243  
   4.244  
   4.245  void
   4.246 -handleSemReq( VMSReqst *req, SlaveVP *reqPr, VSsSemEnv *semEnv )
   4.247 +handleSemReq( VMSReqst *req, SlaveVP *reqSlv, VSsSemEnv *semEnv )
   4.248   { VSsSemReq *semReq;
   4.249  
   4.250     semReq = VMS_PI__take_sem_reqst_from(req);
   4.251 @@ -175,23 +218,23 @@
   4.252        case end_task:        handleEndTask(      semReq,        semEnv);
   4.253           break;
   4.254        //====================================================================
   4.255 -      case malloc_req:      handleMalloc(       semReq, reqPr, semEnv);
   4.256 +      case malloc_req:      handleMalloc(       semReq, reqSlv, semEnv);
   4.257           break;
   4.258 -      case free_req:        handleFree(         semReq, reqPr, semEnv);
   4.259 +      case free_req:        handleFree(         semReq, reqSlv, semEnv);
   4.260           break;
   4.261 -      case singleton_fn_start:  handleStartFnSingleton(semReq, reqPr, semEnv);
   4.262 +      case singleton_fn_start:  handleStartFnSingleton(semReq, reqSlv, semEnv);
   4.263           break;
   4.264 -      case singleton_fn_end:    handleEndFnSingleton(  semReq, reqPr, semEnv);
   4.265 +      case singleton_fn_end:    handleEndFnSingleton(  semReq, reqSlv, semEnv);
   4.266           break;
   4.267 -      case singleton_data_start:handleStartDataSingleton(semReq,reqPr,semEnv);
   4.268 +      case singleton_data_start:handleStartDataSingleton(semReq,reqSlv,semEnv);
   4.269           break;
   4.270 -      case singleton_data_end:  handleEndDataSingleton(semReq, reqPr, semEnv);
   4.271 +      case singleton_data_end:  handleEndDataSingleton(semReq, reqSlv, semEnv);
   4.272           break;
   4.273 -      case atomic:          handleAtomic(       semReq, reqPr, semEnv);
   4.274 +      case atomic:          handleAtomic(       semReq, reqSlv, semEnv);
   4.275           break;
   4.276 -      case trans_start:     handleTransStart(   semReq, reqPr, semEnv);
   4.277 +      case trans_start:     handleTransStart(   semReq, reqSlv, semEnv);
   4.278           break;
   4.279 -      case trans_end:       handleTransEnd(     semReq, reqPr, semEnv);
   4.280 +      case trans_end:       handleTransEnd(     semReq, reqSlv, semEnv);
   4.281           break;
   4.282      }
   4.283   }
   4.284 @@ -202,96 +245,90 @@
   4.285  /*SlaveVP dissipate  (NOT task-end!)
   4.286   */
   4.287  void
   4.288 -handleDissipate( SlaveVP *requestingPr, VSsSemEnv *semEnv )
   4.289 +handleDissipate( SlaveVP *requestingSlv, VSsSemEnv *semEnv )
   4.290   {
   4.291 -    DEBUG__printf1(dbgRqstHdlr,"Dissipate request from processor %d",requestingPr->slaveID)
   4.292 +    DEBUG__printf1(dbgRqstHdlr,"Dissipate request from processor %d",requestingSlv->slaveID)
   4.293        //free any semantic data allocated to the virt procr
   4.294 -   VMS_PI__free( requestingPr->semanticData );
   4.295 +   VMS_PI__free( requestingSlv->semanticData );
   4.296  
   4.297        //Now, call VMS to free_all AppVP state -- stack and so on
   4.298 -   VMS_PI__dissipate_slaveVP( requestingPr );
   4.299 -
   4.300 -   semEnv->numSlaveVP -= 1;
   4.301 -   if( semEnv->numSlaveVP == 0 )
   4.302 -    {    //no more work, so shutdown
   4.303 -       semEnv->shutdownInitiated = TRUE;
   4.304 -      //VMS_SS__shutdown();
   4.305 -    }
   4.306 +   VMS_PI__dissipate_slaveVP( requestingSlv );
   4.307   }
   4.308  
   4.309  /*Re-use this in the entry-point fn
   4.310   */
   4.311 -  SlaveVP *
   4.312 +SlaveVP *
   4.313  VSs__create_slave_helper( TopLevelFnPtr fnPtr, void *initData,
   4.314                            VSsSemEnv *semEnv,    int32 coreToAssignOnto )
   4.315 - { SlaveVP    *newPr;
   4.316 + { SlaveVP    *newSlv;
   4.317     VSsSemData   *semData;
   4.318  
   4.319        //This is running in master, so use internal version
   4.320 -   newPr = VMS_PI__create_slaveVP( fnPtr, initData );
   4.321 +   newSlv = VMS_PI__create_slaveVP( fnPtr, initData );
   4.322  
   4.323     semEnv->numSlaveVP += 1;
   4.324  
   4.325     semData = VMS_PI__malloc( sizeof(VSsSemData) );
   4.326     semData->highestTransEntered = -1;
   4.327     semData->lastTransEntered    = NULL;
   4.328 -
   4.329 -   newPr->semanticData = semData;
   4.330 +   semData->needsTaskAssigned   = TRUE;
   4.331 +   
   4.332 +   newSlv->semanticData = semData;
   4.333  
   4.334     //=================== Assign new processor to a core =====================
   4.335     #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
   4.336 -   newPr->coreAnimatedBy = 0;
   4.337 +   newSlv->coreAnimatedBy = 0;
   4.338  
   4.339     #else
   4.340  
   4.341     if(coreToAssignOnto < 0 || coreToAssignOnto >= NUM_CORES )
   4.342      {    //out-of-range, so round-robin assignment
   4.343 -      newPr->coreAnimatedBy = semEnv->nextCoreToGetNewPr;
   4.344 +      newSlv->coreAnimatedBy = semEnv->nextCoreToGetNewSlv;
   4.345  
   4.346 -      if( semEnv->nextCoreToGetNewPr >= NUM_CORES - 1 )
   4.347 -          semEnv->nextCoreToGetNewPr  = 0;
   4.348 +      if( semEnv->nextCoreToGetNewSlv >= NUM_CORES - 1 )
   4.349 +          semEnv->nextCoreToGetNewSlv  = 0;
   4.350        else
   4.351 -          semEnv->nextCoreToGetNewPr += 1;
   4.352 +          semEnv->nextCoreToGetNewSlv += 1;
   4.353      }
   4.354     else //core num in-range, so use it
   4.355 -    { newPr->coreAnimatedBy = coreToAssignOnto;
   4.356 +    { newSlv->coreAnimatedBy = coreToAssignOnto;
   4.357      }
   4.358     #endif
   4.359     //========================================================================
   4.360  
   4.361 -   return newPr;
   4.362 +   return newSlv;
   4.363   }
   4.364  
   4.365  /*SlaveVP create  (NOT task create!)
   4.366   */
   4.367  void
   4.368 -handleCreate( VMSReqst *req, SlaveVP *requestingPr, VSsSemEnv *semEnv  )
   4.369 +handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VSsSemEnv *semEnv  )
   4.370   { VSsSemReq *semReq;
   4.371 -   SlaveVP    *newPr;
   4.372 +   SlaveVP    *newSlv;
   4.373     
   4.374     
   4.375     semReq = VMS_PI__take_sem_reqst_from( req );
   4.376   
   4.377 -   newPr = VSs__create_slave_helper( semReq->fnPtr, semReq->initData, semEnv,
   4.378 +   newSlv = VSs__create_slave_helper( semReq->fnPtr, semReq->initData, semEnv,
   4.379                                       semReq->coreToAssignOnto );
   4.380     
   4.381 -         DEBUG__printf2(dbgRqstHdlr,"Create from: %d, new VP: %d", requestingPr->slaveID, newPr->slaveID)
   4.382 +         DEBUG__printf2(dbgRqstHdlr,"Create from: %d, new VP: %d", requestingSlv->slaveID, newSlv->slaveID)
   4.383  
   4.384     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   4.385     Dependency newd;
   4.386 -   newd.from_vp = requestingPr->slaveID;
   4.387 -   newd.from_task = requestingPr->assignCount;
   4.388 -   newd.to_vp = newPr->slaveID;
   4.389 +   newd.from_vp = requestingSlv->slaveID;
   4.390 +   newd.from_task = requestingSlv->assignCount;
   4.391 +   newd.to_vp = newSlv->slaveID;
   4.392     newd.to_task = 1;
   4.393     //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
   4.394     addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);   
   4.395     #endif
   4.396  
   4.397        //For VSs, caller needs ptr to created processor returned to it
   4.398 -   requestingPr->dataRetFromReq = newPr;
   4.399 +   requestingSlv->dataRetFromReq = newSlv;
   4.400  
   4.401 -   resume_slaveVP( newPr,        semEnv );
   4.402 -   resume_slaveVP( requestingPr, semEnv );
   4.403 +   resume_slaveVP( requestingSlv, semEnv );
   4.404 +   resume_slaveVP( newSlv,        semEnv );
   4.405   }
   4.406  
   4.407  
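
Note on the rewritten assigner above: it carries both pieces of this changeset's summary, slave pruning and shutdown detection. The sketch below mirrors its decision flow under simplified assumptions: queue, slave and task types are minimal stand-ins, the shutdown bookkeeping is collapsed into one per-core flag, and the real code's shutdown slave and UCC instrumentation are omitted.

#include <stdlib.h>

typedef struct { int needsTask; } Slave;
typedef struct { Slave *s[8]; int n; } SlaveQ;     /*readyVPQ stand-in*/
typedef struct { int n; } TaskQ;                   /*taskReadyQ: only the count matters here*/

static Slave *popSlave ( SlaveQ *q )           { return q->n ? q->s[--q->n] : NULL; }
static void   pushSlave( SlaveQ *q, Slave *x ) { q->s[q->n++] = x; }
static int    popTask  ( TaskQ  *q )           { if( q->n == 0 ) return 0; q->n -= 1; return 1; }

/*Returns the slave to animate next on this core, or NULL when the slot idles.*/
Slave *
assign_sketch( SlaveQ *readyVPQ, TaskQ *taskReadyQ, int *coreIsDone )
 { Slave *slave = popSlave( readyVPQ );
   if( slave == NULL )
    { slave = (Slave *)calloc( 1, sizeof(Slave) );  /*create_slave_helper stand-in*/
      slave->needsTask = 1;
    }
   if( !slave->needsTask ) return slave;            /*resuming a task in progress*/

   if( popTask( taskReadyQ ) )                      /*ready task: point slave at it*/
    { slave->needsTask = 0;
      return slave;
    }
   Slave *extra = popSlave( readyVPQ );             /*no task -- look for a peer slave*/
   if( extra == NULL )
    { *coreIsDone = 1;                              /*nothing on this core can make work*/
      pushSlave( readyVPQ, slave );
      return NULL;
    }
   if( extra->needsTask )                           /*two task-less slaves: one is redundant*/
    { free( extra );                                /*prune it (handleDissipate in the real code)*/
      pushSlave( readyVPQ, slave );
      return NULL;
    }
   pushSlave( readyVPQ, slave );                    /*peer has work in progress; run it instead*/
   return extra;
 }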
     5.1 --- a/VSs_Request_Handlers.c	Wed May 30 15:02:38 2012 -0700
     5.2 +++ b/VSs_Request_Handlers.c	Wed Jun 06 17:55:36 2012 -0700
     5.3 @@ -25,7 +25,7 @@
     5.4  //
     5.5  
     5.6  /*Only clone the elements of req used in these reqst handlers
     5.7 - */
     5.8 + *
     5.9    VSsSemReq *
    5.10  cloneReq( VSsSemReq *semReq )
    5.11   { VSsSemReq *clonedReq;
    5.12 @@ -38,7 +38,9 @@
    5.13     
    5.14     return clonedReq;
    5.15   }
    5.16 +*/
    5.17  
    5.18 +/*
    5.19  HashEntry *
    5.20  giveEntryElseInsertReqst( char *key, VSsSemReq *semReq,
    5.21      HashTable   *commHashTbl )
    5.22 @@ -60,15 +62,30 @@
    5.23      }
    5.24     return entry;
    5.25   }
    5.26 -
    5.27 +*/
    5.28 +  
    5.29 +/*Various ideas for getting the 64b pointer into the two 32b key-array
    5.30 + * positions
    5.31 +   key[0] = 2; //two 32b values in key
    5.32 +  OR 
    5.33 +   (uint64) (key[1]) = argPtr;
    5.34 +  OR
    5.35 +   *( (uint64*)&key[1] ) = argPtr;
    5.36 +  OR
    5.37 +   key[2] = (uint32)argPtr;           //low bits
    5.38 +   key[1] = (uint32)(argPtr >> 32);   //high bits
    5.39 +*/
    5.40 +      
    5.41  inline VSsPointerEntry *
    5.42 -create_pointer_entry_and_insert( void *argPtr )
    5.43 - { VSsPointerEntry newEntry;
    5.44 +create_pointer_entry( )
    5.45 + { VSsPointerEntry *newEntry;
    5.46     
    5.47     newEntry = VMS_PI__malloc( sizeof(VSsPointerEntry) );
    5.48     newEntry->hasEnabledNonFinishedWriter = FALSE;
    5.49     newEntry->numEnabledNonDoneReaders    = 0;
    5.50     newEntry->waitersQ                    = makePrivQ();
    5.51 +      
    5.52 +   return newEntry;
    5.53   }
    5.54  
    5.55  /*malloc's space and initializes fields -- and COPIES the arg values
    5.56 @@ -79,21 +96,25 @@
    5.57   { void **newArgs;
    5.58     int32  i, numArgs;
    5.59     VSsTaskStub *
    5.60 -   newStub = malloc( sizeof(VSsTaskStub) + taskType->sizeOfArgs );
    5.61 +   newStub = VMS_int__malloc( sizeof(VSsTaskStub) + taskType->sizeOfArgs );
    5.62     newStub->numBlockingProp = taskType->numCtldArgs;
    5.63     newStub->slaveAssignedTo = NULL;
    5.64     newStub->taskType = taskType;
    5.65 -   newArgs = (void **)((uint8 *)newStub) + sizeof(VSsTaskStub);
    5.66 +   newStub->ptrEntries = 
    5.67 +      VMS_int__malloc( taskType->numCtldArgs * sizeof(VSsPointerEntry *) );
    5.68 +   newArgs = (void **)( (uint8 *)newStub + sizeof(VSsTaskStub) );
    5.69     newStub->args = newArgs;
    5.70     
    5.71        //Copy the arg-pointers.. can be more arguments than just the ones 
    5.72        // that StarSs uses to control ordering of task execution.
    5.73     memcpy( newArgs, args, taskType->sizeOfArgs );
    5.74 +   
    5.75 +   return newStub;
    5.76   }
    5.77  
    5.78  inline VSsTaskStubCarrier *
    5.79  create_task_carrier( VSsTaskStub *taskStub, int32 argNum, int32 rdOrWrite )
    5.80 - { VSsTaskStubCarrier newCarrier;
    5.81 + { VSsTaskStubCarrier *newCarrier;
    5.82   
    5.83     newCarrier = VMS_PI__malloc( sizeof(VSsTaskStubCarrier) );
    5.84     newCarrier->taskStub = taskStub;
    5.85 @@ -203,8 +224,8 @@
    5.86   */
    5.87  void
    5.88  handleSubmitTask( VSsSemReq *semReq, VSsSemEnv *semEnv )
    5.89 - { int64            key[] = {0,0,0};
    5.90 -   HashEntry       *rawHashEntry;
    5.91 + { uint32            key[3];
    5.92 +   HashEntry       *rawHashEntry; //has char *, but use with uint32 *
    5.93     VSsPointerEntry *ptrEntry; //contents of hash table entry for an arg pointer
    5.94     void           **args;
    5.95     VSsTaskStub     *taskStub;
    5.96 @@ -236,32 +257,40 @@
    5.97     int32 argNum;
    5.98     for( argNum = 0; argNum < taskType->numCtldArgs; argNum++ )
    5.99      { 
   5.100 -      key[0] = (int64)args[argNum];
   5.101 +      key[0] = 2; //two 32b values in key
   5.102 +      *( (uint64*)&key[1]) = (uint64)args[argNum];  //write 64b into two 32b
   5.103  
   5.104 -      //key[2] acts as the 0 that terminates the string
   5.105 -//BUG!  need new hash function that works on *pointers with zeros in*
   5.106        /*If the hash entry was chained, put it at the
   5.107         * start of the chain.  (Means no-longer-used pointers accumulate
   5.108         * at end of chain, decide garbage collection later) */
   5.109 -      rawHashEntry = getEntryFromTable( (char *)key, argPtrHashTbl );
   5.110 -      ptrEntry = (VSsPointerEntry *)rawHashEntry->content;
   5.111 -      if( ptrEntry == NULL )
   5.112 -       { ptrEntry = create_pointer_entry_and_insert( args[argNum] );
   5.113 +      rawHashEntry = getEntryFromTable32( key, argPtrHashTbl );
   5.114 +      if( rawHashEntry == NULL )
   5.115 +       {    //adding a value auto-creates the hash-entry
   5.116 +         ptrEntry = create_pointer_entry();
   5.117 +         rawHashEntry = addValueIntoTable32( key, ptrEntry, argPtrHashTbl );
   5.118         }
   5.119 +      else
   5.120 +       { ptrEntry = (VSsPointerEntry *)rawHashEntry->content;
   5.121 +         if( ptrEntry == NULL )
   5.122 +          { ptrEntry = create_pointer_entry();
   5.123 +            rawHashEntry = addValueIntoTable32(key, ptrEntry, argPtrHashTbl);
   5.124 +          }
   5.125 +       }
   5.126 +      taskStub->ptrEntries[argNum] = ptrEntry;
   5.127        
   5.128        /*Have the hash entry.
   5.129         *If the arg is a reader and the entry does not have an enabled
   5.130         * non-finished writer, and the queue is empty. */
   5.131        if( taskType->argTypes[argNum] == READER )
   5.132         { if( !ptrEntry->hasEnabledNonFinishedWriter && 
   5.133 -              isEmptyPrivQ( ptrEntry->waitersQ ) )
   5.134 +             isEmptyPrivQ( ptrEntry->waitersQ ) )
   5.135            { /*The reader is free.  So, decrement the blocking-propendent
   5.136               * count in the task-stub. If the count is zero, then put the
   5.137               * task-stub into the readyQ.  At the same time, increment
   5.138               * the hash-entry's count of enabled and non-finished readers.*/
   5.139              taskStub->numBlockingProp -= 1;
   5.140              if( taskStub->numBlockingProp == 0 )
   5.141 -             { writeQ( taskStub, semEnv->taskReadyQ );
   5.142 +             { writePrivQ( taskStub, semEnv->taskReadyQ );
   5.143               }
   5.144              ptrEntry->numEnabledNonDoneReaders += 1;
   5.145            }
   5.146 @@ -269,7 +298,7 @@
   5.147            { /*Otherwise, the reader is put into the hash-entry's Q of
   5.148               * waiters*/
   5.149              taskCarrier = create_task_carrier( taskStub, argNum, READER );
   5.150 -            writeQ( taskCarrier, ptrEntry->waitersQ );
   5.151 +            writePrivQ( taskCarrier, ptrEntry->waitersQ );
   5.152            }
   5.153         }
   5.154        else //arg is a writer
   5.155 @@ -284,14 +313,14 @@
   5.156                * into the readyQ.*/
   5.157              taskStub->numBlockingProp -= 1;
   5.158              if( taskStub->numBlockingProp == 0 )
   5.159 -             { writeQ( taskStub, semEnv->taskReadyQ );
   5.160 +             { writePrivQ( taskStub, semEnv->taskReadyQ );
   5.161               }
   5.162              ptrEntry->hasEnabledNonFinishedWriter = TRUE;
   5.163            }
   5.164           else
   5.165            {/*Otherwise, put the writer into the entry's Q of waiters.*/
   5.166              taskCarrier = create_task_carrier( taskStub, argNum, WRITER );
   5.167 -            writeQ( taskCarrier, ptrEntry->waitersQ );            
   5.168 +            writePrivQ( taskCarrier, ptrEntry->waitersQ );            
   5.169            }
   5.170         }
   5.171      } //for argNum
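
Note on the key handling in handleSubmitTask above: the change moves to the 32-bit-word hash API (getEntryFromTable32 / addValueIntoTable32) and packs each 64-bit argument pointer into a uint32 key[3], with key[0] holding the number of key words. The sketch below is illustration only (the VMS hash functions are not called); it uses the shift-based packing listed in the comment block near the top of this file rather than the in-place 64-bit store, since the shift form avoids the unaligned aliasing cast. The word order differs from a little-endian 64-bit store, but any consistent packing serves as a key.

#include <stdint.h>
#include <stdio.h>

static void
pack_ptr_key( void *argPtr, uint32_t key[3] )
 { uint64_t p = (uint64_t)(uintptr_t)argPtr;
   key[0] = 2;                      /*two 32b words of key follow*/
   key[1] = (uint32_t)(p >> 32);    /*high bits*/
   key[2] = (uint32_t)p;            /*low bits*/
 }

int
main( void )
 { int      x;
   uint32_t key[3];
   pack_ptr_key( &x, key );
   printf( "%u %08x %08x\n", key[0], key[1], key[2] );
   return 0;
 }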
   5.172 @@ -333,17 +362,23 @@
   5.173   * reader's task-stub.  If it reaches zero, then put the task-stub into the
   5.174   * readyQ.
   5.175   *Repeat until encounter a writer -- put that writer back into the Q.
   5.176 + * 
   5.177 + *May 2012 -- not keeping track of how many references to a given ptrEntry
   5.178 + * exist, so no way to garbage collect..
   5.179 + *TODO: Might be safe to delete an entry when task ends and waiterQ empty
   5.180 + * and no readers and no writers..
   5.181   */
   5.182  void
   5.183  handleEndTask( VSsSemReq *semReq, VSsSemEnv *semEnv )
   5.184 - { int64             key[] = {0,0,0};
   5.185 + { uint32            key[3];
   5.186     HashEntry        *rawHashEntry;
   5.187 -   VSsPointerEntry  *entry; //contents of hash table entry for an arg pointer
   5.188 +   VSsPointerEntry  *ptrEntry; //contents of hash table entry for an arg pointer
   5.189     void            **args;
   5.190     VSsTaskStub      *endingTaskStub, *waitingTaskStub;
   5.191     VSsTaskType      *endingTaskType;
   5.192     VSsWaiterCarrier *waitingTaskCarrier;
   5.193 -   
   5.194 +   VSsPointerEntry **ptrEntries;
   5.195 +      
   5.196     HashTable *
   5.197     ptrHashTbl = semEnv->argPtrHashTbl;
   5.198     
   5.199 @@ -356,71 +391,83 @@
   5.200                  ((VSsSemData *)semReq->callingSlv->semanticData)->taskStub;
   5.201     args           = endingTaskStub->args;
   5.202     endingTaskType = endingTaskStub->taskType;
   5.203 +   ptrEntries     = endingTaskStub->ptrEntries; //saved in stub when create
   5.204     
   5.205     /*The task's controlled arguments are processed one by one.
   5.206 -    *Processing an argument means getting the hash of the pointer.
   5.207 +    *Processing an argument means getting arg-pointer's entry.
   5.208      */
   5.209     int32 argNum;
   5.210     for( argNum = 0; argNum < endingTaskType->numCtldArgs; argNum++ )
   5.211      { 
   5.212 -      key[0] = (int64)args[argNum];
   5.213 +      /*
   5.214 +      key[0] = 2; //says are 2 32b values in key
   5.215 +      *( (uint64*)&key[1] ) = args[argNum];  //write 64b ptr into two 32b
   5.216  
   5.217 -      //key[2] acts as the 0 that terminates the string
   5.218 -//BUG!  need new hash function that works on *pointers with zeros in*
   5.219 -      /*If the hash entry was chained, put it at the
   5.220 +       /*If the hash entry was chained, put it at the
   5.221         * start of the chain.  (Means no-longer-used pointers accumulate
   5.222         * at end of chain, decide garbage collection later) 
   5.223 -       *NOTE: could put pointer directly to hash entry into task-stub 
   5.224 -       * when do lookup during task creation.*/
   5.225 -      rawHashEntry = getEntryFromTable( (char *)key, ptrHashTbl );
   5.226 -      entry = (VSsPointerEntry *)rawHashEntry->content;
   5.227 -      if( entry == NULL ) 
   5.228 +       */
   5.229 +      /*NOTE: don't do hash lookups here, instead, have a pointer to the
   5.230 +       * hash entry inside task-stub, put there during task creation.
   5.231 +      rawHashEntry = getEntryFromTable32( key, ptrHashTbl );
   5.232 +      ptrEntry = (VSsPointerEntry *)rawHashEntry->content;
   5.233 +      if( ptrEntry == NULL ) 
   5.234            VMS_App__throw_exception("hash entry NULL", NULL, NULL);
   5.235 +      */ 
   5.236        
   5.237 -      /*With the hash entry:  If the ending task was reader of this arg*/
   5.238 +      ptrEntry = ptrEntries[argNum];
   5.239 +      /*check if the ending task was reader of this arg*/
   5.240        if( endingTaskType->argTypes[argNum] == READER )
   5.241         { /*then decrement the enabled and non-finished reader-count in
   5.242            * the hash-entry. */ 
   5.243 -         entry->numEnabledNonDoneReaders -= 1;
   5.244 +         ptrEntry->numEnabledNonDoneReaders -= 1;
   5.245           
   5.246 -         /*If the count becomes zero, then take the next entry from the Q. It
   5.247 -          * should be a writer, or else there's a bug in this algorithm.*/
   5.248 -         if( entry->numEnabledNonDoneReaders == 0 )
   5.249 -          { waitingTaskCarrier = readQ( entry->waitersQ );
   5.250 +         /*If the count becomes zero, then take the next entry from the Q. 
   5.251 +          *It should be a writer, or else there's a bug in this algorithm.*/
   5.252 +         if( ptrEntry->numEnabledNonDoneReaders == 0 )
   5.253 +          { waitingTaskCarrier = readPrivQ( ptrEntry->waitersQ );
   5.254 +            if( waitingTaskCarrier == NULL ) 
   5.255 +             { //TODO: looks safe to delete the ptr entry at this point 
   5.256 +               continue; //next iter of loop
   5.257 +             }
   5.258 +            if( waitingTaskCarrier->type == READER ) 
   5.259 +               VMS_App__throw_exception("READER waiting", NULL, NULL);
   5.260 +                   
   5.261              waitingTaskStub = waitingTaskCarrier->taskStub;
   5.262              
   5.263 -            if( !waitingTaskCarrier->type == READER ) 
   5.264 -               VMS_App__throw_exception();
   5.265 -                   
   5.266              /*Set the hash-entry to have an enabled non-finished writer.*/
   5.267 -            entry->hasEnabledNonFinishedWriter = TRUE;
   5.268 +            ptrEntry->hasEnabledNonFinishedWriter = TRUE;
   5.269              
   5.270              /* Decrement the blocking-propendent-count of the writer's
   5.271               * task-stub.  If the count has reached zero, then put the
   5.272               * task-stub into the readyQ.*/
   5.273              waitingTaskStub->numBlockingProp -= 1;
   5.274              if( waitingTaskStub->numBlockingProp == 0 )
   5.275 -             { writeQ( waitingTaskStub, semEnv->taskReadyQ );
   5.276 +             { writePrivQ( waitingTaskStub, semEnv->taskReadyQ );
   5.277               }
   5.278            }
   5.279         }
   5.280        else /*the ending task is a writer of this arg*/ 
   5.281         { /*clear the enabled non-finished writer flag of the hash-entry.*/
   5.282 -         entry->hasEnabledNonFinishedWriter = FALSE;
   5.283 +         ptrEntry->hasEnabledNonFinishedWriter = FALSE;
   5.284           
   5.285           /*Take the next waiter from the hash-entry's Q.*/
   5.286 -         waitingTaskCarrier = readQ( entry->waitersQ );
   5.287 +         waitingTaskCarrier = readPrivQ( ptrEntry->waitersQ );
   5.288 +         if( waitingTaskCarrier == NULL )
   5.289 +          { //TODO: looks safe to delete ptr entry at this point
   5.290 +            continue; //go to next iter of loop, done here.
   5.291 +          }
   5.292           waitingTaskStub = waitingTaskCarrier->taskStub;
   5.293           
   5.294           /*If task is a writer of this hash-entry's pointer*/
   5.295           if( waitingTaskCarrier->type == WRITER ) 
   5.296            { /* then turn the flag back on.*/
   5.297 -            entry->hasEnabledNonFinishedWriter = TRUE;
   5.298 +            ptrEntry->hasEnabledNonFinishedWriter = TRUE;
   5.299              /*Decrement the writer's blocking-propendent-count in task-stub
   5.300               * If it becomes zero, then put the task-stub into the readyQ.*/
   5.301              waitingTaskStub->numBlockingProp -= 1;
   5.302              if( waitingTaskStub->numBlockingProp == 0 )
   5.303 -             { writeQ( waitingTaskStub, semEnv->taskReadyQ );
   5.304 +             { writePrivQ( waitingTaskStub, semEnv->taskReadyQ );
   5.305               }
   5.306            }
   5.307           else
   5.308 @@ -429,27 +476,28 @@
   5.309              while( TRUE ) /*The checks guarantee have a waiting reader*/
   5.310               { /*Increment the hash-entry's count of enabled non-finished
   5.311                  * readers.*/
   5.312 -               entry->numEnabledNonDoneReaders += 1;
   5.313 +               ptrEntry->numEnabledNonDoneReaders += 1;
   5.314  
   5.315                 /*Decrement the blocking propendents count of the reader's
   5.316                  * task-stub.  If it reaches zero, then put the task-stub
   5.317                  * into the readyQ.*/
   5.318                 waitingTaskStub->numBlockingProp -= 1;
   5.319                 if( waitingTaskStub->numBlockingProp == 0 )
   5.320 -                { writeQ( waitingTaskStub, semEnv->taskReadyQ );
   5.321 +                { writePrivQ( waitingTaskStub, semEnv->taskReadyQ );
   5.322                  }
   5.323                 /*Get next waiting task*/
   5.324 -               waitingTaskCarrier = peekQ( entry->waitersQ );
   5.325 +               waitingTaskCarrier = peekPrivQ( ptrEntry->waitersQ );
   5.326                 if( waitingTaskCarrier == NULL ) break;
   5.327                 if( waitingTaskCarrier->type == WRITER ) break;
   5.328 -               waitingTaskCarrier = readQ( entry->waitersQ );               
   5.329 +               waitingTaskCarrier = readPrivQ( ptrEntry->waitersQ );               
   5.330                 waitingTaskStub = waitingTaskCarrier->taskStub;
   5.331               }//while waiter is a reader
   5.332 -          }//first waiting task is a reader
   5.333 -       }//check of ending task, whether writer or reader
   5.334 +          }//if-else, first waiting task is a reader
   5.335 +       }//if-else, check of ending task, whether writer or reader
   5.336      }//for argnum in ending task
   5.337     
   5.338     //done ending the task, now free the stub + args copy
   5.339 +   VMS_PI__free( endingTaskStub->ptrEntries );
   5.340     VMS_PI__free( endingTaskStub );
   5.341     
   5.342     //Resume the slave that animated the task -- assigner will give new task
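
Note on handleEndTask above: it is the release side of the dependency tracking, where each controlled argument of the finishing task may enable the next waiting writer or a run of waiting readers. The sketch below follows that per-argument release step under simplified assumptions: a linked list stands in for the PrivQ, a makeReady callback stands in for writePrivQ onto taskReadyQ, and carrier deallocation plus the entry garbage-collection TODO are not modeled.

#include <stddef.h>   /*NULL*/

enum { READER = 1, WRITER = 2 };

typedef struct TaskStub { int numBlockingProp; } TaskStub;
typedef struct Carrier  { TaskStub *stub; int type; struct Carrier *next; } Carrier;
typedef struct
 { int      hasEnabledNonFinishedWriter;
   int      numEnabledNonDoneReaders;
   Carrier *waitersQ;               /*linked-list FIFO stand-in for the PrivQ*/
 }
PtrEntry;

static Carrier *peekQ( PtrEntry *e ) { return e->waitersQ; }
static Carrier *readQ( PtrEntry *e )
 { Carrier *c = e->waitersQ;  if( c != NULL ) e->waitersQ = c->next;  return c; }

static void
release_if_unblocked( TaskStub *stub, void (*makeReady)(TaskStub *) )
 { stub->numBlockingProp -= 1;
   if( stub->numBlockingProp == 0 ) makeReady( stub );  /*would go onto taskReadyQ*/
 }

/*Called once per controlled argument of the ending task.*/
void
end_arg_sketch( PtrEntry *e, int endingType, void (*makeReady)(TaskStub *) )
 { Carrier *c;
   if( endingType == READER )
    { e->numEnabledNonDoneReaders -= 1;
      if( e->numEnabledNonDoneReaders != 0 ) return;
      c = readQ( e );                        /*if anyone waits, it must be a writer*/
      if( c == NULL ) return;
      e->hasEnabledNonFinishedWriter = 1;
      release_if_unblocked( c->stub, makeReady );
    }
   else                                      /*ending task was the writer of this arg*/
    { e->hasEnabledNonFinishedWriter = 0;
      c = readQ( e );
      if( c == NULL ) return;
      if( c->type == WRITER )
       { e->hasEnabledNonFinishedWriter = 1;
         release_if_unblocked( c->stub, makeReady );
       }
      else                                   /*release the whole run of waiting readers*/
       { while( 1 )
          { e->numEnabledNonDoneReaders += 1;
            release_if_unblocked( c->stub, makeReady );
            c = peekQ( e );
            if( c == NULL || c->type == WRITER ) break;
            c = readQ( e );
          }
       }
    }
 }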
   5.343 @@ -465,24 +513,24 @@
   5.344  /*
   5.345   */
   5.346  void
   5.347 -handleMalloc( VSsSemReq *semReq, SlaveVP *requestingPr, VSsSemEnv *semEnv )
   5.348 +handleMalloc( VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv )
   5.349   { void *ptr;
   5.350   
   5.351 -      DEBUG__printf1(dbgRqstHdlr,"Malloc request from processor %d",requestingPr->slaveID)
   5.352 +      DEBUG__printf1(dbgRqstHdlr,"Malloc request from processor %d",requestingSlv->slaveID)
   5.353  
   5.354     ptr = VMS_PI__malloc( semReq->sizeToMalloc );
   5.355 -   requestingPr->dataRetFromReq = ptr;
   5.356 -   resume_slaveVP( requestingPr, semEnv );
   5.357 +   requestingSlv->dataRetFromReq = ptr;
   5.358 +   resume_slaveVP( requestingSlv, semEnv );
   5.359   }
   5.360  
   5.361  /*
   5.362   */
   5.363  void
   5.364 -handleFree( VSsSemReq *semReq, SlaveVP *requestingPr, VSsSemEnv *semEnv )
   5.365 +handleFree( VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv )
   5.366   {
   5.367 -         DEBUG__printf1(dbgRqstHdlr,"Free request from processor %d",requestingPr->slaveID)
   5.368 +         DEBUG__printf1(dbgRqstHdlr,"Free request from processor %d",requestingSlv->slaveID)
   5.369     VMS_PI__free( semReq->ptrToFree );
   5.370 -   resume_slaveVP( requestingPr, semEnv );
   5.371 +   resume_slaveVP( requestingSlv, semEnv );
   5.372   }
   5.373  
   5.374  
   5.375 @@ -492,43 +540,43 @@
   5.376   * end-label.  Else, sets flag and resumes normally.
   5.377   */
   5.378  void inline
   5.379 -handleStartSingleton_helper( VSsSingleton *singleton, SlaveVP *reqstingPr,
   5.380 +handleStartSingleton_helper( VSsSingleton *singleton, SlaveVP *reqstingSlv,
   5.381                               VSsSemEnv    *semEnv )
   5.382   {
   5.383     if( singleton->hasFinished )
   5.384      {    //the code that sets the flag to true first sets the end instr addr
   5.385 -      reqstingPr->dataRetFromReq = singleton->endInstrAddr;
   5.386 -      resume_slaveVP( reqstingPr, semEnv );
   5.387 +      reqstingSlv->dataRetFromReq = singleton->endInstrAddr;
   5.388 +      resume_slaveVP( reqstingSlv, semEnv );
   5.389        return;
   5.390      }
   5.391     else if( singleton->hasBeenStarted )
   5.392      {    //singleton is in-progress in a diff slave, so wait for it to finish
   5.393 -      writePrivQ(reqstingPr, singleton->waitQ );
   5.394 +      writePrivQ(reqstingSlv, singleton->waitQ );
   5.395        return;
   5.396      }
   5.397     else
   5.398      {    //hasn't been started, so this is the first attempt at the singleton
   5.399        singleton->hasBeenStarted = TRUE;
   5.400 -      reqstingPr->dataRetFromReq = 0x0;
   5.401 -      resume_slaveVP( reqstingPr, semEnv );
   5.402 +      reqstingSlv->dataRetFromReq = 0x0;
   5.403 +      resume_slaveVP( reqstingSlv, semEnv );
   5.404        return;
   5.405      }
   5.406   }
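
The helper above makes a three-way decision: already finished means hand back the end-instruction address and resume, started-but-unfinished means park on the wait queue, not-yet-started means mark the singleton started and resume with 0x0 so the requester runs the body. A compact sketch of that decision, using a simplified singleton record whose field names mirror the diff but are not the real VSsSingleton definition:

/* Sketch only; the "must wait" case is reported through *mustWait instead of
 * parking the requester on a private wait queue. */
#include <stdbool.h>
#include <stddef.h>

typedef struct
 { bool  hasBeenStarted;
   bool  hasFinished;
   void *endInstrAddr;   /* where waiters jump once the singleton is done */
 } SingletonSketch;

/* Returns what the requester would see in dataRetFromReq: the end address if
 * the work is already done, NULL if this requester should run the body. */
static void * start_singleton_sketch( SingletonSketch *s, bool *mustWait )
 { *mustWait = false;
   if( s->hasFinished )    return s->endInstrAddr;           /* skip the body   */
   if( s->hasBeenStarted ) { *mustWait = true; return NULL; } /* wait for owner */
   s->hasBeenStarted = true;                                  /* first one runs */
   return NULL;
 }
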
   5.407  void inline
   5.408 -handleStartFnSingleton( VSsSemReq *semReq, SlaveVP *requestingPr,
   5.409 +handleStartFnSingleton( VSsSemReq *semReq, SlaveVP *requestingSlv,
   5.410                        VSsSemEnv *semEnv )
   5.411   { VSsSingleton *singleton;
   5.412 -         DEBUG__printf1(dbgRqstHdlr,"StartFnSingleton request from processor %d",requestingPr->slaveID)
   5.413 +         DEBUG__printf1(dbgRqstHdlr,"StartFnSingleton request from processor %d",requestingSlv->slaveID)
   5.414  
   5.415     singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
   5.416 -   handleStartSingleton_helper( singleton, requestingPr, semEnv );
   5.417 +   handleStartSingleton_helper( singleton, requestingSlv, semEnv );
   5.418   }
   5.419  void inline
   5.420 -handleStartDataSingleton( VSsSemReq *semReq, SlaveVP *requestingPr,
   5.421 +handleStartDataSingleton( VSsSemReq *semReq, SlaveVP *requestingSlv,
   5.422                        VSsSemEnv *semEnv )
   5.423   { VSsSingleton *singleton;
   5.424  
   5.425 -         DEBUG__printf1(dbgRqstHdlr,"StartDataSingleton request from processor %d",requestingPr->slaveID)
   5.426 +         DEBUG__printf1(dbgRqstHdlr,"StartDataSingleton request from processor %d",requestingSlv->slaveID)
   5.427     if( *(semReq->singletonPtrAddr) == NULL )
   5.428      { singleton                 = VMS_PI__malloc( sizeof(VSsSingleton) );
   5.429        singleton->waitQ          = makeVMSQ();
   5.430 @@ -539,21 +587,21 @@
   5.431      }
   5.432     else
   5.433        singleton = *(semReq->singletonPtrAddr);
   5.434 -   handleStartSingleton_helper( singleton, requestingPr, semEnv );
   5.435 +   handleStartSingleton_helper( singleton, requestingSlv, semEnv );
   5.436   }
   5.437  
   5.438  
   5.439  void inline
   5.440 -handleEndSingleton_helper( VSsSingleton *singleton, SlaveVP *requestingPr,
   5.441 +handleEndSingleton_helper( VSsSingleton *singleton, SlaveVP *requestingSlv,
   5.442                             VSsSemEnv    *semEnv )
   5.443   { PrivQueueStruc *waitQ;
   5.444     int32           numWaiting, i;
   5.445 -   SlaveVP      *resumingPr;
   5.446 +   SlaveVP      *resumingSlv;
   5.447  
   5.448     if( singleton->hasFinished )
   5.449     { //by definition, only one slave should ever be able to run end singleton,
   5.450       // so if this flag is already set, it is an error
   5.451 -      ERROR1( "singleton code ran twice", requestingPr );
   5.452 +      ERROR1( "singleton code ran twice", requestingSlv );
   5.453      }
   5.454  
   5.455     singleton->hasFinished = TRUE;
   5.456 @@ -561,35 +609,35 @@
   5.457     numWaiting = numInPrivQ( waitQ );
   5.458     for( i = 0; i < numWaiting; i++ )
   5.459      {    //they will resume inside start singleton, then jmp to end singleton
   5.460 -      resumingPr = readPrivQ( waitQ );
   5.461 -      resumingPr->dataRetFromReq = singleton->endInstrAddr;
   5.462 -      resume_slaveVP( resumingPr, semEnv );
   5.463 +      resumingSlv = readPrivQ( waitQ );
   5.464 +      resumingSlv->dataRetFromReq = singleton->endInstrAddr;
   5.465 +      resume_slaveVP( resumingSlv, semEnv );
   5.466      }
   5.467  
   5.468 -   resume_slaveVP( requestingPr, semEnv );
   5.469 +   resume_slaveVP( requestingSlv, semEnv );
   5.470  
   5.471  }
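
Ending a singleton wakes every slave parked on its wait queue: each one gets the end-instruction address in dataRetFromReq and is made runnable again. A small sketch of that wake-up step, with a plain array standing in for the private queue (names are illustrative, not the VSs types):

/* Sketch of the wake-up loop only; the real handler also resumes the
 * requester itself afterwards. */
#include <stdio.h>

typedef struct { int id; void *dataRetFromReq; } WaiterSlaveSketch;

static void finish_singleton_sketch( void *endInstrAddr,
                                     WaiterSlaveSketch *waiters, int numWaiting )
 { int i;
   for( i = 0; i < numWaiting; i++ )
    { waiters[i].dataRetFromReq = endInstrAddr;   /* they jump past the body */
      printf( "resuming slave %d\n", waiters[i].id );
    }
 }
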
   5.472  void inline
   5.473 -handleEndFnSingleton( VSsSemReq *semReq, SlaveVP *requestingPr,
   5.474 +handleEndFnSingleton( VSsSemReq *semReq, SlaveVP *requestingSlv,
   5.475                          VSsSemEnv *semEnv )
   5.476   {
   5.477     VSsSingleton   *singleton;
   5.478  
   5.479 -         DEBUG__printf1(dbgRqstHdlr,"EndFnSingleton request from processor %d",requestingPr->slaveID)
   5.480 +         DEBUG__printf1(dbgRqstHdlr,"EndFnSingleton request from processor %d",requestingSlv->slaveID)
   5.481     
   5.482     singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
   5.483 -   handleEndSingleton_helper( singleton, requestingPr, semEnv );
   5.484 +   handleEndSingleton_helper( singleton, requestingSlv, semEnv );
   5.485    }
   5.486  void inline
   5.487 -handleEndDataSingleton( VSsSemReq *semReq, SlaveVP *requestingPr,
   5.488 +handleEndDataSingleton( VSsSemReq *semReq, SlaveVP *requestingSlv,
   5.489                          VSsSemEnv *semEnv )
   5.490   {
   5.491     VSsSingleton   *singleton;
   5.492  
   5.493 -         DEBUG__printf1(dbgRqstHdlr,"EndDataSingleton request from processor %d",requestingPr->slaveID)
   5.494 +         DEBUG__printf1(dbgRqstHdlr,"EndDataSingleton request from processor %d",requestingSlv->slaveID)
   5.495     
   5.496     singleton = *(semReq->singletonPtrAddr);
   5.497 -   handleEndSingleton_helper( singleton, requestingPr, semEnv );
   5.498 +   handleEndSingleton_helper( singleton, requestingSlv, semEnv );
   5.499    }
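
Fn singletons live in a fixed array indexed by singletonID, while data singletons are allocated lazily: the requester passes the address of a pointer, and the first start-request creates and initializes the record. A sketch of that lazy-allocation pattern under simplified stand-in types (SingletonSketch and get_or_create_singleton are illustrative, not the VSs code):

/* Sketch only; the real code builds a VMS private queue for waitQ and uses
 * VMS_PI__malloc rather than the C library allocator. */
#include <stdlib.h>
#include <stdbool.h>

typedef struct
 { bool  hasBeenStarted;
   bool  hasFinished;
   void *endInstrAddr;
   void *waitQ;          /* stand-in for the private wait queue */
 } SingletonSketch;

/* Return the singleton record stored at *slot, creating it on first use. */
static SingletonSketch * get_or_create_singleton( SingletonSketch **slot )
 { if( *slot == NULL )
    { SingletonSketch *s = malloc( sizeof(SingletonSketch) );
      s->hasBeenStarted = false;
      s->hasFinished    = false;
      s->endInstrAddr   = NULL;
      s->waitQ          = NULL;
      *slot = s;
    }
   return *slot;
 }
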
   5.500  
   5.501  
   5.502 @@ -597,11 +645,11 @@
   5.503   * pointer out of the request and call it, then resume the VP.
   5.504   */
   5.505  void
   5.506 -handleAtomic( VSsSemReq *semReq, SlaveVP *requestingPr, VSsSemEnv *semEnv )
   5.507 +handleAtomic( VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv )
   5.508   {
   5.509 -         DEBUG__printf1(dbgRqstHdlr,"Atomic request from processor %d",requestingPr->slaveID)
   5.510 +         DEBUG__printf1(dbgRqstHdlr,"Atomic request from processor %d",requestingSlv->slaveID)
   5.511     semReq->fnToExecInMaster( semReq->dataForFn );
   5.512 -   resume_slaveVP( requestingPr, semEnv );
   5.513 +   resume_slaveVP( requestingSlv, semEnv );
   5.514   }
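
Because only the master animates these requests, two atomic functions can never overlap, so the function named in the request needs no locks of its own. The sketch below shows that idea with illustrative names (AtomicFn, handle_atomic_sketch, add_to_counter are assumptions, not the VSs API):

/* Sketch of a function-pointer request executed serially in the master. */
typedef void (*AtomicFn)( void *data );

typedef struct { AtomicFn fnToExec; void *dataForFn; } AtomicReqSketch;

static long sharedCounter = 0;            /* state touched only by the master */

static void add_to_counter( void *data )  /* example "atomic" function        */
 { sharedCounter += *(long *)data; }

static void handle_atomic_sketch( AtomicReqSketch *req )
 { req->fnToExec( req->dataForFn );       /* runs in the master's context     */
   /* ...the real handler then resumes the requesting slave */
 }
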
   5.515  
   5.516  /*First, it looks at the VP's semantic data, to see the highest transactionID
   5.517 @@ -619,18 +667,18 @@
   5.518   *If NULL, then write the requesting VP into the field and resume.
   5.519   */
   5.520  void
   5.521 -handleTransStart( VSsSemReq *semReq, SlaveVP *requestingPr,
   5.522 +handleTransStart( VSsSemReq *semReq, SlaveVP *requestingSlv,
   5.523                    VSsSemEnv *semEnv )
   5.524   { VSsSemData *semData;
   5.525     TransListElem *nextTransElem;
   5.526  
   5.527 -         DEBUG__printf1(dbgRqstHdlr,"TransStart request from processor %d",requestingPr->slaveID)
   5.528 +         DEBUG__printf1(dbgRqstHdlr,"TransStart request from processor %d",requestingSlv->slaveID)
   5.529     
   5.530        //check ordering of entering transactions is correct
   5.531 -   semData = requestingPr->semanticData;
   5.532 +   semData = requestingSlv->semanticData;
   5.533     if( semData->highestTransEntered > semReq->transID )
   5.534      {    //throw VMS exception, which shuts down VMS.
   5.535 -      VMS_PI__throw_exception( "transID smaller than prev", requestingPr, NULL);
   5.536 +      VMS_PI__throw_exception( "transID smaller than prev", requestingSlv, NULL);
   5.537      }
   5.538        //add this trans ID to the list of transactions entered -- check when
   5.539        // end a transaction
   5.540 @@ -646,14 +694,14 @@
   5.541  
   5.542     if( transStruc->VPCurrentlyExecuting == NULL )
   5.543      {
   5.544 -      transStruc->VPCurrentlyExecuting = requestingPr;
   5.545 -      resume_slaveVP( requestingPr, semEnv );
   5.546 +      transStruc->VPCurrentlyExecuting = requestingSlv;
   5.547 +      resume_slaveVP( requestingSlv, semEnv );
   5.548      }
   5.549     else
   5.550      {    //note: it might be cleaner later to save the request with the VP and
   5.551           // add this trans ID to the linked list when the VP comes off the queue,
   5.552           // but that isn't needed yet.
   5.553 -      writePrivQ( requestingPr, transStruc->waitingVPQ );
   5.554 +      writePrivQ( requestingSlv, transStruc->waitingVPQ );
   5.555      }
   5.556   }
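
The ownership rule above is simple: if no VP currently holds the transaction, the requester becomes the owner and resumes; otherwise it joins the transaction's waiting queue. A stand-alone sketch of that rule, using a hand-rolled linked list in place of the private queue (TransSketch, Slave, trans_start_sketch are illustrative names, not the VSs structures):

/* Sketch only; the real handler also records the transID in the VP's
 * semantic data before making this decision. */
#include <stddef.h>

typedef struct Slave Slave;
struct Slave { int id; Slave *next; };

typedef struct
 { Slave *currentOwner;
   Slave *waitHead, *waitTail;   /* stand-in for the private waiting-VP queue */
 } TransSketch;

/* Returns 1 if the requester may proceed immediately, 0 if it must wait. */
static int trans_start_sketch( TransSketch *t, Slave *requester )
 { if( t->currentOwner == NULL )
    { t->currentOwner = requester;          /* take the transaction */
      return 1;
    }
   requester->next = NULL;                  /* otherwise join the queue */
   if( t->waitTail == NULL ) { t->waitHead = t->waitTail = requester; }
   else { t->waitTail->next = requester; t->waitTail = requester; }
   return 0;
 }
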
   5.557  
   5.558 @@ -672,38 +720,38 @@
   5.559   * resume both.
   5.560   */
   5.561  void
   5.562 -handleTransEnd(VSsSemReq *semReq, SlaveVP *requestingPr, VSsSemEnv *semEnv)
   5.563 +handleTransEnd(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv)
   5.564   { VSsSemData    *semData;
   5.565 -   SlaveVP     *waitingPr;
   5.566 +   SlaveVP     *waitingSlv;
   5.567     VSsTrans      *transStruc;
   5.568     TransListElem *lastTrans;
   5.569     
   5.570 -         DEBUG__printf1(dbgRqstHdlr,"TransEnd request from processor %d",requestingPr->slaveID)
   5.571 +         DEBUG__printf1(dbgRqstHdlr,"TransEnd request from processor %d",requestingSlv->slaveID)
   5.572     
   5.573     transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);
   5.574  
   5.575       //make sure the transaction is ended by the same VP that started it.
   5.576 -   if( transStruc->VPCurrentlyExecuting != requestingPr )
   5.577 +   if( transStruc->VPCurrentlyExecuting != requestingSlv )
   5.578      {
   5.579 -      VMS_PI__throw_exception( "trans ended in diff VP", requestingPr, NULL );
   5.580 +      VMS_PI__throw_exception( "trans ended in diff VP", requestingSlv, NULL );
   5.581      }
   5.582  
   5.583        //make sure nesting is correct -- last ID entered should == this ID
   5.584 -   semData = requestingPr->semanticData;
   5.585 +   semData = requestingSlv->semanticData;
   5.586     lastTrans = semData->lastTransEntered;
   5.587     if( lastTrans->transID != semReq->transID )
   5.588      {
   5.589 -      VMS_PI__throw_exception( "trans incorrectly nested", requestingPr, NULL );
   5.590 +      VMS_PI__throw_exception( "trans incorrectly nested", requestingSlv, NULL );
   5.591      }
   5.592  
   5.593     semData->lastTransEntered = semData->lastTransEntered->nextTrans;
   5.594  
   5.595  
   5.596 -   waitingPr = readPrivQ( transStruc->waitingVPQ );
   5.597 -   transStruc->VPCurrentlyExecuting = waitingPr;
   5.598 +   waitingSlv = readPrivQ( transStruc->waitingVPQ );
   5.599 +   transStruc->VPCurrentlyExecuting = waitingSlv;
   5.600  
   5.601 -   if( waitingPr != NULL )
   5.602 -      resume_slaveVP( waitingPr, semEnv );
   5.603 +   if( waitingSlv != NULL )
   5.604 +      resume_slaveVP( waitingSlv, semEnv );
   5.605  
   5.606 -   resume_slaveVP( requestingPr, semEnv );
   5.607 +   resume_slaveVP( requestingSlv, semEnv );
   5.608   }
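
Ending a transaction hands ownership to the next waiter, if any, and resumes both that waiter and the finishing VP. The sketch below shows the hand-off using the same TransSketch and Slave stand-ins as the start-transaction sketch above; it is an illustration of the policy, not the VSs implementation.

/* Pop the next waiter (readPrivQ equivalent) and make it the owner; a NULL
 * result means the transaction is free again.  The caller resumes the
 * returned waiter, then resumes the finishing VP itself. */
static Slave * trans_end_sketch( TransSketch *t )
 { Slave *next = t->waitHead;
   if( next != NULL )
    { t->waitHead = next->next;
      if( t->waitHead == NULL ) t->waitTail = NULL;
    }
   t->currentOwner = next;                  /* may be NULL */
   return next;
 }
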