changeset 2:f2ed1c379fe7

Code nearly complete; about to begin debugging.
author Sean Halle <seanhalle@yahoo.com>
date Wed, 30 May 2012 15:02:38 -0700
parents 5ed4d833506e
children 468b8638ff92
files Measurement/VSs_Counter_Recording.c Measurement/VSs_Counter_Recording.h Measurement/VSs_Measurement.h VSs.c VSs.h VSs_PluginFns.c VSs_Request_Handlers.c VSs_Request_Handlers.h
diffstat 8 files changed, 814 insertions(+), 846 deletions(-)
line diff
     1.1 --- a/Measurement/VSs_Counter_Recording.c	Thu May 24 07:34:21 2012 -0700
     1.2 +++ b/Measurement/VSs_Counter_Recording.c	Wed May 30 15:02:38 2012 -0700
     1.3 @@ -3,14 +3,14 @@
     1.4   * author: Nina Engelhardt
     1.5   */
     1.6  
     1.7 -#include "VOMP_Counter_Recording.h"
     1.8 +#include "VSs_Counter_Recording.h"
     1.9  #include "VMS_impl/VMS.h"
    1.10 -#include "VOMP.h"
    1.11 +#include "VSs.h"
    1.12  
    1.13  #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
    1.14  
    1.15 -void VOMP__init_counter_data_structs(){
    1.16 -    VOMPSemEnv *semanticEnv = _VMSMasterEnv->semanticEnv;
    1.17 +void VSs__init_counter_data_structs(){
    1.18 +    VSsSemEnv *semanticEnv = _VMSMasterEnv->semanticEnv;
    1.19      int i;
    1.20      for(i=0;i<NUM_CORES;i++){
    1.21          semanticEnv->counterList[i] = makeListOfArrays(sizeof(CounterEvent), 128);
    1.22 @@ -28,7 +28,7 @@
    1.23      list->next_free_index++; 
    1.24  }
    1.25  
    1.26 -void VOMP__counter_handler(int evt_type, int vpid, int task, SlaveVP* pr, uint64 cycles, uint64 instrs)
    1.27 +void VSs__counter_handler(int evt_type, int vpid, int task, SlaveVP* pr, uint64 cycles, uint64 instrs)
    1.28  {
    1.29      
    1.30      if (pr->typeOfVP == Master || pr->typeOfVP == Shutdown)
    1.31 @@ -36,7 +36,7 @@
    1.32          return;
    1.33       }
    1.34  
    1.35 -    VOMPSemEnv *semanticEnv = _VMSMasterEnv->semanticEnv;
    1.36 +    VSsSemEnv *semanticEnv = _VMSMasterEnv->semanticEnv;
    1.37              
    1.38      CounterEvent e;
    1.39      e.event_type = evt_type;
     2.1 --- a/Measurement/VSs_Counter_Recording.h	Thu May 24 07:34:21 2012 -0700
     2.2 +++ b/Measurement/VSs_Counter_Recording.h	Wed May 30 15:02:38 2012 -0700
     2.3 @@ -1,12 +1,12 @@
     2.4  /* 
     2.5 - * File:   VOMP_Counter_Recording.h
     2.6 + * File:   VSs_Counter_Recording.h
     2.7   * Author: nengel
     2.8   *
     2.9   * Created on January 11, 2012, 3:03 PM
    2.10   */
    2.11  
    2.12 -#ifndef VOMP_COUNTER_RECORDING_H
    2.13 -#define	VOMP_COUNTER_RECORDING_H
    2.14 +#ifndef VSs_COUNTER_RECORDING_H
    2.15 +#define	VSs_COUNTER_RECORDING_H
    2.16  
    2.17  #include "VMS_impl/VMS.h"
    2.18  
    2.19 @@ -22,12 +22,12 @@
    2.20  
    2.21  FILE* counterfile;
    2.22  
    2.23 -void VOMP__init_counter_data_structs();
    2.24 +void VSs__init_counter_data_structs();
    2.25  
    2.26 -void VOMP__counter_handler(int evt_type, int vpid, int task, SlaveVP* pr, uint64 cycles, uint64 instrs);
    2.27 +void VSs__counter_handler(int evt_type, int vpid, int task, SlaveVP* pr, uint64 cycles, uint64 instrs);
    2.28  
    2.29  void set_counter_file(FILE* f);
    2.30  
    2.31  void print_counter_events_to_file(void* _e);
    2.32 -#endif	/* VOMP_COUNTER_RECORDING_H */
    2.33 +#endif	/* VSs_COUNTER_RECORDING_H */
    2.34  
     3.1 --- a/Measurement/VSs_Measurement.h	Thu May 24 07:34:21 2012 -0700
     3.2 +++ b/Measurement/VSs_Measurement.h	Wed May 30 15:02:38 2012 -0700
     3.3 @@ -6,8 +6,8 @@
     3.4   *
     3.5   */
     3.6  
     3.7 -#ifndef _VOMP_MEAS_H
     3.8 -#define	_VOMP_MEAS_H
     3.9 +#ifndef _VSs_MEAS_H
    3.10 +#define	_VSs_MEAS_H
    3.11  
    3.12  
    3.13  #ifdef MEAS__TURN_ON_LANG_MEAS
     4.1 --- a/VSs.c	Thu May 24 07:34:21 2012 -0700
     4.2 +++ b/VSs.c	Wed May 30 15:02:38 2012 -0700
     4.3 @@ -11,16 +11,16 @@
     4.4  #include "Queue_impl/PrivateQueue.h"
     4.5  #include "Hash_impl/PrivateHash.h"
     4.6  
     4.7 -#include "VOMP.h"
     4.8 -#include "VOMP_Counter_Recording.h"
     4.9 +#include "VSs.h"
    4.10 +#include "VSs_Counter_Recording.h"
    4.11  
    4.12  //==========================================================================
    4.13  
    4.14  void
    4.15 -VOMP__init();
    4.16 +VSs__init();
    4.17  
    4.18  void
    4.19 -VOMP__init_Helper();
    4.20 +VSs__init_Helper();
    4.21  //==========================================================================
    4.22  
    4.23  
    4.24 @@ -32,24 +32,24 @@
    4.25   * 
    4.26   *There's a pattern for the outside sequential code to interact with the
    4.27   * VMS_HW code.
    4.28 - *The VMS_HW system is inside a boundary..  every VOMP system is in its
    4.29 + *The VMS_HW system is inside a boundary..  every VSs system is in its
    4.30   * own directory that contains the functions for each of the processor types.
    4.31   * One of the processor types is the "seed" processor that starts the
    4.32   * cascade of creating all the processors that do the work.
    4.33   *So, in the directory is a file called "EntryPoint.c" that contains the
    4.34   * function, named appropriately to the work performed, that the outside
    4.35   * sequential code calls.  This function follows a pattern:
    4.36 - *1) it calls VOMP__init()
    4.37 + *1) it calls VSs__init()
    4.38   *2) it creates the initial data for the seed processor, which is passed
    4.39   *    in to the function
    4.40 - *3) it creates the seed VOMP processor, with the data to start it with.
    4.41 - *4) it calls startVOMPThenWaitUntilWorkDone
    4.42 + *3) it creates the seed VSs processor, with the data to start it with.
    4.43 + *4) it calls startVSsThenWaitUntilWorkDone
    4.44   *5) it gets the returnValue from the transfer struct and returns that
    4.45   *    from the function
    4.46   *
    4.47 - *For now, a new VOMP system has to be created via VOMP__init every
    4.48 + *For now, a new VSs system has to be created via VSs__init every
    4.49   * time an entry point function is called -- later, might add letting the
    4.50 - * VOMP system be created once, and let all the entry points just reuse
    4.51 + * VSs system be created once, and let all the entry points just reuse
    4.52   * it -- want to be as simple as possible now, and see by using what makes
    4.53   * sense for later..
    4.54   */
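
For reference, a minimal sketch of an entry-point function following the pattern above, assuming the VSs.h / VMS headers in this changeset. The entry-point function, the transfer struct, and the seed function are hypothetical names; steps 1, 3, and 4 are bundled inside VSs__create_seed_slave_and_do_work, and the seed-function signature shown is an assumption (its exact shape is given by TopLevelFnPtr in the VMS headers).

    #include "VSs.h"

    typedef struct { int32 inputSize; int32 result; } MyTransferStruc;  /*hypothetical*/

    void mySeedFn( void *initData, SlaveVP *animatingSlv );  /*hypothetical seed; signature assumed*/

    int32
    myEntryPoint( int32 inputSize )      /*called by the outside sequential code*/
     { MyTransferStruc transferStruc;    /*step 2: initial data handed to the seed*/

       transferStruc.inputSize = inputSize;

          /*steps 1, 3 and 4: init VSs, create the seed slave, run until work done*/
       VSs__create_seed_slave_and_do_work( &mySeedFn, &transferStruc );

       return transferStruc.result;      /*step 5: return value from the transfer struct*/
     }
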
    4.55 @@ -72,41 +72,41 @@
    4.56   * any of the data reachable from initData passed in to here
    4.57   */
    4.58  void
    4.59 -VOMP__create_seed_procr_and_do_work( TopLevelFnPtr fnPtr, void *initData )
    4.60 - { VOMPSemEnv *semEnv;
    4.61 +VSs__create_seed_slave_and_do_work( TopLevelFnPtr fnPtr, void *initData )
    4.62 + { VSsSemEnv *semEnv;
    4.63     SlaveVP *seedPr;
    4.64  
    4.65 -   VOMP__init();      //normal multi-thd
    4.66 +   VSs__init();      //normal multi-thd
    4.67     
    4.68     semEnv = _VMSMasterEnv->semanticEnv;
    4.69  
    4.70 -      //VOMP starts with one processor, which is put into initial environ,
    4.71 +      //VSs starts with one processor, which is put into initial environ,
    4.72        // and which then calls create() to create more, thereby expanding work
    4.73 -   seedPr = VOMP__create_procr_helper( fnPtr, initData,
    4.74 +   seedPr = VSs__create_slave_helper( fnPtr, initData,
    4.75                                        semEnv, semEnv->nextCoreToGetNewPr++ );
    4.76  
    4.77     resume_slaveVP( seedPr, semEnv );
    4.78     
    4.79     VMS_SS__start_the_work_then_wait_until_done();      //normal multi-thd
    4.80  
    4.81 -   VOMP__cleanup_after_shutdown();
    4.82 +   VSs__cleanup_after_shutdown();
    4.83   }
    4.84  
    4.85  
    4.86  int32
    4.87 -VOMP__giveMinWorkUnitCycles( float32 percentOverhead )
    4.88 +VSs__giveMinWorkUnitCycles( float32 percentOverhead )
    4.89   {
    4.90     return MIN_WORK_UNIT_CYCLES;
    4.91   }
    4.92  
    4.93  int32
    4.94 -VOMP__giveIdealNumWorkUnits()
    4.95 +VSs__giveIdealNumWorkUnits()
    4.96   {
    4.97     return NUM_ANIM_SLOTS * NUM_CORES;
    4.98   }
    4.99  
   4.100  int32
   4.101 -VOMP__give_number_of_cores_to_schedule_onto()
   4.102 +VSs__give_number_of_cores_to_schedule_onto()
   4.103   {
   4.104     return NUM_CORES;
   4.105   }
   4.106 @@ -115,8 +115,8 @@
   4.107   * saves jump point, and second jumps back several times to get reliable time
   4.108   */
   4.109  void
   4.110 -VOMP__start_primitive()
   4.111 - { saveLowTimeStampCountInto( ((VOMPSemEnv *)(_VMSMasterEnv->semanticEnv))->
   4.112 +VSs__start_primitive()
   4.113 + { saveLowTimeStampCountInto( ((VSsSemEnv *)(_VMSMasterEnv->semanticEnv))->
   4.114                                primitiveStartTime );
   4.115   }
   4.116  
   4.117 @@ -126,17 +126,17 @@
   4.118   * also to throw out any "weird" values due to OS interrupt or TSC rollover
   4.119   */
   4.120  int32
   4.121 -VOMP__end_primitive_and_give_cycles()
   4.122 +VSs__end_primitive_and_give_cycles()
   4.123   { int32 endTime, startTime;
   4.124     //TODO: fix by repeating time-measurement
   4.125     saveLowTimeStampCountInto( endTime );
   4.126 -   startTime =((VOMPSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime;
   4.127 +   startTime =((VSsSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime;
   4.128     return (endTime - startTime);
   4.129   }
   4.130  
   4.131  //===========================================================================
   4.132  
   4.133 -/*Initializes all the data-structures for a VOMP system -- but doesn't
   4.134 +/*Initializes all the data-structures for a VSs system -- but doesn't
   4.135   * start it running yet!
   4.136   *
   4.137   *This runs in the main thread -- before VMS starts up
   4.138 @@ -147,13 +147,13 @@
   4.139   * for creating the seed processor and then starting the work.
   4.140   */
   4.141  void
   4.142 -VOMP__init()
   4.143 +VSs__init()
   4.144   {
   4.145     VMS_SS__init();
   4.146        //masterEnv, a global var, now is partially set up by init_VMS
   4.147        // after this, have VMS_int__malloc and VMS_int__free available
   4.148  
   4.149 -   VOMP__init_Helper();
   4.150 +   VSs__init_Helper();
   4.151   }
   4.152  
   4.153  
   4.154 @@ -164,25 +164,25 @@
   4.155  }
   4.156  
   4.157  void
   4.158 -VOMP__init_Helper()
   4.159 - { VOMPSemEnv       *semanticEnv;
   4.160 +VSs__init_Helper()
   4.161 + { VSsSemEnv       *semanticEnv;
   4.162     PrivQueueStruc **readyVPQs;
   4.163     int              coreIdx, i, j;
   4.164   
   4.165        //Hook up the semantic layer's plug-ins to the Master virt procr
   4.166 -   _VMSMasterEnv->requestHandler = &VOMP__Request_Handler;
   4.167 -   _VMSMasterEnv->slaveAssigner  = &VOMP__assign_slaveVP_to_slot;
   4.168 +   _VMSMasterEnv->requestHandler = &VSs__Request_Handler;
   4.169 +   _VMSMasterEnv->slaveAssigner  = &VSs__assign_slaveVP_to_slot;
   4.170     #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
   4.171 -   _VMSMasterEnv->counterHandler = &VOMP__counter_handler;
   4.172 +   _VMSMasterEnv->counterHandler = &VSs__counter_handler;
   4.173     #endif
   4.174  
   4.175        //create the semantic layer's environment (all its data) and add to
   4.176        // the master environment
   4.177 -   semanticEnv = VMS_int__malloc( sizeof( VOMPSemEnv ) );
   4.178 +   semanticEnv = VMS_int__malloc( sizeof( VSsSemEnv ) );
   4.179     _VMSMasterEnv->semanticEnv = semanticEnv;
   4.180     
   4.181     #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
   4.182 -   VOMP__init_counter_data_structs();
   4.183 +   VSs__init_counter_data_structs();
   4.184     #endif
   4.185     semanticEnv->shutdownInitiated = FALSE;
   4.186     for(i=0;i<NUM_CORES;++i){
   4.187 @@ -219,7 +219,7 @@
   4.188     semanticEnv->nextCoreToGetNewPr = 0;
   4.189     semanticEnv->numSlaveVP = 0;
   4.190     
   4.191 -   semanticEnv->commHashTbl  = makeHashTable( 1<<16, &VMS_int__free );//start big
   4.192 +   semanticEnv->argPtrHashTbl  = makeHashTable( 1<<16, &VMS_int__free );//start big
   4.193  
   4.194     //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
   4.195     //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
   4.196 @@ -235,11 +235,11 @@
   4.197   }
   4.198  
   4.199  
   4.200 -/*Frees any memory allocated by VOMP__init() then calls VMS_int__shutdown
   4.201 +/*Frees any memory allocated by VSs__init() then calls VMS_int__shutdown
   4.202   */
   4.203  void
   4.204 -VOMP__cleanup_after_shutdown()
   4.205 - { VOMPSemEnv *semanticEnv;
   4.206 +VSs__cleanup_after_shutdown()
   4.207 + { VSsSemEnv *semanticEnv;
   4.208     
   4.209     semanticEnv = _VMSMasterEnv->semanticEnv;
   4.210  
   4.211 @@ -374,10 +374,10 @@
   4.212  
   4.213  /*
   4.214   */
   4.215 -  SlaveVP *
   4.216 -VOMP__create_procr_with( TopLevelFnPtr fnPtr,   void *initData,
   4.217 +SlaveVP *
   4.218 +VSs__create_slave_with( TopLevelFnPtr fnPtr,   void *initData,
   4.219                          SlaveVP *creatingPr )
   4.220 - { VOMPSemReq reqData;
   4.221 + { VSsSemReq reqData;
   4.222  
   4.223        //the semantic request data is on the stack and disappears when this
   4.224        // call returns -- it's guaranteed to remain in the VP's stack for as
   4.225 @@ -386,26 +386,26 @@
   4.226     reqData.coreToAssignOnto = -1; //means round-robin assign
   4.227     reqData.fnPtr              = fnPtr;
   4.228     reqData.initData           = initData;
   4.229 -   reqData.sendPr             = creatingPr;
   4.230 +   reqData.callingSlv             = creatingPr;
   4.231  
   4.232     VMS_WL__send_create_slaveVP_req( &reqData, creatingPr );
   4.233  
   4.234     return creatingPr->dataRetFromReq;
   4.235   }
   4.236  
   4.237 -  SlaveVP *
   4.238 -VOMP__create_procr_with_affinity( TopLevelFnPtr fnPtr, void *initData,
   4.239 +SlaveVP *
   4.240 +VSs__create_slave_with_affinity( TopLevelFnPtr fnPtr, void *initData,
   4.241                          SlaveVP *creatingPr,  int32  coreToAssignOnto )
   4.242 - { VOMPSemReq  reqData;
   4.243 + { VSsSemReq  reqData;
   4.244  
   4.245        //the semantic request data is on the stack and disappears when this
   4.246        // call returns -- it's guaranteed to remain in the VP's stack for as
   4.247        // long as the VP is suspended.
   4.248 -   reqData.reqType            = 0; //know type because in a VMS create req
   4.249 -   reqData.coreToAssignOnto = coreToAssignOnto;
   4.250 +   reqData.reqType            = create_slave;
   4.251 +   reqData.coreToAssignOnto   = coreToAssignOnto;
   4.252     reqData.fnPtr              = fnPtr;
   4.253     reqData.initData           = initData;
   4.254 -   reqData.sendPr             = creatingPr;
   4.255 +   reqData.callingSlv         = creatingPr;
   4.256  
   4.257     VMS_WL__send_create_slaveVP_req( &reqData, creatingPr );
   4.258  
   4.259 @@ -413,182 +413,62 @@
   4.260   }
   4.261  
   4.262  
   4.263 -  void
   4.264 -VOMP__dissipate_procr( SlaveVP *procrToDissipate )
   4.265 +void
   4.266 +VSs__dissipate_slave( SlaveVP *slaveToDissipate )
   4.267   {
   4.268 -   VMS_WL__send_dissipate_req( procrToDissipate );
   4.269 +   VMS_WL__send_dissipate_req( slaveToDissipate );
   4.270   }
   4.271  
   4.272  
   4.273  //===========================================================================
   4.274  
   4.275 -void *
   4.276 -VOMP__malloc_to( int32 sizeToMalloc, SlaveVP *owningPr )
   4.277 - { VOMPSemReq reqData;
   4.278  
   4.279 -   reqData.reqType      = malloc_req;
   4.280 -   reqData.sendPr       = owningPr;
   4.281 -   reqData.sizeToMalloc = sizeToMalloc;
   4.282 +//===========================================================================
   4.283 +/*Returns a taskID, which can be used to communicate between tasks with
   4.284 + * send-receive, or to use other kinds of constructs with tasks.
   4.285 + */
   4.286 +int32
   4.287 +VSs__submit_task( VSsTaskType *taskType, void *args, SlaveVP *animSlv)
   4.288 + { VSsSemReq  reqData;
   4.289  
   4.290 -   VMS_WL__send_sem_request( &reqData, owningPr );
   4.291 -
   4.292 -   return owningPr->dataRetFromReq;
   4.293 +   reqData.reqType    = submit_task;
   4.294 +   reqData.callingSlv = animSlv;
   4.295 +   reqData.taskType   = taskType;
   4.296 +   reqData.args       = args;
   4.297 +  
   4.298 + 
   4.299 +   VMS_WL__send_sem_request( &reqData, animSlv );
   4.300 +   return animSlv->dataRetFromReq;
   4.301   }
   4.302  
   4.303 -
   4.304 -/*Sends request to Master, which does the work of freeing
   4.305 +/*NOTE: if want, don't need to send the animating SlaveVP around.. 
   4.306 + * instead, can make a single slave per core, and coreCtrlr looks up the
   4.307 + * slave from having the core number.
   4.308 + * 
   4.309 + *But, to stay compatible with all the other VMS languages, leave it in..
   4.310 + *
   4.311 + *This call is the last to happen in every task.  It causes the slave to
   4.312 + * suspend and get the next task out of the task-queue.  Notice there is no
   4.313 + * assigner here.. only one slave, no slave ReadyQ, and so on..
   4.314 + *Can either make the assigner take the next task out of the taskQ, or can
   4.315 + * leave all as it is, and make task-end take the next task.
   4.316 + *Note: this fits the case in the new VMS for no-context tasks, so will use
   4.317 + * the built-in taskQ of new VMS, and should be local and much faster.
   4.318 + * 
   4.319 + *The task-stub is saved in the animSlv, so the request handler will get it
   4.320 + * from there, along with the task-type which has arg types, and so on..
   4.321   */
   4.322  void
   4.323 -VOMP__free( void *ptrToFree, SlaveVP *owningPr )
   4.324 - { VOMPSemReq reqData;
   4.325 +VSs__end_task( SlaveVP *animSlv )
   4.326 + { VSsSemReq  reqData;
   4.327  
   4.328 -   reqData.reqType      = free_req;
   4.329 -   reqData.sendPr       = owningPr;
   4.330 -   reqData.ptrToFree    = ptrToFree;
   4.331 -
   4.332 -   VMS_WL__send_sem_request( &reqData, owningPr );
   4.333 +   reqData.reqType      = end_task;
   4.334 +   reqData.callingSlv   = animSlv;
   4.335 +   
   4.336 +   VMS_WL__send_sem_request( &reqData, animSlv );
   4.337   }
   4.338  
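
A minimal sketch of describing and submitting a task with the two calls above, assuming the VSsTaskType layout defined in VSs.h in this changeset. All names here are hypothetical, and the task body is only stubbed because its exact calling convention (how it receives its argument pointers and animating slave) is still settling in this changeset.

    #include "VSs.h"

    void addTaskFn( void *args );                /*hypothetical task fn; body elided*/

       /*two controlled args: the first is read, the second written*/
    static int32 addArgTypes[2] = { READER, WRITER };
    static int32 addArgSizes[2] = { sizeof(int32), sizeof(int32) };

    static VSsTaskType addTaskType =
     { &addTaskFn,           /*fn*/
       2,                    /*numTotalArgs*/
       2,                    /*numCtldArgs*/
       addArgTypes,
       addArgSizes,
       2 * sizeof(void *)    /*sizeOfArgs -- ctld args passed as an array of ptrs*/
     };

       /*from code animated by slave animSlv:                                  */
       /*   void *args[2] = { &input, &output };   //ctld args first, as ptrs  */
       /*   VSs__submit_task( &addTaskType, args, animSlv );                   */
       /*and the task body itself must finish with VSs__end_task( animSlv )    */
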
   4.339 -
   4.340 -void
   4.341 -VOMP__transfer_ownership_of_from_to( void *data, SlaveVP *oldOwnerSlv,
   4.342 -                                                  SlaveVP *newOwnerPr )
   4.343 - {
   4.344 -   //TODO: put in the ownership system that automatically frees when no
   4.345 -   // owners of data left -- will need keeper for keeping data around when
   4.346 -   // future created processors might need it but don't exist yet
   4.347 - }
   4.348 -
   4.349 -
   4.350 -void
   4.351 -VOMP__add_ownership_by_to( SlaveVP *newOwnerSlv, void *data )
   4.352 - {
   4.353 -
   4.354 - }
   4.355 -
   4.356 -
   4.357 -void
   4.358 -VOMP__remove_ownership_by_from( SlaveVP *loserSlv, void *dataLosing )
   4.359 - {
   4.360 -
   4.361 - }
   4.362 -
   4.363 -
   4.364 -/*Causes the VOMP system to remove internal ownership, so data won't be
   4.365 - * freed when VOMP shuts down, and will persist in the external program.
   4.366 - *
   4.367 - *Must be called from the processor that currently owns the data.
   4.368 - *
   4.369 - *IMPL: Transferring ownership touches two different virtual processor's
   4.370 - * state -- which means it has to be done carefully -- the VMS rules for
   4.371 - * semantic layers say that a work-unit is only allowed to touch the
   4.372 - * virtual processor it is part of, and that only a single work-unit per
   4.373 - * virtual processor be assigned to a slave at a time.  So, this has to
   4.374 - * modify the virtual processor that owns the work-unit that called this
   4.375 - * function, then create a request to have the other processor modified.
   4.376 - *However, in this case, the TO processor is the outside, and transfers
   4.377 - * are only allowed to be called by the giver-upper, so can mark caller of
   4.378 - * this function as no longer owner, and return -- done.
   4.379 - */
   4.380 -void
   4.381 -VOMP__transfer_ownership_to_outside( void *data )
   4.382 - {
   4.383 -   //TODO: removeAllOwnersFrom( data );
   4.384 - }
   4.385 -
   4.386 -
   4.387 -//===========================================================================
   4.388 -
   4.389 -void
   4.390 -VOMP__send_of_type_to( SlaveVP *sendPr, void *msg, const int type,
   4.391 -                        SlaveVP *receivePr)
   4.392 - { VOMPSemReq  reqData;
   4.393 -
   4.394 -   reqData.receivePr = receivePr;
   4.395 -   reqData.sendPr    = sendPr;
   4.396 -   reqData.reqType   = send_type;
   4.397 -   reqData.msgType   = type;
   4.398 -   reqData.msg       = msg;
   4.399 -   reqData.nextReqInHashEntry = NULL;
   4.400 -
   4.401 -      //On ownership -- remove inside the send and let ownership sit in limbo
   4.402 -      // as a potential in an entry in the hash table, when this receive msg
   4.403 -      // gets paired to a send, the ownership gets added to the receivePr --
   4.404 -      // the next work-unit in the receivePr's trace will have ownership.
   4.405 -   VMS_WL__send_sem_request( &reqData, sendPr );
   4.406 -
   4.407 -      //When come back from suspend, no longer own data reachable from msg
   4.408 -      //TODO: release ownership here
   4.409 - }
   4.410 -
   4.411 -void
   4.412 -VOMP__send_from_to( void *msg, SlaveVP *sendPr, SlaveVP *receivePr )
   4.413 - { VOMPSemReq  reqData;
   4.414 -
   4.415 -      //hash on the receiver, 'cause always know it, but sometimes want to
   4.416 -      // receive from anonymous sender
   4.417 -
   4.418 -   reqData.receivePr = receivePr;
   4.419 -   reqData.sendPr    = sendPr;
   4.420 -   reqData.reqType   = send_from_to;
   4.421 -   reqData.msg       = msg;
   4.422 -   reqData.nextReqInHashEntry = NULL;
   4.423 -
   4.424 -   VMS_WL__send_sem_request( &reqData, sendPr );
   4.425 - }
   4.426 -
   4.427 -
   4.428 -//===========================================================================
   4.429 -
   4.430 -void *
   4.431 -VOMP__receive_any_to( SlaveVP *receivePr )
   4.432 - {
   4.433 -
   4.434 - }
   4.435 -
   4.436 -void *
   4.437 -VOMP__receive_type_to( const int type, SlaveVP *receivePr )
   4.438 - {       DEBUG__printf1(dbgRqstHdlr,"WL: receive type to: %d", receivePr->slaveID);
   4.439 -   VOMPSemReq  reqData;
   4.440 -
   4.441 -   reqData.receivePr = receivePr;
   4.442 -   reqData.reqType   = receive_type;
   4.443 -   reqData.msgType   = type;
   4.444 -   reqData.nextReqInHashEntry = NULL;
   4.445 -
   4.446 -   VMS_WL__send_sem_request( &reqData, receivePr );
   4.447 -   
   4.448 -   return receivePr->dataRetFromReq;
   4.449 - }
   4.450 -
   4.451 -
   4.452 -
   4.453 -/*Call this at point receiving virt pr wants in-coming data.
   4.454 - * 
   4.455 - *The reason receivePr must call this is that it modifies the receivPr
   4.456 - * loc structure directly -- and the VMS rules state a virtual processor
   4.457 - * loc structure can only be modified by itself.
   4.458 - */
   4.459 -void *
   4.460 -VOMP__receive_from_to( SlaveVP *sendPr, SlaveVP *receivePr )
   4.461 - {       DEBUG__printf2(dbgRqstHdlr,"WL: receive from %d to: %d", sendPr->slaveID, receivePr->slaveID);
   4.462 -   VOMPSemReq  reqData;
   4.463 -
   4.464 -      //hash on the receiver, 'cause always know it, but sometimes want to
   4.465 -      // receive from anonymous sender
   4.466 -
   4.467 -   reqData.receivePr = receivePr;
   4.468 -   reqData.sendPr    = sendPr;
   4.469 -   reqData.reqType   = receive_from_to;
   4.470 -   reqData.nextReqInHashEntry = NULL;
   4.471 -
   4.472 -   VMS_WL__send_sem_request( &reqData, receivePr );
   4.473 -
   4.474 -   return receivePr->dataRetFromReq;
   4.475 - }
   4.476 -
   4.477 -
   4.478 -//===========================================================================
   4.479 +//==========================================================================
   4.480  //
   4.481  /*A function singleton is a function whose body executes exactly once, on a
   4.482   * single core, no matter how many times the function is called and no
   4.483 @@ -601,16 +481,16 @@
   4.484   */
   4.485  
   4.486  /*asm function declarations*/
   4.487 -void asm_save_ret_to_singleton(VOMPSingleton *singletonPtrAddr);
   4.488 -void asm_write_ret_from_singleton(VOMPSingleton *singletonPtrAddr);
   4.489 +void asm_save_ret_to_singleton(VSsSingleton *singletonPtrAddr);
   4.490 +void asm_write_ret_from_singleton(VSsSingleton *singletonPtrAddr);
   4.491  
   4.492  /*Fn singleton uses ID as index into array of singleton structs held in the
   4.493   * semantic environment.
   4.494   */
   4.495  void
   4.496 -VOMP__start_fn_singleton( int32 singletonID,   SlaveVP *animPr )
   4.497 +VSs__start_fn_singleton( int32 singletonID,   SlaveVP *animPr )
   4.498   {
   4.499 -   VOMPSemReq  reqData;
   4.500 +   VSsSemReq  reqData;
   4.501  
   4.502        //
   4.503     reqData.reqType     = singleton_fn_start;
   4.504 @@ -619,7 +499,7 @@
   4.505     VMS_WL__send_sem_request( &reqData, animPr );
   4.506     if( animPr->dataRetFromReq ) //will be 0 or addr of label in end singleton
   4.507      {
   4.508 -       VOMPSemEnv *semEnv = VMS_int__give_sem_env_for( animPr );
   4.509 +       VSsSemEnv *semEnv = VMS_int__give_sem_env_for( animPr );
   4.510         asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
   4.511      }
   4.512   }
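
As a usage illustration, a minimal sketch of bracketing once-only initialization with the function-singleton calls above; the singleton ID and the body are hypothetical, and the ID is assumed to be a small index into the semantic environment's fnSingletons array.

    #define INIT_TABLES_SINGLETON  0      /*hypothetical singleton ID (array index)*/

    void
    initTablesOnce( SlaveVP *animSlv )
     {
       VSs__start_fn_singleton( INIT_TABLES_SINGLETON, animSlv );

          /*this body runs exactly once, no matter how many slaves call here;
           * later callers jump past it to the matching end call*/
       /* buildTables();   hypothetical once-only work */

       VSs__end_fn_singleton( INIT_TABLES_SINGLETON, animSlv );
     }
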
   4.513 @@ -629,9 +509,9 @@
   4.514   * location.
   4.515   */
   4.516  void
   4.517 -VOMP__start_data_singleton( VOMPSingleton **singletonAddr,  SlaveVP *animPr )
   4.518 +VSs__start_data_singleton( VSsSingleton **singletonAddr,  SlaveVP *animPr )
   4.519   {
   4.520 -   VOMPSemReq  reqData;
   4.521 +   VSsSemReq  reqData;
   4.522  
   4.523     if( *singletonAddr && (*singletonAddr)->hasFinished )
   4.524         goto JmpToEndSingleton;
   4.525 @@ -658,13 +538,13 @@
   4.526   * inside is shared by all invocations of a given singleton ID.
   4.527   */
   4.528  void
   4.529 -VOMP__end_fn_singleton( int32 singletonID, SlaveVP *animPr )
   4.530 +VSs__end_fn_singleton( int32 singletonID, SlaveVP *animPr )
   4.531   {
   4.532 -   VOMPSemReq  reqData;
   4.533 +   VSsSemReq  reqData;
   4.534  
   4.535        //don't need this addr until after at least one singleton has reached
   4.536        // this function
   4.537 -   VOMPSemEnv *semEnv = VMS_int__give_sem_env_for( animPr );
   4.538 +   VSsSemEnv *semEnv = VMS_int__give_sem_env_for( animPr );
   4.539     asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
   4.540  
   4.541     reqData.reqType     = singleton_fn_end;
   4.542 @@ -677,9 +557,9 @@
   4.543   }
   4.544  
   4.545  void
   4.546 -VOMP__end_data_singleton(  VOMPSingleton **singletonPtrAddr, SlaveVP *animPr )
   4.547 +VSs__end_data_singleton(  VSsSingleton **singletonPtrAddr, SlaveVP *animPr )
   4.548   {
   4.549 -   VOMPSemReq  reqData;
   4.550 +   VSsSemReq  reqData;
   4.551  
   4.552        //don't need this addr until after singleton struct has reached
   4.553        // this function for first time
   4.554 @@ -709,10 +589,10 @@
   4.555   * between as work-code.
   4.556   */
   4.557  void
   4.558 -VOMP__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
   4.559 +VSs__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
   4.560                                      void *data, SlaveVP *animPr )
   4.561   {
   4.562 -   VOMPSemReq  reqData;
   4.563 +   VSsSemReq  reqData;
   4.564  
   4.565        //
   4.566     reqData.reqType          = atomic;
   4.567 @@ -737,12 +617,12 @@
   4.568   *If NULL, then write requesting into the field and resume.
   4.569   */
   4.570  void
   4.571 -VOMP__start_transaction( int32 transactionID, SlaveVP *animPr )
   4.572 +VSs__start_transaction( int32 transactionID, SlaveVP *animPr )
   4.573   {
   4.574 -   VOMPSemReq  reqData;
   4.575 +   VSsSemReq  reqData;
   4.576  
   4.577        //
   4.578 -   reqData.sendPr      = animPr;
   4.579 +   reqData.callingSlv      = animPr;
   4.580     reqData.reqType     = trans_start;
   4.581     reqData.transID     = transactionID;
   4.582  
   4.583 @@ -759,12 +639,12 @@
   4.584   * resumes both.
   4.585   */
   4.586  void
   4.587 -VOMP__end_transaction( int32 transactionID, SlaveVP *animPr )
   4.588 +VSs__end_transaction( int32 transactionID, SlaveVP *animPr )
   4.589   {
   4.590 -   VOMPSemReq  reqData;
   4.591 +   VSsSemReq  reqData;
   4.592  
   4.593        //
   4.594 -   reqData.sendPr      = animPr;
   4.595 +   reqData.callingSlv      = animPr;
   4.596     reqData.reqType     = trans_end;
   4.597     reqData.transID     = transactionID;
   4.598  
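
For illustration, a sketch of guarding a shared update with the transaction pair above; the transaction ID and shared variable are hypothetical, and only one slave at a time is resumed between the start and end requests.

    #define TOTALS_TRANS  0               /*hypothetical transaction ID*/

    void
    addToSharedTotal( int32 amount, SlaveVP *animSlv )
     {
       VSs__start_transaction( TOTALS_TRANS, animSlv );

       /* sharedTotal += amount;    hypothetical shared update, now serialized */

       VSs__end_transaction( TOTALS_TRANS, animSlv );
     }
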
     5.1 --- a/VSs.h	Thu May 24 07:34:21 2012 -0700
     5.2 +++ b/VSs.h	Wed May 30 15:02:38 2012 -0700
     5.3 @@ -6,8 +6,8 @@
     5.4   *
     5.5   */
     5.6  
     5.7 -#ifndef _VOMP_H
     5.8 -#define	_VOMP_H
     5.9 +#ifndef _VSs_H
    5.10 +#define	_VSs_H
    5.11  
    5.12  #include "Queue_impl/PrivateQueue.h"
    5.13  #include "Hash_impl/PrivateHash.h"
    5.14 @@ -21,31 +21,69 @@
    5.15     //This is hardware dependent -- it's the number of cycles of scheduling
    5.16     // overhead -- if a work unit is fewer than this, it is better being
    5.17     // combined sequentially with other work
    5.18 -   //This value depends on both VMS overhead and VOMP's plugin.  At some point
    5.19 -   // it will be derived by perf-counter measurements during init of VOMP
    5.20 +   //This value depends on both VMS overhead and VSs's plugin.  At some point
    5.21 +   // it will be derived by perf-counter measurements during init of VSs
    5.22  #define MIN_WORK_UNIT_CYCLES 20000
    5.23  
    5.24  //===========================================================================
    5.25 -/*This header defines everything specific to the VOMP semantic plug-in
    5.26 +/*This header defines everything specific to the VSs semantic plug-in
    5.27   */
    5.28 -typedef struct _VOMPSemReq   VOMPSemReq;
    5.29 +typedef struct _VSsSemReq   VSsSemReq;
    5.30  typedef void  (*VSsTaskFnPtr )   ( void * ); //executed atomically in master
    5.31 +typedef void  (*PtrToAtomicFn )  ( void * ); //executed atomically in master
    5.32  //===========================================================================
    5.33  
    5.34 -#define IN    1;
    5.35 -#define OUT   2;
    5.36 -#define INOUT 3;
    5.37 +#define IN    1
    5.38 +#define OUT   2
    5.39 +#define INOUT 3
    5.40 +
    5.41 +#define READER  1
    5.42 +#define WRITER  2
    5.43  
    5.44  typedef struct
    5.45   {
    5.46     VSsTaskFnPtr fn;
    5.47 -   int32  numArgs;
    5.48 -   int32 *argTypes;
    5.49 -   int32 *argSizes;
    5.50 +   int32  numTotalArgs;//the number of inputs to function
    5.51 +   int32  numCtldArgs;//how many of args have dependencies
    5.52 +   int32 *argTypes;   //says reader, writer, or non-ctld
    5.53 +   int32 *argSizes;   //for detecting overlap
    5.54 +   int32  sizeOfArgs; //for memcpy of args struct
    5.55   }
    5.56  VSsTaskType;
    5.57  
    5.58  
    5.59 +typedef struct
    5.60 + {
    5.61 +   void       **args; //ctld args must come first, as ptrs
    5.62 +   VSsTaskType *taskType;
    5.63 +   int32        numBlockingProp;
    5.64 +   SlaveVP     *slaveAssignedTo;
    5.65 + }
    5.66 +VSsTaskStub;
    5.67 +
    5.68 +typedef struct
    5.69 + {
    5.70 +   VSsTaskStub *taskStub;
    5.71 +   int32        argNum;
    5.72 +   int32        isReader;
    5.73 + }
    5.74 +VSsTaskStubCarrier;
    5.75 +
    5.76 +typedef struct
    5.77 + {
    5.78 +   bool32       hasEnabledNonFinishedWriter;
    5.79 +   int32        numEnabledNonDoneReaders;
    5.80 +   PrivQStruct *waitersQ;
    5.81 + }
    5.82 +VSsPointerEntry;
    5.83 +
    5.84 +
    5.85 +typedef struct
    5.86 + {
    5.87 +   int32        type;
    5.88 +   VSsTaskStub *taskStub;
    5.89 + }
    5.90 +VSsWaiterCarrier;
    5.91  
    5.92  /*Semantic layer-specific data sent inside a request from lib called in app
    5.93   * to request handler called in AnimationMaster
    5.94 @@ -56,7 +94,7 @@
    5.95     SlaveVP      *VPCurrentlyExecuting;
    5.96     PrivQueueStruc *waitingVPQ;
    5.97   }
    5.98 -VOMPTrans;
    5.99 +VSsTrans;
   5.100  
   5.101  /*WARNING: assembly hard-codes position of endInstrAddr as first field
   5.102   */
   5.103 @@ -67,17 +105,16 @@
   5.104     int32           hasFinished;
   5.105     PrivQueueStruc *waitQ;
   5.106   }
   5.107 -VOMPSingleton;
   5.108 +VSsSingleton;
   5.109  
   5.110 -enum VOMPReqType
   5.111 +enum VSsReqType
   5.112   {
   5.113 -   send_type = 1,
   5.114 -   send_from_to,
   5.115 -   receive_any,    //order and grouping matter -- send before receive
   5.116 -   receive_type,   // and receive_any first of the receives -- Handlers
   5.117 -   receive_from_to,// rely upon this ordering of enum
   5.118 -   transfer_to,
   5.119 -   transfer_out,
   5.120 +   submit_task = 1,
   5.121 +   end_task,
   5.122 +   create_slave,
   5.123 +   create_slave_w_aff,
   5.124 +   dissipate_slave,
   5.125 +   //===============================
   5.126     malloc_req,
   5.127     free_req,
   5.128     singleton_fn_start,
   5.129 @@ -89,43 +126,43 @@
   5.130     trans_end
   5.131   };
   5.132  
   5.133 -struct _VOMPSemReq
   5.134 - { enum VOMPReqType    reqType;
   5.135 -   SlaveVP         *sendPr;
   5.136 -   SlaveVP         *receivePr;
   5.137 -   int32              msgType;
   5.138 -   void              *msg;
   5.139 -   VOMPSemReq         *nextReqInHashEntry;
   5.140 -
   5.141 +struct _VSsSemReq
   5.142 + { enum VSsReqType    reqType;
   5.143 +   SlaveVP           *callingSlv;
   5.144 +   VSsTaskType       *taskType;
   5.145 +   void              *args;
   5.146 +   VSsTaskStub       *taskStub;
   5.147 +   
   5.148 +   TopLevelFnPtr      fnPtr;
   5.149     void              *initData;
   5.150 -   TopLevelFnPtr     fnPtr;
   5.151     int32              coreToAssignOnto;
   5.152  
   5.153     int32              sizeToMalloc;
   5.154     void              *ptrToFree;
   5.155  
   5.156     int32              singletonID;
   5.157 -   VOMPSingleton     **singletonPtrAddr;
   5.158 +   VSsSingleton     **singletonPtrAddr;
   5.159  
   5.160     PtrToAtomicFn      fnToExecInMaster;
   5.161     void              *dataForFn;
   5.162  
   5.163     int32              transID;
   5.164   }
   5.165 -/* VOMPSemReq */;
   5.166 +/* VSsSemReq */;
   5.167  
   5.168  
   5.169  typedef struct
   5.170   {
   5.171     PrivQueueStruc **readyVPQs;
   5.172 -   HashTable       *commHashTbl;
   5.173 +   PrivQueueStruc  *taskReadyQ;  //Q: shared or local?
   5.174 +   HashTable       *argPtrHashTbl;
   5.175     int32            numSlaveVP;
   5.176     int32            nextCoreToGetNewPr;
   5.177     int32            primitiveStartTime;
   5.178  
   5.179                         //fix limit on num with dynArray
   5.180 -   VOMPSingleton     fnSingletons[NUM_STRUCS_IN_SEM_ENV];
   5.181 -   VOMPTrans         transactionStrucs[NUM_STRUCS_IN_SEM_ENV];
   5.182 +   VSsSingleton     fnSingletons[NUM_STRUCS_IN_SEM_ENV];
   5.183 +   VSsTrans         transactionStrucs[NUM_STRUCS_IN_SEM_ENV];
   5.184     
   5.185     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   5.186     ListOfArrays* unitList;
   5.187 @@ -144,7 +181,7 @@
   5.188     SlaveVP* idlePr[NUM_CORES][NUM_ANIM_SLOTS];
   5.189     int shutdownInitiated;
   5.190   }
   5.191 -VOMPSemEnv;
   5.192 +VSsSemEnv;
   5.193  
   5.194  
   5.195  typedef struct _TransListElem TransListElem;
   5.196 @@ -159,125 +196,106 @@
   5.197   {
   5.198     int32          highestTransEntered;
   5.199     TransListElem *lastTransEntered;
   5.200 +   bool32         needsTaskAssigned;
   5.201 +   VSsTaskStub   *taskStub;
   5.202   }
   5.203 -VOMPSemData;
   5.204 +VSsSemData;
   5.205   
   5.206  //===========================================================================
   5.207  
   5.208  void
   5.209 -VOMP__create_seed_procr_and_do_work( TopLevelFnPtr fn, void *initData );
   5.210 +VSs__create_seed_slave_and_do_work( TopLevelFnPtr fn, void *initData );
   5.211  
   5.212  int32
   5.213 -VOMP__giveMinWorkUnitCycles( float32 percentOverhead );
   5.214 +VSs__giveMinWorkUnitCycles( float32 percentOverhead );
   5.215  
   5.216  void
   5.217 -VOMP__start_primitive();
   5.218 +VSs__start_primitive();
   5.219  
   5.220  int32
   5.221 -VOMP__end_primitive_and_give_cycles();
   5.222 +VSs__end_primitive_and_give_cycles();
   5.223  
   5.224  int32
   5.225 -VOMP__giveIdealNumWorkUnits();
   5.226 +VSs__giveIdealNumWorkUnits();
   5.227  
   5.228  int32
   5.229 -VOMP__give_number_of_cores_to_schedule_onto();
   5.230 +VSs__give_number_of_cores_to_schedule_onto();
   5.231  
   5.232  //=======================
   5.233  
   5.234  void
   5.235 -VOMP__init();
   5.236 +VSs__init();
   5.237  
   5.238  void
   5.239 -VOMP__cleanup_after_shutdown();
   5.240 +VSs__cleanup_after_shutdown();
   5.241  
   5.242  //=======================
   5.243  
   5.244    SlaveVP *
   5.245 -VOMP__create_procr_with( TopLevelFnPtr fnPtr, void *initData,
   5.246 +VSs__create_slave_with( TopLevelFnPtr fnPtr, void *initData,
   5.247                            SlaveVP *creatingSlv );
   5.248  
   5.249    SlaveVP *
   5.250 -VOMP__create_procr_with_affinity( TopLevelFnPtr fnPtr,    void *initData,
   5.251 +VSs__create_slave_with_affinity( TopLevelFnPtr fnPtr,    void *initData,
   5.252                              SlaveVP *creatingPr, int32 coreToAssignOnto);
   5.253  
   5.254  void
   5.255 -VOMP__dissipate_procr( SlaveVP *procrToDissipate );
   5.256 +VSs__dissipate_slave( SlaveVP *slaveToDissipate );
   5.257  
   5.258  //=======================
   5.259 -void *
   5.260 -VOMP__malloc_to( int numBytes, SlaveVP *ownerSlv );
   5.261 +
   5.262 +#define VSs__malloc( numBytes, callingSlave ) VMS_App__malloc( numBytes, callingSlave)
   5.263 +
   5.264 +#define VSs__free(ptrToFree, callingSlave ) VMS_App__free( ptrToFree, callingSlave )
   5.265 +
   5.266 +
   5.267 +//=======================
   5.268 +int32
   5.269 +VSs__submit_task( VSsTaskType *taskType, void **args, SlaveVP *animSlv);
   5.270 +
   5.271  
   5.272  void
   5.273 -VOMP__free( void *ptrToFree, SlaveVP *owningSlv );
   5.274 -
   5.275 -void
   5.276 -VOMP__transfer_ownership_of_from_to( void *data, SlaveVP *oldOwnerPr,
   5.277 -                                                    SlaveVP *newOwnerSlv );
   5.278 -                                                    
   5.279 -void
   5.280 -VOMP__add_ownership_by_to( SlaveVP *newOwnerPr, void *data );
   5.281 -
   5.282 -void
   5.283 -VOMP__remove_ownership_by_from( SlaveVP *loserPr, void *dataLosing );
   5.284 -
   5.285 -void
   5.286 -VOMP__transfer_ownership_to_outside( void *dataToTransferOwnershipOf );
   5.287 -
   5.288 -
   5.289 -
   5.290 -//=======================
   5.291 -void
   5.292 -VOMP__send_of_type_to( SlaveVP *sendPr, void *msg, const int type,
   5.293 -                        SlaveVP *receivePr);
   5.294 -
   5.295 -void
   5.296 -VOMP__send_from_to( void *msg, SlaveVP *sendPr, SlaveVP *receivePr);
   5.297 -
   5.298 -void *
   5.299 -VOMP__receive_type_to( const int type, SlaveVP *receiveSlv );
   5.300 -
   5.301 -void *
   5.302 -VOMP__receive_from_to( SlaveVP *sendPr, SlaveVP *receiveSlv );
   5.303 +VSs__end_task( SlaveVP *animSlv );
   5.304  
   5.305  
   5.306  //======================= Concurrency Stuff ======================
   5.307  void
   5.308 -VOMP__start_fn_singleton( int32 singletonID, SlaveVP *animSlv );
   5.309 +VSs__start_fn_singleton( int32 singletonID, SlaveVP *animSlv );
   5.310  
   5.311  void
   5.312 -VOMP__end_fn_singleton( int32 singletonID, SlaveVP *animSlv );
   5.313 +VSs__end_fn_singleton( int32 singletonID, SlaveVP *animSlv );
   5.314  
   5.315  void
   5.316 -VOMP__start_data_singleton( VOMPSingleton **singeltonAddr, SlaveVP *animSlv );
   5.317 +VSs__start_data_singleton( VSsSingleton **singeltonAddr, SlaveVP *animSlv );
   5.318  
   5.319  void
   5.320 -VOMP__end_data_singleton( VOMPSingleton **singletonAddr, SlaveVP *animSlv );
   5.321 +VSs__end_data_singleton( VSsSingleton **singletonAddr, SlaveVP *animSlv );
   5.322  
   5.323  void
   5.324 -VOMP__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
   5.325 +VSs__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
   5.326                                      void *data, SlaveVP *animSlv );
   5.327  
   5.328  void
   5.329 -VOMP__start_transaction( int32 transactionID, SlaveVP *animSlv );
   5.330 +VSs__start_transaction( int32 transactionID, SlaveVP *animSlv );
   5.331  
   5.332  void
   5.333 -VOMP__end_transaction( int32 transactionID, SlaveVP *animSlv );
   5.334 +VSs__end_transaction( int32 transactionID, SlaveVP *animSlv );
   5.335  
   5.336  
   5.337  //=========================  Internal use only  =============================
   5.338  void
   5.339 -VOMP__Request_Handler( SlaveVP *requestingPr, void *_semEnv );
   5.340 +VSs__Request_Handler( SlaveVP *requestingPr, void *_semEnv );
   5.341  
   5.342  SlaveVP *
   5.343 -VOMP__assign_slaveVP_to_slot( void *_semEnv, AnimSlot *slot );
   5.344 +VSs__assign_slaveVP_to_slot( void *_semEnv, AnimSlot *slot );
   5.345  
   5.346  SlaveVP*
   5.347 -VOMP__create_procr_helper( TopLevelFnPtr fnPtr, void *initData,
   5.348 -                          VOMPSemEnv *semEnv,    int32 coreToAssignOnto );
   5.349 +VSs__create_slave_helper( TopLevelFnPtr fnPtr, void *initData,
   5.350 +                          VSsSemEnv *semEnv,    int32 coreToAssignOnto );
   5.351  
   5.352  //=====================  Measurement of Lang Overheads  =====================
   5.353 -#include "VOMP_Measurement.h"
   5.354 +#include "VSs_Measurement.h"
   5.355  
   5.356  //===========================================================================
   5.357 -#endif	/* _VOMP_H */
   5.358 +#endif	/* _VSs_H */
   5.359  
     6.1 --- a/VSs_PluginFns.c	Thu May 24 07:34:21 2012 -0700
     6.2 +++ b/VSs_PluginFns.c	Wed May 30 15:02:38 2012 -0700
     6.3 @@ -8,107 +8,135 @@
     6.4  #include <stdlib.h>
     6.5  
     6.6  #include "Queue_impl/PrivateQueue.h"
     6.7 -#include "VOMP.h"
     6.8 -#include "VOMP_Request_Handlers.h"
     6.9 +#include "VSs.h"
    6.10 +#include "VSs_Request_Handlers.h"
    6.11  
    6.12  //=========================== Local Fn Prototypes ===========================
    6.13  void
    6.14 -resume_slaveVP( SlaveVP *procr, VOMPSemEnv *semEnv );
    6.15 +resume_slaveVP( SlaveVP *slave, VSsSemEnv *semEnv );
    6.16  
    6.17  void
    6.18 -handleSemReq( VMSReqst *req, SlaveVP *requestingPr, VOMPSemEnv *semEnv );
    6.19 +handleSemReq( VMSReqst *req, SlaveVP *requestingPr, VSsSemEnv *semEnv );
    6.20  
    6.21  void
    6.22 -handleDissipate(             SlaveVP *requestingPr, VOMPSemEnv *semEnv );
    6.23 +handleDissipate(                SlaveVP *requestingPr, VSsSemEnv *semEnv );
    6.24  
    6.25  void
    6.26 -handleCreate( VMSReqst *req, SlaveVP *requestingPr, VOMPSemEnv *semEnv  );
    6.27 -
    6.28 +handleCreate(    VMSReqst *req, SlaveVP *requestingPr, VSsSemEnv *semEnv );
    6.29  
    6.30  //============================== Assigner ==================================
    6.31  //
    6.32 -/*For VOMP, assigning a slave simply takes the next work-unit off the
    6.33 +/*For VSs, assigning a slave simply takes the next work-unit off the
    6.34   * ready-to-go work-unit queue and assigns it to the offered slot.
    6.35   *If the ready-to-go work-unit queue is empty, then nothing to assign
    6.36   * to the animation slot -- return FALSE to let Master loop know assigning
    6.37   * that slot failed.
    6.38   */
    6.39  SlaveVP *
    6.40 -VOMP__assign_slaveVP_to_slot( void *_semEnv, AnimSlot *slot )
    6.41 +VSs__assign_slaveVP_to_slot( void *_semEnv, AnimSlot *slot )
    6.42   { SlaveVP   *assignPr;
    6.43 -   VOMPSemEnv *semEnv;
    6.44 +   VSsSemEnv *semEnv;
    6.45     int32      coreNum, slotNum;
    6.46     
    6.47     coreNum = slot->coreSlotIsOn;
    6.48     slotNum = slot->slotIdx;
    6.49     
    6.50 -   semEnv  = (VOMPSemEnv *)_semEnv;
    6.51 +   semEnv  = (VSsSemEnv *)_semEnv;
    6.52  
    6.53 +   /*At this point, could do an optimization -- have one slave for each slot
    6.54 +    * and make it ALWAYS the one to assign to that slot -- so there is no
    6.55 +    * read fromQ.  However, going to keep this compatible with other
    6.56 +    * languages, like VOMP and SSR.  So, leave the normal slave fetch
    6.57 +    * from readyQ. For example, allows SSR constructs, to create extra
    6.58 +    * slaves, and send communications direction between them, while still
    6.59 +    * having the StarSs-style spawning of tasks..  so one of the tasks
    6.60 +    * can now suspend and do more interesting things..  means keep a pool
    6.61 +    * of slaves, and take one from pool when a task suspends.
    6.62 +    */
    6.63 +   //TODO: fix false sharing in array
    6.64     assignPr = readPrivQ( semEnv->readyVPQs[coreNum] );
    6.65 +   if( assignPr == NULL )
    6.66 +    { //if there are tasks ready to go, then make a new slave to animate
    6.67 +      // This only happens when all available slaves are blocked by
    6.68 +      // constructs like send, or mutex, and so on..
    6.69 +      VMS_PI__throw_exception( "no slaves in readyQ", NULL, NULL );
    6.70 +    }
    6.71 +   if( assignPr != NULL ) //could still be NULL, if no tasks avail
    6.72 +    {
    6.73 +      if( ((VSsSemData *)assignPr->semanticData)->needsTaskAssigned )
    6.74 +       { VSsTaskStub *
    6.75 +         newTaskStub = readQ( semEnv->taskReadyQ );
    6.76 +         if( newTaskStub == NULL )
    6.77 +          { //No task, so slave unused, so put it back and return "no-slave"
    6.78 +            writeQ( assignPr, semEnv->readyVPQs[coreNum] );
    6.79 +            return NULL;
    6.80 +          }
    6.81 +         //point slave to the task's function, and mark slave as having task
    6.82 +         VMS_int__reset_slaveVP_to_TopLvlFn( assignPr, 
    6.83 +                             newTaskStub->taskType->fn, newTaskStub->args );
    6.84 +         ((VSsSemData *)assignPr->semanticData)->taskStub = newTaskStub;
    6.85 +         newTaskStub->slaveAssignedTo = assignPr;
    6.86 +         ((VSsSemData *)assignPr->semanticData)->needsTaskAssigned = FALSE;
    6.87 +       }
    6.88 +   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    6.89 +    }
    6.90        //Note, using a non-blocking queue -- it returns NULL if queue empty
    6.91 -  if(!assignPr){
    6.92 -       assignPr = semEnv->idlePr[coreNum][slotNum];
    6.93 -      
    6.94 -      if(semEnv->shutdownInitiated) {
    6.95 -          assignPr = VMS_SS__create_shutdown_slave();
    6.96 -      }
    6.97 -     //things that would normally happen in resume(), but these VPs never go there
    6.98 -     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    6.99 -        assignPr->assignCount++; //Somewhere here!
   6.100 -        Unit newu;
   6.101 -        newu.vp = assignPr->slaveID;
   6.102 -        newu.task = assignPr->assignCount;
   6.103 -        addToListOfArrays(Unit,newu,semEnv->unitList);
   6.104 -   
   6.105 -        if (assignPr->assignCount > 1){
   6.106 -                Dependency newd;
   6.107 -                newd.from_vp = assignPr->slaveID;
   6.108 -                newd.from_task = assignPr->assignCount - 1;
   6.109 -                newd.to_vp = assignPr->slaveID;
   6.110 -                newd.to_task = assignPr->assignCount;
   6.111 -                addToListOfArrays(Dependency, newd ,semEnv->ctlDependenciesList);  
   6.112 -        }
   6.113 +   else //assignPr is indeed NULL
   6.114 +    { assignPr = semEnv->idlePr[coreNum][slotNum]; 
   6.115 +      if(semEnv->shutdownInitiated) 
   6.116 +       { assignPr = VMS_SS__create_shutdown_slave();
   6.117 +       }
   6.118 +      //things that would normally happen in resume(), but these VPs
   6.119 +      // never go there
   6.120 +         assignPr->assignCount++; //Somewhere here!
   6.121 +         Unit newu;
   6.122 +         newu.vp = assignPr->slaveID;
   6.123 +         newu.task = assignPr->assignCount;
   6.124 +         addToListOfArrays(Unit,newu,semEnv->unitList);
   6.125 +
   6.126 +         if (assignPr->assignCount > 1)
   6.127 +          { Dependency newd;
   6.128 +            newd.from_vp = assignPr->slaveID;
   6.129 +            newd.from_task = assignPr->assignCount - 1;
   6.130 +            newd.to_vp = assignPr->slaveID;
   6.131 +            newd.to_task = assignPr->assignCount;
   6.132 +            addToListOfArrays(Dependency, newd ,semEnv->ctlDependenciesList);  
   6.133 +          }
   6.134        #endif
   6.135 -   }
   6.136 +    }
   6.137     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   6.138 -   if (assignPr) {
   6.139 -        //assignPr->numTimesAssigned++;
   6.140 -        Unit prev_in_slot = semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum];
   6.141 -        if(prev_in_slot.vp != 0){
   6.142 -                Dependency newd;
   6.143 -                newd.from_vp = prev_in_slot.vp;
   6.144 -                newd.from_task = prev_in_slot.task;
   6.145 -                newd.to_vp = assignPr->slaveID;
   6.146 -                newd.to_task = assignPr->assignCount;
   6.147 -                addToListOfArrays(Dependency,newd,semEnv->hwArcs);   
   6.148 -        }
   6.149 -        prev_in_slot.vp = assignPr->slaveID;
   6.150 -        prev_in_slot.task = assignPr->assignCount;
   6.151 -        semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum] = prev_in_slot;        
   6.152 -   }
   6.153 +   if( assignPr != NULL )
   6.154 +    { //assignPr->numTimesAssigned++;
   6.155 +      Unit prev_in_slot = 
   6.156 +         semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum];
   6.157 +      if(prev_in_slot.vp != 0)
   6.158 +       { Dependency newd;
   6.159 +         newd.from_vp = prev_in_slot.vp;
   6.160 +         newd.from_task = prev_in_slot.task;
   6.161 +         newd.to_vp = assignPr->slaveID;
   6.162 +         newd.to_task = assignPr->assignCount;
   6.163 +         addToListOfArrays(Dependency,newd,semEnv->hwArcs);   
   6.164 +       }
   6.165 +      prev_in_slot.vp = assignPr->slaveID;
   6.166 +      prev_in_slot.task = assignPr->assignCount;
   6.167 +      semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum] =
   6.168 +         prev_in_slot;        
   6.169 +    }
   6.170     #endif
   6.171     return( assignPr );
   6.172   }
   6.173  
   6.174  
   6.175 -//===========================  Request Handler  =============================
   6.176 +//===========================  Request Handler  ============================
   6.177  //
   6.178 -/*Will get requests to send, to receive, and to create new processors.
   6.179 - * Upon send, check the hash to see if a receive is waiting.
   6.180 - * Upon receive, check hash to see if a send has already happened.
   6.181 - * When other is not there, put in.  When other is there, the comm.
   6.182 - *  completes, which means the receiver P gets assigned and
   6.183 - *  picks up right after the receive request.  So make the work-unit
   6.184 - *  and put it into the queue of work-units ready to go.
   6.185 - * Other request is create a new Processor, with the function to run in the
   6.186 - *  Processor, and initial data.
   6.187 +/*
   6.188   */
   6.189  void
   6.190 -VOMP__Request_Handler( SlaveVP *requestingPr, void *_semEnv )
   6.191 - { VOMPSemEnv *semEnv;
   6.192 -   VMSReqst    *req;
   6.193 +VSs__Request_Handler( SlaveVP *requestingPr, void *_semEnv )
   6.194 + { VSsSemEnv *semEnv;
   6.195 +   VMSReqst  *req;
   6.196     
   6.197 -   semEnv = (VOMPSemEnv *)_semEnv;
   6.198 +   semEnv = (VSsSemEnv *)_semEnv;
   6.199  
   6.200     req    = VMS_PI__take_next_request_out_of( requestingPr );
   6.201  
   6.202 @@ -119,7 +147,7 @@
   6.203              break;
   6.204           case createReq:    handleCreate(         req, requestingPr, semEnv);
   6.205              break;
   6.206 -         case dissipate:    handleDissipate(           requestingPr, semEnv);
   6.207 +         case dissipate:    handleDissipate(      req, requestingPr, semEnv);
   6.208              break;
   6.209           case VMSSemantic:  VMS_PI__handle_VMSSemReq(req, requestingPr, semEnv,
   6.210                                               (ResumeSlvFnPtr) &resume_slaveVP);
   6.211 @@ -135,25 +163,18 @@
   6.212  
   6.213  
   6.214  void
   6.215 -handleSemReq( VMSReqst *req, SlaveVP *reqPr, VOMPSemEnv *semEnv )
   6.216 - { VOMPSemReq *semReq;
   6.217 +handleSemReq( VMSReqst *req, SlaveVP *reqPr, VSsSemEnv *semEnv )
   6.218 + { VSsSemReq *semReq;
   6.219  
   6.220     semReq = VMS_PI__take_sem_reqst_from(req);
   6.221     if( semReq == NULL ) return;
   6.222     switch( semReq->reqType )  //sem handlers are all in other file
   6.223      {
   6.224 -      case send_type:       handleSendType(     semReq,        semEnv);
   6.225 +      case submit_task:     handleSubmitTask(   semReq,        semEnv);
   6.226           break;
   6.227 -      case send_from_to:    handleSendFromTo(   semReq,        semEnv);
   6.228 +      case end_task:        handleEndTask(      semReq,        semEnv);
   6.229           break;
   6.230 -      case receive_type:    handleReceiveType(  semReq,        semEnv);
   6.231 -         break;
   6.232 -      case receive_from_to: handleReceiveFromTo(semReq,        semEnv);
   6.233 -         break;
   6.234 -      case transfer_to:     handleTransferTo(   semReq,        semEnv);
   6.235 -         break;
   6.236 -      case transfer_out:    handleTransferOut(  semReq,        semEnv);
   6.237 -         break;
   6.238 +      //====================================================================
   6.239        case malloc_req:      handleMalloc(       semReq, reqPr, semEnv);
   6.240           break;
   6.241        case free_req:        handleFree(         semReq, reqPr, semEnv);
   6.242 @@ -178,9 +199,10 @@
   6.243  
   6.244  
   6.245  //=========================== VMS Request Handlers ==============================
   6.246 -//
   6.247 +/*SlaveVP dissipate  (NOT task-end!)
   6.248 + */
   6.249  void
   6.250 -handleDissipate( SlaveVP *requestingPr, VOMPSemEnv *semEnv )
   6.251 +handleDissipate( SlaveVP *requestingPr, VSsSemEnv *semEnv )
   6.252   {
   6.253      DEBUG__printf1(dbgRqstHdlr,"Dissipate request from processor %d",requestingPr->slaveID)
   6.254        //free any semantic data allocated to the virt procr
   6.255 @@ -200,17 +222,17 @@
   6.256  /*Re-use this in the entry-point fn
   6.257   */
   6.258    SlaveVP *
   6.259 -VOMP__create_procr_helper( TopLevelFnPtr fnPtr, void *initData,
   6.260 -                          VOMPSemEnv *semEnv,    int32 coreToAssignOnto )
   6.261 +VSs__create_slave_helper( TopLevelFnPtr fnPtr, void *initData,
   6.262 +                          VSsSemEnv *semEnv,    int32 coreToAssignOnto )
   6.263   { SlaveVP    *newPr;
   6.264 -   VOMPSemData   *semData;
   6.265 +   VSsSemData   *semData;
   6.266  
   6.267        //This is running in master, so use internal version
   6.268     newPr = VMS_PI__create_slaveVP( fnPtr, initData );
   6.269  
   6.270     semEnv->numSlaveVP += 1;
   6.271  
   6.272 -   semData = VMS_PI__malloc( sizeof(VOMPSemData) );
   6.273 +   semData = VMS_PI__malloc( sizeof(VSsSemData) );
   6.274     semData->highestTransEntered = -1;
   6.275     semData->lastTransEntered    = NULL;
   6.276  
   6.277 @@ -240,15 +262,17 @@
   6.278     return newPr;
   6.279   }
   6.280  
   6.281 +/*SlaveVP create  (NOT task create!)
   6.282 + */
   6.283  void
   6.284 -handleCreate( VMSReqst *req, SlaveVP *requestingPr, VOMPSemEnv *semEnv  )
   6.285 - { VOMPSemReq *semReq;
   6.286 +handleCreate( VMSReqst *req, SlaveVP *requestingPr, VSsSemEnv *semEnv  )
   6.287 + { VSsSemReq *semReq;
   6.288     SlaveVP    *newPr;
   6.289     
   6.290     
   6.291     semReq = VMS_PI__take_sem_reqst_from( req );
   6.292   
   6.293 -   newPr = VOMP__create_procr_helper( semReq->fnPtr, semReq->initData, semEnv,
   6.294 +   newPr = VSs__create_slave_helper( semReq->fnPtr, semReq->initData, semEnv,
   6.295                                       semReq->coreToAssignOnto );
   6.296     
   6.297           DEBUG__printf2(dbgRqstHdlr,"Create from: %d, new VP: %d", requestingPr->slaveID, newPr->slaveID)
   6.298 @@ -263,7 +287,7 @@
   6.299     addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);   
   6.300     #endif
   6.301  
   6.302 -      //For VOMP, caller needs ptr to created processor returned to it
   6.303 +      //For VSs, caller needs ptr to created processor returned to it
   6.304     requestingPr->dataRetFromReq = newPr;
   6.305  
   6.306     resume_slaveVP( newPr,        semEnv );
   6.307 @@ -273,30 +297,30 @@
   6.308  
   6.309  //=========================== Helper ==============================
   6.310  void
   6.311 -resume_slaveVP( SlaveVP *procr, VOMPSemEnv *semEnv )
   6.312 +resume_slaveVP( SlaveVP *slave, VSsSemEnv *semEnv )
   6.313   {
   6.314     #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
   6.315  /*
   6.316 -   int lastRecordIdx = procr->counter_history_array_info->numInArray -1;
   6.317 -   CounterRecord* lastRecord = procr->counter_history[lastRecordIdx];
   6.318 +   int lastRecordIdx = slave->counter_history_array_info->numInArray -1;
   6.319 +   CounterRecord* lastRecord = slave->counter_history[lastRecordIdx];
   6.320     saveLowTimeStampCountInto(lastRecord->unblocked_timestamp);
   6.321  */
   6.322     #endif
   6.323     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   6.324 -   procr->assignCount++; //Somewhere here!
   6.325 +   slave->assignCount++; //Somewhere here!
   6.326     Unit newu;
   6.327 -   newu.vp = procr->slaveID;
   6.328 -   newu.task = procr->assignCount;
   6.329 +   newu.vp = slave->slaveID;
   6.330 +   newu.task = slave->assignCount;
   6.331     addToListOfArrays(Unit,newu,semEnv->unitList);
   6.332     
   6.333 -   if (procr->assignCount > 1){
   6.334 +   if (slave->assignCount > 1){
   6.335          Dependency newd;
   6.336 -        newd.from_vp = procr->slaveID;
   6.337 -        newd.from_task = procr->assignCount - 1;
   6.338 -        newd.to_vp = procr->slaveID;
   6.339 -        newd.to_task = procr->assignCount;
   6.340 +        newd.from_vp = slave->slaveID;
   6.341 +        newd.from_task = slave->assignCount - 1;
   6.342 +        newd.to_vp = slave->slaveID;
   6.343 +        newd.to_task = slave->assignCount;
   6.344          addToListOfArrays(Dependency, newd ,semEnv->ctlDependenciesList);  
   6.345     }
   6.346     #endif
   6.347 -   writePrivQ( procr, semEnv->readyVPQs[ procr->coreAnimatedBy] );
   6.348 +   writePrivQ( slave, semEnv->readyVPQs[ slave->coreAnimatedBy] );
   6.349   }
     7.1 --- a/VSs_Request_Handlers.c	Thu May 24 07:34:21 2012 -0700
     7.2 +++ b/VSs_Request_Handlers.c	Wed May 30 15:02:38 2012 -0700
     7.3 @@ -10,28 +10,29 @@
     7.4  #include "VMS_impl/VMS.h"
     7.5  #include "Queue_impl/PrivateQueue.h"
     7.6  #include "Hash_impl/PrivateHash.h"
     7.7 -#include "VOMP.h"
     7.8 +#include "VSs.h"
     7.9  
    7.10  
    7.11  
    7.12  //=========================== Local Fn Prototypes ===========================
    7.13  void
    7.14 -resume_slaveVP( SlaveVP *procr, VOMPSemEnv *semEnv );
    7.15 +resume_slaveVP( SlaveVP *slave, VSsSemEnv *semEnv );
    7.16  
    7.17  
    7.18  
    7.19 -//===========================================================================
    7.20 +//==========================================================================
    7.21  //                           Helpers
    7.22 +//
    7.23  
    7.24  /*Only clone the elements of req used in these reqst handlers
    7.25   */
    7.26 -  VOMPSemReq *
    7.27 -cloneReq( VOMPSemReq *semReq )
    7.28 - { VOMPSemReq *clonedReq;
    7.29 +  VSsSemReq *
    7.30 +cloneReq( VSsSemReq *semReq )
    7.31 + { VSsSemReq *clonedReq;
    7.32  
    7.33 -   clonedReq             = VMS_PI__malloc( sizeof(VOMPSemReq) );
    7.34 +   clonedReq             = VMS_PI__malloc( sizeof(VSsSemReq) );
    7.35     clonedReq->reqType    = semReq->reqType;
    7.36 -   clonedReq->sendPr     = semReq->sendPr;
    7.37 +   clonedReq->callingSlv     = semReq->callingSlv;
    7.38     clonedReq->msg        = semReq->msg;
    7.39     clonedReq->nextReqInHashEntry = NULL;
    7.40     
    7.41 @@ -39,10 +40,10 @@
    7.42   }
    7.43  
    7.44  HashEntry *
    7.45 -giveEntryElseInsertReqst( char *key, VOMPSemReq *semReq,
    7.46 +giveEntryElseInsertReqst( char *key, VSsSemReq *semReq,
    7.47      HashTable   *commHashTbl )
    7.48   { HashEntry    *entry;
    7.49 -   VOMPSemReq    *waitingReq;
    7.50 +   VSsSemReq    *waitingReq;
    7.51  
    7.52     entry = getEntryFromTable( (char *)key, commHashTbl );
    7.53     if( entry == NULL )
    7.54 @@ -51,7 +52,7 @@
    7.55        addValueIntoTable( key, cloneReq( semReq ), commHashTbl );
    7.56        return NULL;
    7.57      }
    7.58 -   waitingReq = (VOMPSemReq *)entry->content;
    7.59 +   waitingReq = (VSsSemReq *)entry->content;
    7.60     if( waitingReq == NULL )  //might happen when last waiting gets paired
    7.61      {    //no waiting sends or receives, so add this request and exit
    7.62        entry->content = semReq;
    7.63 @@ -60,356 +61,411 @@
    7.64     return entry;
    7.65   }
    7.66  
    7.67 +inline VSsPointerEntry *
    7.68 +create_pointer_entry_and_insert( char *key, HashTable *argPtrHashTbl )
    7.69 + { VSsPointerEntry *newEntry;
    7.70 +   
    7.71 +   newEntry = VMS_PI__malloc( sizeof(VSsPointerEntry) );
    7.72 +   newEntry->hasEnabledNonFinishedWriter = FALSE;
    7.73 +   newEntry->numEnabledNonDoneReaders    = 0;
    7.74 +   newEntry->waitersQ                    = makePrivQ();
    7.75 +      //insert under the arg-pointer's key, so later submits and task-ends
    7.76 +      // find this same entry
    7.77 +   addValueIntoTable( key, newEntry, argPtrHashTbl );
    7.78 +   return newEntry;
    7.79 + }
    7.76  
    7.77 +/*malloc's space and initializes fields -- and COPIES the arg values
    7.78 + * to new space
    7.79 + */
    7.80 +inline VSsTaskStub *
    7.81 +create_task_stub( VSsTaskType *taskType, void **args )
    7.82 + { void **newArgs;
    7.83 +   VSsTaskStub *
    7.84 +   newStub = VMS_PI__malloc( sizeof(VSsTaskStub) + taskType->sizeOfArgs );
    7.85 +   newStub->numBlockingProp = taskType->numCtldArgs;
    7.86 +   newStub->slaveAssignedTo = NULL;
    7.87 +   newStub->taskType = taskType;
    7.88 +      //the copy of the args lives directly after the stub, in same alloc
    7.89 +   newArgs = (void **)( (uint8 *)newStub + sizeof(VSsTaskStub) );
    7.90 +   newStub->args = newArgs;
    7.91 +   
    7.92 +      //Copy the arg-pointers.. can be more arguments than just the ones 
    7.93 +      // that StarSs uses to control ordering of task execution.
    7.94 +   memcpy( newArgs, args, taskType->sizeOfArgs );
    7.95 +   return newStub;
    7.96 + }
    7.96  
    7.97 +inline VSsTaskStubCarrier *
    7.98 +create_task_carrier( VSsTaskStub *taskStub, int32 argNum, int32 rdOrWrite )
    7.99 + { VSsTaskStubCarrier *newCarrier;
    7.100 + 
    7.101 +   newCarrier = VMS_PI__malloc( sizeof(VSsTaskStubCarrier) );
    7.102 +   newCarrier->taskStub = taskStub;
    7.103 +   newCarrier->argNum   = argNum;
    7.104 +   newCarrier->isReader = (rdOrWrite == READER);
    7.105 +   return newCarrier;
    7.106 + }
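The three helpers above allocate the per-pointer entry, the task stub, and the queue carrier that the submit and end-task handlers below pass around. The authoritative struct definitions live in VSs.h, which is not part of this hunk, so the sketch below only shows the field layout that the code in this file implies; the exact field types, ordering, and any additional fields are assumptions.

/* Sketch of the layout implied by the helpers and handlers in this file. */
typedef struct
 { int32           hasEnabledNonFinishedWriter; //a writer is enabled, not yet ended
   int32           numEnabledNonDoneReaders;    //readers enabled, not yet ended
   PrivQueueStruc *waitersQ;                    //FIFO of VSsTaskStubCarrier waiters
 }
VSsPointerEntry;

typedef struct
 { int32        numBlockingProp; //controlled args still blocking this task
   SlaveVP     *slaveAssignedTo;
   VSsTaskType *taskType;
   void       **args;            //points just past the stub, same allocation
 }
VSsTaskStub;

typedef struct
 { VSsTaskStub *taskStub;
   int32        argNum;
   int32        isReader;        //TRUE when the task only reads this arg
 }
VSsTaskStubCarrier;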
   7.106  
   7.107 -//===========================================================================
   7.108 +//==========================================================================
   7.109 +//
   7.110 +//
   7.111 +/*Submit Task
   7.112 + * 
   7.113 + *Uses a hash table to match the arg-pointers to each other. So, an
   7.114 + * argument-pointer is one-to-one with a hash-table entry.
   7.115 + * 
   7.116 + *If overlapping region detection is enabled, then a hash entry is one
   7.117 + * link in a ring of all entries that overlap each other.  For example,
   7.118 + * say region A shared common addresses with region B, but the pointers
   7.119 + * to them are different, then the hash entries for the two would be
   7.120 + * linked in a ring.  When a pointer is processed, all the pointers in
   7.121 + * the ring are processed (Doesn't differentiate independent siblings
   7.122 + * from parent-child or conjoined twins overlap..)
   7.123 + * NOT ENABLED AS OF MAY 25 2012
   7.124 + * 
   7.125 + *A hash entry has a queue of tasks that are waiting to access the
   7.126 + * pointed-to  region.  The queue goes in the order of creation of
   7.127 + * the tasks.  Each entry in the queue has a pointer to the task-stub
   7.128 + * and whether the task reads-only vs writes to the hash-entry's region.
   7.129 + * 
   7.130 + *A hash entry also has a count of the enabled but not yet finished readers
   7.131 + * of the region. It also has a flag that says whether a writer has been
   7.132 + * enabled and is not yet finished.
   7.133 + * 
   7.134 + *There are two kinds of events that access a hash entry: creation of a
   7.135 + * task and end of a task.
   7.136 + *
   7.137 + * 
   7.138 + * ==========================  creation  ========================== 
   7.139 + * 
   7.140 + *At creation, make a task-stub.  Set the count of blocking propendents
   7.141 + * to the number of controlled arguments (a task can have
   7.142 + * arguments that are not controlled by the language, like simple integer
   7.143 + * inputs from the sequential portion. Note that all controlled arguments
   7.144 + * are pointers, and marked as controlled in the application code).
   7.145 + * 
   7.146 + *The controlled arguments are then processed one by one.
   7.147 + *Processing an argument means getting the hash of the pointer.  Then,
   7.148 + * looking up the hash entry.  (If none, create one).
   7.149 + *With the hash entry:
   7.150 + *
   7.151 + *If the arg is a reader, and the entry does not have an enabled
    7.152 + * non-finished writer, and the queue is empty (if the queue is not empty,
    7.153 + * it could hold earlier readers, then a queued writer, and now new readers
    7.154 + * that must also wait behind that writer):
   7.155 + *The reader is free.  So, decrement the blocking-propendent count in
   7.156 + * the task-stub. If the count is zero, then put the task-stub into the
   7.157 + * readyQ.
   7.158 + *At the same time, increment the hash-entry's count of enabled and
   7.159 + * non-finished readers. 
   7.160 + * 
   7.161 + *Otherwise, the reader is put into the hash-entry's Q of waiters
   7.162 + * 
   7.163 + *If the arg is a writer, plus the entry does not have a current writer,
   7.164 + * plus the number of enabled non-finished readers is zero, plus the Q is
    7.165 + * empty, then the writer is free.  Mark the entry as having an
   7.166 + * enabled and non-finished writer.  Decrement the blocking-propendent
   7.167 + * count in the writer's task-stub. If the count is zero, then put the
   7.168 + * task-stub into the readyQ.
   7.169 + * 
   7.170 + *Otherwise, put the writer into the entry's Q of waiters.
   7.171 + * 
   7.172 + *No matter what, if the hash entry was chained, put it at the start of
   7.173 + * the chain.  (Means no-longer-used pointers accumulate at end of chain,
   7.174 + * decide garbage collection of no-longer-used pointers later)
   7.175 + *
   7.176 + *  
   7.177 + * ========================== end of task ===========================
   7.178 + * 
   7.179 + *At the end of a task,
   7.180 + *The task's controlled arguments are processed one by one.
   7.181 + *Processing an argument means getting the hash of the pointer.  Then,
   7.182 + * looking up the hash entry (and putting the entry at the start of the
   7.183 + * chain, if there was a chain).
   7.184 + *With the hash entry:
   7.185 + *
   7.186 + *If the arg is a reader, then decrement the enabled and non-finished
   7.187 + * reader-count in the hash-entry. If the count becomes zero, then take
   7.188 + * the next entry from the Q. It should be a writer, or else there's a
   7.189 + * bug in this algorithm.
   7.190 + *Set the hash-entry to have an enabled non-finished writer.  Decrement
   7.191 + * the blocking-propendent-count of the writer's task-stub.  If the count
   7.192 + * has reached zero, then put the task-stub into the readyQ.
   7.193 + * 
   7.194 + *If the arg is a writer, then clear the enabled non-finished writer flag
   7.195 + * of the hash-entry. Take the next entry from the Q. 
   7.196 + *If it is a writer, then turn the flag back on.  Decrement the writer's
   7.197 + * blocking-propendent-count in its task-stub.  If it becomes zero, then
   7.198 + * put the task-stub into the readyQ.
   7.199 + *
   7.200 + *If it is a reader, then increment the hash-entry's count of enabled
   7.201 + * non-finished readers.  Decrement the blocking propendents count of the
   7.202 + * reader's task-stub.  If it reaches zero, then put the task-stub into the
   7.203 + * readyQ.
   7.204 + *Then repeat until encounter a writer -- put that writer back into the Q.
   7.205 + * 
   7.206 + *That should be it -- that should work.
   7.207 + */
   7.208 +void
   7.209 +handleSubmitTask( VSsSemReq *semReq, VSsSemEnv *semEnv )
   7.210 + { int64            key[] = {0,0,0};
   7.211 +   HashEntry       *rawHashEntry;
   7.212 +   VSsPointerEntry *ptrEntry; //contents of hash table entry for an arg pointer
   7.213 +   void           **args;
   7.214 +   VSsTaskStub     *taskStub;
   7.215 +   VSsTaskType     *taskType;
   7.216 +   VSsTaskStubCarrier *taskCarrier;
   7.217 +   
   7.218 +   HashTable *
   7.219 +   argPtrHashTbl = semEnv->argPtrHashTbl;
   7.220 +   
    7.221 +         DEBUG__printf1(dbgRqstHdlr,"SubmitTask request from processor %d",semReq->callingSlv->slaveID)
   7.222 + 
   7.223 +   /* ==========================  creation  ========================== 
   7.224 +    * 
   7.225 +    *At creation, make a task-stub.  Set the count of blocking propendents
   7.226 +    * to the number of controlled arguments (a task can have
   7.227 +    * arguments that are not controlled by the language, like simple integer
   7.228 +    * inputs from the sequential portion. Note that all controlled arguments
   7.229 +    * are pointers, and marked as controlled in the application code).
   7.230 +    */
   7.231 +   args     = semReq->args;
   7.232 +   taskType = semReq->taskType;
   7.233 +   taskStub = create_task_stub( taskType, args );//copies arg ptrs
   7.234 +   taskStub->numBlockingProp = taskType->numCtldArgs;
   7.235 +   
   7.236 +   /*The controlled arguments are then processed one by one.
   7.237 +    *Processing an argument means getting the hash of the pointer.  Then,
   7.238 +    * looking up the hash entry.  (If none, create one).
   7.239 +    */
   7.240 +   int32 argNum;
   7.241 +   for( argNum = 0; argNum < taskType->numCtldArgs; argNum++ )
   7.242 +    { 
   7.243 +      key[0] = (int64)args[argNum];
   7.244 +
   7.245 +      //key[2] acts as the 0 that terminates the string
   7.246 +//BUG!  need new hash function that works on *pointers with zeros in*
   7.247 +      /*If the hash entry was chained, put it at the
   7.248 +       * start of the chain.  (Means no-longer-used pointers accumulate
   7.249 +       * at end of chain, decide garbage collection later) */
    7.250 +      rawHashEntry = getEntryFromTable( (char *)key, argPtrHashTbl );
    7.251 +      ptrEntry = rawHashEntry? (VSsPointerEntry *)rawHashEntry->content : NULL;
    7.252 +      if( ptrEntry == NULL )  //no entry yet for this arg-pointer
    7.253 +       { ptrEntry = create_pointer_entry_and_insert( (char *)key, argPtrHashTbl );
    7.254 +       }
   7.255 +      
   7.256 +      /*Have the hash entry.
   7.257 +       *If the arg is a reader and the entry does not have an enabled
   7.258 +       * non-finished writer, and the queue is empty. */
   7.259 +      if( taskType->argTypes[argNum] == READER )
   7.260 +       { if( !ptrEntry->hasEnabledNonFinishedWriter && 
   7.261 +              isEmptyPrivQ( ptrEntry->waitersQ ) )
   7.262 +          { /*The reader is free.  So, decrement the blocking-propendent
   7.263 +             * count in the task-stub. If the count is zero, then put the
   7.264 +             * task-stub into the readyQ.  At the same time, increment
   7.265 +             * the hash-entry's count of enabled and non-finished readers.*/
   7.266 +            taskStub->numBlockingProp -= 1;
   7.267 +            if( taskStub->numBlockingProp == 0 )
   7.268 +             { writeQ( taskStub, semEnv->taskReadyQ );
   7.269 +             }
   7.270 +            ptrEntry->numEnabledNonDoneReaders += 1;
   7.271 +          }
   7.272 +         else
   7.273 +          { /*Otherwise, the reader is put into the hash-entry's Q of
   7.274 +             * waiters*/
   7.275 +            taskCarrier = create_task_carrier( taskStub, argNum, READER );
   7.276 +            writeQ( taskCarrier, ptrEntry->waitersQ );
   7.277 +          }
   7.278 +       }
   7.279 +      else //arg is a writer
   7.280 +       { /*the arg is a writer, plus the entry does not have a current
   7.281 +          * writer, plus the number of enabled non-finished readers is
   7.282 +          * zero, (the Q must be empty, else bug!) then the writer is free*/
   7.283 +         if( !ptrEntry->hasEnabledNonFinishedWriter &&
   7.284 +              ptrEntry->numEnabledNonDoneReaders == 0 )
    7.285 +          { /*Mark the entry as having an enabled and non-finished writer.
    7.286 +              * Decrement the blocking-propendent count in the writer's
   7.287 +              * task-stub. If the count is zero, then put the task-stub
   7.288 +              * into the readyQ.*/
   7.289 +            taskStub->numBlockingProp -= 1;
   7.290 +            if( taskStub->numBlockingProp == 0 )
   7.291 +             { writeQ( taskStub, semEnv->taskReadyQ );
   7.292 +             }
   7.293 +            ptrEntry->hasEnabledNonFinishedWriter = TRUE;
   7.294 +          }
   7.295 +         else
   7.296 +          {/*Otherwise, put the writer into the entry's Q of waiters.*/
   7.297 +            taskCarrier = create_task_carrier( taskStub, argNum, WRITER );
   7.298 +            writeQ( taskCarrier, ptrEntry->waitersQ );            
   7.299 +          }
   7.300 +       }
   7.301 +    } //for argNum
   7.302 +   
   7.303 +   
   7.304 +   resume_slaveVP( semReq->callingSlv, semEnv );
   7.305 +
   7.306 +   return;
   7.307 + }
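The key[] built from the raw arg-pointer is handed to a string-keyed hash table, so any zero byte inside the pointer value truncates the key -- that is what the "BUG!" note inside handleSubmitTask (and again in handleEndTask) flags. One possible repair, sketched here as an assumption rather than the committed fix, is to hash the 64-bit pointer value directly with an integer mixer instead of treating it as a zero-terminated string:

#include <stdint.h>

/* Sketch: bucket index from a raw argument pointer.  The constants are the
 * 64-bit MurmurHash3 finalizer; any good integer mixer would do. */
static inline uint32_t
hash_arg_pointer( void *argPtr, uint32_t numBuckets )
 { uint64_t h = (uint64_t)(uintptr_t)argPtr;
   h ^= h >> 33;
   h *= 0xff51afd7ed558ccdULL;
   h ^= h >> 33;
   h *= 0xc4ceb9fe1a85ec53ULL;
   h ^= h >> 33;
   return (uint32_t)( h % numBuckets );
 }

Wiring this in would mean either extending the hash-table interface to accept fixed-length binary keys, or encoding the pointer into a zero-free byte string before the getEntryFromTable calls.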
   7.308 +
   7.309 +
   7.310 +
   7.311 +/* ========================== end of task ===========================
   7.312 + * 
   7.313 + *At the end of a task,
   7.314 + *The task's controlled arguments are processed one by one.
   7.315 + *Processing an argument means getting the hash of the pointer.  Then,
   7.316 + * looking up the hash entry (and putting the entry at the start of the
   7.317 + * chain, if there was a chain).
   7.318 + *With the hash entry:
   7.319 + *
   7.320 + *If the arg is a reader, then decrement the enabled and non-finished
   7.321 + * reader-count in the hash-entry. If the count becomes zero, then take
   7.322 + * the next entry from the Q. It should be a writer, or else there's a
   7.323 + * bug in this algorithm.
   7.324 + *Set the hash-entry to have an enabled non-finished writer.  Decrement
   7.325 + * the blocking-propendent-count of the writer's task-stub.  If the count
   7.326 + * has reached zero, then put the task-stub into the readyQ.
   7.327 + * 
   7.328 + *If the arg is a writer, then clear the enabled non-finished writer flag
   7.329 + * of the hash-entry. Take the next entry from the waiters Q. 
   7.330 + *If it is a writer, then turn the flag back on.  Decrement the writer's
   7.331 + * blocking-propendent-count in its task-stub.  If it becomes zero, then
   7.332 + * put the task-stub into the readyQ.
   7.333 + *
   7.334 + *If waiter is a reader, then do a loop, getting all waiting readers.
   7.335 + * For each, increment the hash-entry's count of enabled
   7.336 + * non-finished readers.  Decrement the blocking propendents count of the
   7.337 + * reader's task-stub.  If it reaches zero, then put the task-stub into the
   7.338 + * readyQ.
   7.339 + *Repeat until encounter a writer -- put that writer back into the Q.
   7.340 + */
   7.341 +void
   7.342 +handleEndTask( VSsSemReq *semReq, VSsSemEnv *semEnv )
   7.343 + { int64             key[] = {0,0,0};
   7.344 +   HashEntry        *rawHashEntry;
   7.345 +   VSsPointerEntry  *entry; //contents of hash table entry for an arg pointer
   7.346 +   void            **args;
   7.347 +   VSsTaskStub      *endingTaskStub, *waitingTaskStub;
   7.348 +   VSsTaskType      *endingTaskType;
    7.349 +   VSsTaskStubCarrier *waitingTaskCarrier;
   7.350 +   
   7.351 +   HashTable *
   7.352 +   ptrHashTbl = semEnv->argPtrHashTbl;
   7.353 +   
    7.354 +         DEBUG__printf1(dbgRqstHdlr,"EndTask request from processor %d",semReq->callingSlv->slaveID)
   7.355 + 
   7.356 +   /* ========================== end of task ===========================
   7.357 +    *At the end of a task, the task-stub is sent in the request.
   7.358 +    */
   7.359 +   endingTaskStub =
   7.360 +                ((VSsSemData *)semReq->callingSlv->semanticData)->taskStub;
   7.361 +   args           = endingTaskStub->args;
   7.362 +   endingTaskType = endingTaskStub->taskType;
   7.363 +   
   7.364 +   /*The task's controlled arguments are processed one by one.
   7.365 +    *Processing an argument means getting the hash of the pointer.
   7.366 +    */
   7.367 +   int32 argNum;
   7.368 +   for( argNum = 0; argNum < endingTaskType->numCtldArgs; argNum++ )
   7.369 +    { 
   7.370 +      key[0] = (int64)args[argNum];
   7.371 +
   7.372 +      //key[2] acts as the 0 that terminates the string
   7.373 +//BUG!  need new hash function that works on *pointers with zeros in*
   7.374 +      /*If the hash entry was chained, put it at the
   7.375 +       * start of the chain.  (Means no-longer-used pointers accumulate
   7.376 +       * at end of chain, decide garbage collection later) 
   7.377 +       *NOTE: could put pointer directly to hash entry into task-stub 
   7.378 +       * when do lookup during task creation.*/
   7.379 +      rawHashEntry = getEntryFromTable( (char *)key, ptrHashTbl );
    7.380 +      entry = rawHashEntry? (VSsPointerEntry *)rawHashEntry->content : NULL;
   7.381 +      if( entry == NULL ) 
   7.382 +          VMS_App__throw_exception("hash entry NULL", NULL, NULL);
   7.383 +      
   7.384 +      /*With the hash entry:  If the ending task was reader of this arg*/
   7.385 +      if( endingTaskType->argTypes[argNum] == READER )
   7.386 +       { /*then decrement the enabled and non-finished reader-count in
   7.387 +          * the hash-entry. */ 
   7.388 +         entry->numEnabledNonDoneReaders -= 1;
   7.389 +         
   7.390 +         /*If the count becomes zero, then take the next entry from the Q. It
   7.391 +          * should be a writer, or else there's a bug in this algorithm.*/
    7.392 +         if( entry->numEnabledNonDoneReaders == 0 &&
    7.393 +             !isEmptyPrivQ( entry->waitersQ ) )
    7.394 +          { waitingTaskCarrier = readQ( entry->waitersQ );
    7.395 +            waitingTaskStub = waitingTaskCarrier->taskStub;
   7.395 +            
    7.396 +            if( waitingTaskCarrier->isReader )  //head of Q must be a writer here
    7.397 +               VMS_App__throw_exception( "expected writer at head of waitersQ", NULL, NULL );
   7.398 +                   
   7.399 +            /*Set the hash-entry to have an enabled non-finished writer.*/
   7.400 +            entry->hasEnabledNonFinishedWriter = TRUE;
   7.401 +            
   7.402 +            /* Decrement the blocking-propendent-count of the writer's
   7.403 +             * task-stub.  If the count has reached zero, then put the
   7.404 +             * task-stub into the readyQ.*/
   7.405 +            waitingTaskStub->numBlockingProp -= 1;
   7.406 +            if( waitingTaskStub->numBlockingProp == 0 )
   7.407 +             { writeQ( waitingTaskStub, semEnv->taskReadyQ );
   7.408 +             }
   7.409 +          }
   7.410 +       }
   7.411 +      else /*the ending task is a writer of this arg*/ 
   7.412 +       { /*clear the enabled non-finished writer flag of the hash-entry.*/
   7.413 +         entry->hasEnabledNonFinishedWriter = FALSE;
   7.414 +         
    7.415 +         /*Take the next waiter from the hash-entry's Q (if it has one).*/
    7.416 +         if( isEmptyPrivQ( entry->waitersQ ) ) continue;  //no waiters on this arg
    7.417 +         waitingTaskCarrier = readQ( entry->waitersQ );
    7.418 +         waitingTaskStub = waitingTaskCarrier->taskStub;
   7.418 +         
   7.419 +         /*If task is a writer of this hash-entry's pointer*/
    7.420 +         if( !waitingTaskCarrier->isReader ) 
   7.421 +          { /* then turn the flag back on.*/
   7.422 +            entry->hasEnabledNonFinishedWriter = TRUE;
   7.423 +            /*Decrement the writer's blocking-propendent-count in task-stub
   7.424 +             * If it becomes zero, then put the task-stub into the readyQ.*/
   7.425 +            waitingTaskStub->numBlockingProp -= 1;
   7.426 +            if( waitingTaskStub->numBlockingProp == 0 )
   7.427 +             { writeQ( waitingTaskStub, semEnv->taskReadyQ );
   7.428 +             }
   7.429 +          }
   7.430 +         else
    7.431 +          { /*Waiting task is a reader, so loop over all waiting readers
   7.432 +             * until encounter a writer or waitersQ is empty*/
   7.433 +            while( TRUE ) /*The checks guarantee have a waiting reader*/
   7.434 +             { /*Increment the hash-entry's count of enabled non-finished
   7.435 +                * readers.*/
   7.436 +               entry->numEnabledNonDoneReaders += 1;
   7.437 +
   7.438 +               /*Decrement the blocking propendents count of the reader's
   7.439 +                * task-stub.  If it reaches zero, then put the task-stub
   7.440 +                * into the readyQ.*/
   7.441 +               waitingTaskStub->numBlockingProp -= 1;
   7.442 +               if( waitingTaskStub->numBlockingProp == 0 )
   7.443 +                { writeQ( waitingTaskStub, semEnv->taskReadyQ );
   7.444 +                }
   7.445 +               /*Get next waiting task*/
   7.446 +               waitingTaskCarrier = peekQ( entry->waitersQ );
   7.447 +               if( waitingTaskCarrier == NULL ) break;
    7.448 +               if( !waitingTaskCarrier->isReader ) break;
   7.449 +               waitingTaskCarrier = readQ( entry->waitersQ );               
   7.450 +               waitingTaskStub = waitingTaskCarrier->taskStub;
   7.451 +             }//while waiter is a reader
   7.452 +          }//first waiting task is a reader
   7.453 +       }//check of ending task, whether writer or reader
   7.454 +    }//for argnum in ending task
   7.455 +   
   7.456 +   //done ending the task, now free the stub + args copy
   7.457 +   VMS_PI__free( endingTaskStub );
   7.458 +   
   7.459 +   //Resume the slave that animated the task -- assigner will give new task
   7.460 +   ((VSsSemData *)semReq->callingSlv->semanticData)->needsTaskAssigned =
   7.461 +      TRUE;
   7.462 +   resume_slaveVP( semReq->callingSlv, semEnv );
   7.463 +
   7.464 +   return;
   7.465 + }
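Taken together, handleSubmitTask and handleEndTask enforce StarSs-style ordering per argument pointer: a writer runs alone, and readers between two writers may run concurrently. A hypothetical application-level view follows; the wrapper names and task-type names here are illustrative assumptions, since the real entry points are declared in VSs.h and VSs.c, outside this hunk.

/* Hypothetical wrapper names -- see the note above. */
double *vec = malloc( 1024 * sizeof(double) );
void   *args[] = { vec };

VSs__submit_task( &fillType,  args );  //arg 0 declared WRITER in fillType
VSs__submit_task( &sumType,   args );  //arg 0 declared READER
VSs__submit_task( &checkType, args );  //arg 0 declared READER
VSs__submit_task( &scaleType, args );  //arg 0 declared WRITER

/* Submit enables only the fill task; both readers queue on vec's pointer
 * entry, and the scale task queues behind them.  When the fill task's
 * end-task request is handled, both readers are enabled together; the
 * scale task is enabled only after the last reader's end-task request. */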
   7.466 +
   7.467 +
   7.468 +//==========================================================================
   7.469  /*
   7.470   */
   7.471  void
   7.472 -handleSubmitTask( VOMPSemReq *semReq, VOMPSemEnv *semEnv )
   7.473 - { SlaveVP   *sendPr;
   7.474 -   int          key[] = {0,0,0};
   7.475 -   VOMPSemReq *waitingReq;
   7.476 -   HashEntry   *entry;
   7.477 -   HashTable   *commHashTbl = semEnv->commHashTbl;
   7.478 -   
   7.479 -         DEBUG__printf1(dbgRqstHdlr,"SendType request from processor %d",semReq->sendPr->slaveID)
   7.480 - 
   7.481 -   receivePr = semReq->receivePr; //For "send", know both send & recv procrs
   7.482 -   sendPr    = semReq->sendPr;
   7.483 -
   7.484 -         //TODO: handle transfer of msg-locs ownership
   7.485 -         //TODO: hash table implemented such that using "addEntry" or
   7.486 -         //  "addValue" to table causes the *value* in old entry to be
   7.487 -         //  *freed* -- this is bad.  Want to stack up values in a linked
   7.488 -         //  list when multiple have the same key.
   7.489 -
   7.490 -      //TODO: use a faster hash function -- see notes in intelligence gather
   7.491 -   key[0] = (int)receivePr->slaveID;
   7.492 -   key[1] = (int)(semReq->msgType);
   7.493 - //key[2] acts as the 0 that terminates the string
   7.494 -
   7.495 -   entry = giveEntryElseInsertReqst( (char *)key, semReq, commHashTbl);
   7.496 -   if( entry == NULL ) return;  //was just inserted
   7.497 -
   7.498 -   waitingReq = (VOMPSemReq *)entry->content;
   7.499 -
   7.500 -      //At this point, know have waiting request(s) -- either sends or recv
   7.501 -      //Note, can only have max of one receive waiting, and cannot have both
   7.502 -      // sends and receives waiting (they would have paired off)
   7.503 -      // but can have multiple sends from diff sending VPs, all same msg-type
   7.504 -   if( waitingReq->reqType == send_type )
   7.505 -    {    //waiting request is another send, so stack this up on list
   7.506 -         // but first clone the sending request so it persists.
   7.507 -      VOMPSemReq *clonedReq = cloneReq( semReq );
   7.508 -      clonedReq-> nextReqInHashEntry = waitingReq->nextReqInHashEntry;
   7.509 -      waitingReq->nextReqInHashEntry = clonedReq;
   7.510 -         DEBUG__printf2( dbgRqstHdlr, "linked requests: %p, %p ", clonedReq,\
   7.511 -                                                                 waitingReq )
   7.512 -      return;
   7.513 -    }
   7.514 -   else
   7.515 -    {    
   7.516 -       #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   7.517 -        Dependency newd;
   7.518 -        newd.from_vp = sendPr->slaveID;
   7.519 -        newd.from_task = sendPr->assignCount;
   7.520 -        newd.to_vp = receivePr->slaveID;
   7.521 -        newd.to_task = receivePr->assignCount +1;
   7.522 -        //(newd,semEnv->commDependenciesList);  
   7.523 -        addToListOfArrays(Dependency,newd,semEnv->dynDependenciesList);  
   7.524 -                int32 groupId = semReq->msgType;
   7.525 -        if(semEnv->ntonGroupsInfo->numInArray <= groupId){
   7.526 -            makeHighestDynArrayIndexBeAtLeast(semEnv->ntonGroupsInfo, groupId);
   7.527 -        }
   7.528 -        if(semEnv->ntonGroups[groupId] == NULL){
   7.529 -            semEnv->ntonGroups[groupId] = new_NtoN(groupId);
   7.530 -        }
   7.531 -        Unit u;
   7.532 -        u.vp = sendPr->slaveID;
   7.533 -        u.task = sendPr->assignCount;
   7.534 -        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->senders);
   7.535 -        u.vp = receivePr->slaveID;
   7.536 -        u.task = receivePr->assignCount +1;
   7.537 -        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->receivers);
   7.538 -       #endif
   7.539 -
   7.540 -       //waiting request is a receive, so it pairs to this send
   7.541 -         //First, remove the waiting receive request from the entry
   7.542 -      entry->content = waitingReq->nextReqInHashEntry;
   7.543 -      VMS_PI__free( waitingReq ); //Don't use contents -- so free it
   7.544 -      
   7.545 -      if( entry->content == NULL )
   7.546 -       {    //TODO: mod hash table to double-link, so can delete entry from
   7.547 -            // table without hashing the key and looking it up again
   7.548 -         deleteEntryFromTable( entry->key, commHashTbl );  //frees hashEntry
   7.549 -       }
   7.550 -      
   7.551 -         //attach msg that's in this send request to receiving procr
   7.552 -         // when comes back from suspend will have msg in dataRetFromReq
   7.553 -      receivePr->dataRetFromReq = semReq->msg;
   7.554 -
   7.555 -         //bring both processors back from suspend
   7.556 -      resume_slaveVP( sendPr,    semEnv );
   7.557 -      resume_slaveVP( receivePr, semEnv );
   7.558 -
   7.559 -      return;
   7.560 -    }
   7.561 - }
   7.562 -
   7.563 -
   7.564 -/*Looks like can make single handler for both sends..
   7.565 - */
   7.566 -//TODO: combine both send handlers into single handler
   7.567 -void
   7.568 -handleSendFromTo( VOMPSemReq *semReq, VOMPSemEnv *semEnv)
   7.569 - { SlaveVP   *sendPr, *receivePr;
   7.570 -   int          key[] = {0,0,0};
   7.571 -   VOMPSemReq *waitingReq;
   7.572 -   HashEntry   *entry;
   7.573 -   HashTable   *commHashTbl = semEnv->commHashTbl;
   7.574 -
   7.575 -         DEBUG__printf2(dbgRqstHdlr,"SendFromTo request from processor %d to %d",semReq->sendPr->slaveID,semReq->receivePr->slaveID)
   7.576 -   
   7.577 -   receivePr = semReq->receivePr; //For "send", know both send & recv procrs
   7.578 -   sendPr    = semReq->sendPr;    
   7.579 -   
   7.580 -       
   7.581 -   key[0] = (int)receivePr->slaveID;
   7.582 -   key[1] = (int)sendPr->slaveID;
   7.583 - //key[2] acts at the 0 that terminates the string
   7.584 -
   7.585 -   entry = giveEntryElseInsertReqst( (char *)key, semReq, commHashTbl);
   7.586 -   if( entry == NULL ) return;  //was just inserted
   7.587 -
   7.588 -   waitingReq = (VOMPSemReq *)entry->content;
   7.589 -
   7.590 -      //At this point, know have waiting request(s) -- either sends or recv
   7.591 -   if( waitingReq->reqType == send_from_to )
   7.592 -    { printf("\n ERROR: shouldn't be two send-from-tos waiting \n");
   7.593 -    }
   7.594 -   else
   7.595 -    {    //waiting request is a receive, so it completes pair with this send
   7.596 -      #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   7.597 -        Dependency newd;
   7.598 -        newd.from_vp = sendPr->slaveID;
   7.599 -        newd.from_task = sendPr->assignCount;
   7.600 -        newd.to_vp = receivePr->slaveID;
   7.601 -        newd.to_task = receivePr->assignCount +1;
   7.602 -        //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
   7.603 -        addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);   
   7.604 -      #endif 
   7.605 -         //First, remove the waiting receive request from the entry
   7.606 -      entry->content = waitingReq->nextReqInHashEntry;
   7.607 -      VMS_PI__free( waitingReq ); //Don't use contents -- so free it
   7.608 -      
   7.609 -         //can only be one waiting req for "from-to" semantics
   7.610 -      if( entry->content != NULL )
   7.611 -       {
   7.612 -         printf("\nERROR in handleSendFromTo\n");
   7.613 -       }
   7.614 -      deleteEntryFromTable( entry->key, commHashTbl );  //frees HashEntry
   7.615 -
   7.616 -         //attach msg that's in this send request to receiving procr
   7.617 -         // when comes back from suspend, will have msg in dataRetFromReq
   7.618 -      receivePr->dataRetFromReq = semReq->msg;
   7.619 -
   7.620 -         //bring both processors back from suspend
   7.621 -      resume_slaveVP( sendPr,    semEnv );
   7.622 -      resume_slaveVP( receivePr, semEnv );
   7.623 -            
   7.624 -      return;
   7.625 -    }
   7.626 - }
   7.627 -
   7.628 -
   7.629 -
   7.630 -//==============================  Receives  ===========================
   7.631 -//
   7.632 -/*Removed this one for now, because forces either a search or going to a
   7.633 - * two-level hash table, where one level the key is the receivePr, in the
   7.634 - * other level, the key is the type.
   7.635 - *So, each dest procr that either does a receive_type or that a send_type
   7.636 - * targets it, would have a hash table created just for it and placed
   7.637 - * into the first-level hash table entry for that receive procr.
   7.638 - *Then, doing a receive_type first looks up entry for receive procr in first
   7.639 - * table, gets the type-table out of that entry, and does a second lookup
   7.640 - * in the type-table.
   7.641 - *Doing a receive from-to looks up in the first table, gets the second table
   7.642 - * hashed on "from" procr.
   7.643 - *Doing a receive_any looks up in the first table, then looks to see if
   7.644 - * either of the hash tables have any entries -- would then have to do a
   7.645 - * linear search through the hash-table's array for the first non-empty
   7.646 - * spot
   7.647 - *Yuck.
   7.648 - *
   7.649 - *Alternatively, could keep two hash tables updated all the time -- one that
   7.650 - * does the receive_type and receive_from_to and a second that does
   7.651 - * receive_any -- would only hash the second table by the receive procr.
   7.652 - * When remove from one table, keep back-links to both tables, so can also
   7.653 - * quickly remove from other table.
   7.654 - *Cost is doing two hash-table lookups for every insert.
   7.655 - * If ever add receive_any, looking like this second option easier and even
   7.656 - * less costly.
   7.657 - */
   7.658 -void
   7.659 -handleReceiveAny( VOMPSemReq *semReq, VOMPSemEnv *semEnv)
   7.660 - {
   7.661 - 
   7.662 - }
   7.663 -
   7.664 -
   7.665 -void
   7.666 -handleReceiveType( VOMPSemReq *semReq, VOMPSemEnv *semEnv)
   7.667 - { SlaveVP   *sendPr, *receivePr;
   7.668 -   int          key[] = {0,0,0};
   7.669 -   VOMPSemReq *waitingReq;
   7.670 -   HashEntry   *entry;
   7.671 -   HashTable   *commHashTbl = semEnv->commHashTbl;
   7.672 -
   7.673 -   receivePr = semReq->receivePr;
   7.674 -
   7.675 -         DEBUG__printf1(dbgRqstHdlr,"ReceiveType request from processor %d",receivePr->slaveID)
   7.676 -   
   7.677 -   key[0] = (int)receivePr->slaveID;
   7.678 -   key[1] = (int)(semReq->msgType);
   7.679 - //key[2] acts as the 0 that terminates the string
   7.680 -
   7.681 -
   7.682 -   entry = giveEntryElseInsertReqst((char*)key, semReq, commHashTbl);//clones
   7.683 -   if( entry == NULL ) return;  //was just inserted
   7.684 -
   7.685 -   waitingReq = (VOMPSemReq *)entry->content;  //previously cloned by insert
   7.686 -
   7.687 -      //At this point, know have waiting request(s) -- should be send(s)
   7.688 -   if( waitingReq->reqType == send_type )
   7.689 -    {    //waiting request is a send, so pair it with this receive
   7.690 -         //first, remove the waiting send request from the list in entry
   7.691 -
   7.692 -      entry->content = waitingReq->nextReqInHashEntry;
   7.693 -      if( entry->content == NULL )
   7.694 -       { deleteEntryFromTable( entry->key, commHashTbl );  //frees HashEntry
   7.695 -       }
   7.696 -      
   7.697 -         //attach msg that's in the send request to receiving procr
   7.698 -         // when comes back from suspend, will have msg in dataRetFromReq
   7.699 -      receivePr->dataRetFromReq = waitingReq->msg;
   7.700 -
   7.701 -         //bring both processors back from suspend
   7.702 -      sendPr = waitingReq->sendPr;
   7.703 -      VMS_PI__free( waitingReq );
   7.704 -
   7.705 -       #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   7.706 -        Dependency newd;
   7.707 -        newd.from_vp = sendPr->slaveID;
   7.708 -        newd.from_task = sendPr->assignCount;
   7.709 -        newd.to_vp = receivePr->slaveID;
   7.710 -        newd.to_task = receivePr->assignCount +1;
   7.711 -        //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
   7.712 -        addToListOfArrays(Dependency,newd,semEnv->dynDependenciesList); 
   7.713 -        int32 groupId = semReq->msgType;
   7.714 -        if(semEnv->ntonGroupsInfo->numInArray <= groupId){
   7.715 -            makeHighestDynArrayIndexBeAtLeast(semEnv->ntonGroupsInfo, groupId);
   7.716 -        }
   7.717 -        if(semEnv->ntonGroups[groupId] == NULL){
   7.718 -            semEnv->ntonGroups[groupId] = new_NtoN(groupId);
   7.719 -        }
   7.720 -        Unit u;
   7.721 -        u.vp = sendPr->slaveID;
   7.722 -        u.task = sendPr->assignCount;
   7.723 -        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->senders);
   7.724 -        u.vp = receivePr->slaveID;
   7.725 -        u.task = receivePr->assignCount +1;
   7.726 -        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->receivers);
   7.727 -       #endif
   7.728 -      
   7.729 -      resume_slaveVP( sendPr,    semEnv );
   7.730 -      resume_slaveVP( receivePr, semEnv );
   7.731 -
   7.732 -      return;
   7.733 -    }
   7.734 -   printf("\nLang Impl Error: Should never be two waiting receives!\n");
   7.735 - }
   7.736 -
   7.737 -
   7.738 -/*
   7.739 - */
   7.740 -void
   7.741 -handleReceiveFromTo( VOMPSemReq *semReq, VOMPSemEnv *semEnv)
   7.742 - { SlaveVP     *sendPr, *receivePr;
   7.743 -   int          key[] = {0,0,0};
   7.744 -   VOMPSemReq   *waitingReq;
   7.745 -   HashEntry   *entry;
   7.746 -   HashTable   *commHashTbl = semEnv->commHashTbl;
   7.747 -
   7.748 -         DEBUG__printf2(dbgRqstHdlr,"ReceiveFromTo %d : %d",semReq->sendPr->slaveID,semReq->receivePr->slaveID)
   7.749 -   
   7.750 -   receivePr = semReq->receivePr;
   7.751 -   sendPr    = semReq->sendPr;    //for receive from-to, know send procr
   7.752 -
   7.753 -   key[0] = (int)receivePr->slaveID;
   7.754 -   key[1] = (int)sendPr->slaveID;
   7.755 - //key[2] acts at the 0 that terminates the string
   7.756 -
   7.757 -   entry = giveEntryElseInsertReqst( (char *)key, semReq, commHashTbl);
   7.758 -   if( entry == NULL ) return;  //was just inserted
   7.759 -
   7.760 -   waitingReq = (VOMPSemReq *)entry->content;
   7.761 -
   7.762 -      //At this point, know have waiting request(s) -- should be send(s)
   7.763 -   if( waitingReq->reqType == send_from_to )
   7.764 -    {    //waiting request is a send, so pair it with this receive
   7.765 -      #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   7.766 -        Dependency newd;
   7.767 -        newd.from_vp = sendPr->slaveID;
   7.768 -        newd.from_task = sendPr->assignCount;
   7.769 -        newd.to_vp = receivePr->slaveID;
   7.770 -        newd.to_task = receivePr->assignCount +1;
   7.771 -        //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
   7.772 -        addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);    
   7.773 -      #endif  
   7.774 -         //For from-to, should only ever be a single reqst waiting tobe paird
   7.775 -      entry->content = waitingReq->nextReqInHashEntry;
   7.776 -      if( entry->content != NULL ) printf("\nERROR in handleRecvFromTo\n");
   7.777 -      deleteEntryFromTable( entry->key, commHashTbl );  //frees entry too
   7.778 -
   7.779 -         //attach msg that's in the send request to receiving procr
   7.780 -         // when comes back from suspend, will have msg in dataRetFromReq
   7.781 -      receivePr->dataRetFromReq = waitingReq->msg;
   7.782 -
   7.783 -         //bring both processors back from suspend
   7.784 -      sendPr = waitingReq->sendPr;
   7.785 -      VMS_PI__free( waitingReq );
   7.786 -
   7.787 -      resume_slaveVP( sendPr,    semEnv );
   7.788 -      resume_slaveVP( receivePr, semEnv );
   7.789 -
   7.790 -      return;
   7.791 -    }
   7.792 -   printf("\nLang Impl Error: Should never be two waiting receives!\n");
   7.793 - }
   7.794 -
   7.795 -
   7.796 -
   7.797 -//===============================================
   7.798 -void
   7.799 -handleTransferTo( VOMPSemReq *semReq, VOMPSemEnv *semEnv)
   7.800 - {
   7.801 -
   7.802 - }
   7.803 -
   7.804 -void
   7.805 -handleTransferOut( VOMPSemReq *semReq, VOMPSemEnv *semEnv)
   7.806 - {
   7.807 -
   7.808 - }
   7.809 -
   7.810 -
   7.811 -/*
   7.812 - */
   7.813 -void
   7.814 -handleMalloc( VOMPSemReq *semReq, SlaveVP *requestingPr, VOMPSemEnv *semEnv )
   7.815 +handleMalloc( VSsSemReq *semReq, SlaveVP *requestingPr, VSsSemEnv *semEnv )
   7.816   { void *ptr;
   7.817   
   7.818        DEBUG__printf1(dbgRqstHdlr,"Malloc request from processor %d",requestingPr->slaveID)
   7.819 @@ -422,7 +478,7 @@
   7.820  /*
   7.821   */
   7.822  void
   7.823 -handleFree( VOMPSemReq *semReq, SlaveVP *requestingPr, VOMPSemEnv *semEnv )
   7.824 +handleFree( VSsSemReq *semReq, SlaveVP *requestingPr, VSsSemEnv *semEnv )
   7.825   {
   7.826           DEBUG__printf1(dbgRqstHdlr,"Free request from processor %d",requestingPr->slaveID)
   7.827     VMS_PI__free( semReq->ptrToFree );
   7.828 @@ -436,8 +492,8 @@
   7.829   * end-label.  Else, sets flag and resumes normally.
   7.830   */
   7.831  void inline
   7.832 -handleStartSingleton_helper( VOMPSingleton *singleton, SlaveVP *reqstingPr,
   7.833 -                             VOMPSemEnv    *semEnv )
   7.834 +handleStartSingleton_helper( VSsSingleton *singleton, SlaveVP *reqstingPr,
   7.835 +                             VSsSemEnv    *semEnv )
   7.836   {
   7.837     if( singleton->hasFinished )
   7.838      {    //the code that sets the flag to true first sets the end instr addr
   7.839 @@ -459,22 +515,22 @@
   7.840      }
   7.841   }
   7.842  void inline
   7.843 -handleStartFnSingleton( VOMPSemReq *semReq, SlaveVP *requestingPr,
   7.844 -                      VOMPSemEnv *semEnv )
   7.845 - { VOMPSingleton *singleton;
   7.846 +handleStartFnSingleton( VSsSemReq *semReq, SlaveVP *requestingPr,
   7.847 +                      VSsSemEnv *semEnv )
   7.848 + { VSsSingleton *singleton;
   7.849           DEBUG__printf1(dbgRqstHdlr,"StartFnSingleton request from processor %d",requestingPr->slaveID)
   7.850  
   7.851     singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
   7.852     handleStartSingleton_helper( singleton, requestingPr, semEnv );
   7.853   }
   7.854  void inline
   7.855 -handleStartDataSingleton( VOMPSemReq *semReq, SlaveVP *requestingPr,
   7.856 -                      VOMPSemEnv *semEnv )
   7.857 - { VOMPSingleton *singleton;
   7.858 +handleStartDataSingleton( VSsSemReq *semReq, SlaveVP *requestingPr,
   7.859 +                      VSsSemEnv *semEnv )
   7.860 + { VSsSingleton *singleton;
   7.861  
   7.862           DEBUG__printf1(dbgRqstHdlr,"StartDataSingleton request from processor %d",requestingPr->slaveID)
   7.863     if( *(semReq->singletonPtrAddr) == NULL )
   7.864 -    { singleton                 = VMS_PI__malloc( sizeof(VOMPSingleton) );
   7.865 +    { singleton                 = VMS_PI__malloc( sizeof(VSsSingleton) );
   7.866        singleton->waitQ          = makeVMSQ();
   7.867        singleton->endInstrAddr   = 0x0;
   7.868        singleton->hasBeenStarted = FALSE;
   7.869 @@ -488,8 +544,8 @@
   7.870  
   7.871  
   7.872  void inline
   7.873 -handleEndSingleton_helper( VOMPSingleton *singleton, SlaveVP *requestingPr,
   7.874 -                           VOMPSemEnv    *semEnv )
   7.875 +handleEndSingleton_helper( VSsSingleton *singleton, SlaveVP *requestingPr,
   7.876 +                           VSsSemEnv    *semEnv )
   7.877   { PrivQueueStruc *waitQ;
   7.878     int32           numWaiting, i;
   7.879     SlaveVP      *resumingPr;
   7.880 @@ -514,10 +570,10 @@
   7.881  
   7.882  }
   7.883  void inline
   7.884 -handleEndFnSingleton( VOMPSemReq *semReq, SlaveVP *requestingPr,
   7.885 -                        VOMPSemEnv *semEnv )
   7.886 +handleEndFnSingleton( VSsSemReq *semReq, SlaveVP *requestingPr,
   7.887 +                        VSsSemEnv *semEnv )
   7.888   {
   7.889 -   VOMPSingleton   *singleton;
   7.890 +   VSsSingleton   *singleton;
   7.891  
   7.892           DEBUG__printf1(dbgRqstHdlr,"EndFnSingleton request from processor %d",requestingPr->slaveID)
   7.893     
   7.894 @@ -525,10 +581,10 @@
   7.895     handleEndSingleton_helper( singleton, requestingPr, semEnv );
   7.896    }
   7.897  void inline
   7.898 -handleEndDataSingleton( VOMPSemReq *semReq, SlaveVP *requestingPr,
   7.899 -                        VOMPSemEnv *semEnv )
   7.900 +handleEndDataSingleton( VSsSemReq *semReq, SlaveVP *requestingPr,
   7.901 +                        VSsSemEnv *semEnv )
   7.902   {
   7.903 -   VOMPSingleton   *singleton;
   7.904 +   VSsSingleton   *singleton;
   7.905  
   7.906           DEBUG__printf1(dbgRqstHdlr,"EndDataSingleton request from processor %d",requestingPr->slaveID)
   7.907     
   7.908 @@ -541,7 +597,7 @@
   7.909   * pointer out of the request and call it, then resume the VP.
   7.910   */
   7.911  void
   7.912 -handleAtomic( VOMPSemReq *semReq, SlaveVP *requestingPr, VOMPSemEnv *semEnv )
   7.913 +handleAtomic( VSsSemReq *semReq, SlaveVP *requestingPr, VSsSemEnv *semEnv )
   7.914   {
   7.915           DEBUG__printf1(dbgRqstHdlr,"Atomic request from processor %d",requestingPr->slaveID)
   7.916     semReq->fnToExecInMaster( semReq->dataForFn );
   7.917 @@ -563,9 +619,9 @@
   7.918   *If NULL, then write requesting into the field and resume.
   7.919   */
   7.920  void
   7.921 -handleTransStart( VOMPSemReq *semReq, SlaveVP *requestingPr,
   7.922 -                  VOMPSemEnv *semEnv )
   7.923 - { VOMPSemData *semData;
   7.924 +handleTransStart( VSsSemReq *semReq, SlaveVP *requestingPr,
   7.925 +                  VSsSemEnv *semEnv )
   7.926 + { VSsSemData *semData;
   7.927     TransListElem *nextTransElem;
   7.928  
   7.929           DEBUG__printf1(dbgRqstHdlr,"TransStart request from processor %d",requestingPr->slaveID)
   7.930 @@ -585,7 +641,7 @@
   7.931     semData->lastTransEntered = nextTransElem;
   7.932  
   7.933        //get the structure for this transaction ID
   7.934 -   VOMPTrans *
   7.935 +   VSsTrans *
   7.936     transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);
   7.937  
   7.938     if( transStruc->VPCurrentlyExecuting == NULL )
   7.939 @@ -616,10 +672,10 @@
   7.940   * resume both.
   7.941   */
   7.942  void
   7.943 -handleTransEnd(VOMPSemReq *semReq, SlaveVP *requestingPr, VOMPSemEnv *semEnv)
   7.944 - { VOMPSemData    *semData;
   7.945 +handleTransEnd(VSsSemReq *semReq, SlaveVP *requestingPr, VSsSemEnv *semEnv)
   7.946 + { VSsSemData    *semData;
   7.947     SlaveVP     *waitingPr;
   7.948 -   VOMPTrans      *transStruc;
   7.949 +   VSsTrans      *transStruc;
   7.950     TransListElem *lastTrans;
   7.951     
   7.952           DEBUG__printf1(dbgRqstHdlr,"TransEnd request from processor %d",requestingPr->slaveID)
     8.1 --- a/VSs_Request_Handlers.h	Thu May 24 07:34:21 2012 -0700
     8.2 +++ b/VSs_Request_Handlers.h	Wed May 30 15:02:38 2012 -0700
     8.3 @@ -6,51 +6,41 @@
     8.4   *
     8.5   */
     8.6  
     8.7 -#ifndef _VOMP_REQ_H
     8.8 -#define	_VOMP_REQ_H
     8.9 +#ifndef _VSs_REQ_H
    8.10 +#define	_VSs_REQ_H
    8.11  
    8.12 -#include "VOMP.h"
    8.13 +#include "VSs.h"
    8.14  
    8.15 -/*This header defines everything specific to the VOMP semantic plug-in
    8.16 +/*This header defines everything specific to the VSs semantic plug-in
    8.17   */
    8.18  
    8.19  inline void
    8.20 -handleSendType( VOMPSemReq *semReq, VOMPSemEnv *semEnv);
    8.21 +handleSubmitTask( VSsSemReq *semReq, VSsSemEnv *semEnv);
    8.22  inline void
    8.23 -handleSendFromTo( VOMPSemReq *semReq, VOMPSemEnv *semEnv);
    8.24 +handleEndTask( VSsSemReq *semReq, VSsSemEnv *semEnv);
    8.25  inline void
    8.26 -handleReceiveAny( VOMPSemReq *semReq, VOMPSemEnv *semEnv);
    8.27 +handleMalloc( VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv);
    8.28  inline void
    8.29 -handleReceiveType( VOMPSemReq *semReq, VOMPSemEnv *semEnv);
    8.30 +handleFree( VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv );
    8.31  inline void
    8.32 -handleReceiveFromTo( VOMPSemReq *semReq, VOMPSemEnv *semEnv);
    8.33 +handleTransEnd(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv*semEnv);
    8.34  inline void
    8.35 -handleTransferTo( VOMPSemReq *semReq, VOMPSemEnv *semEnv);
    8.36 +handleTransStart( VSsSemReq *semReq, SlaveVP *requestingSlv,
    8.37 +                  VSsSemEnv *semEnv );
    8.38  inline void
    8.39 -handleTransferOut( VOMPSemReq *semReq, VOMPSemEnv *semEnv);
    8.40 +handleAtomic( VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv);
    8.41  inline void
    8.42 -handleMalloc( VOMPSemReq *semReq, SlaveVP *requestingSlv, VOMPSemEnv *semEnv);
    8.43 +handleStartFnSingleton( VSsSemReq *semReq, SlaveVP *reqstingSlv,
    8.44 +                      VSsSemEnv *semEnv );
    8.45  inline void
    8.46 -handleFree( VOMPSemReq *semReq, SlaveVP *requestingSlv, VOMPSemEnv *semEnv );
    8.47 +handleEndFnSingleton( VSsSemReq *semReq, SlaveVP *requestingSlv,
    8.48 +                    VSsSemEnv *semEnv );
    8.49  inline void
    8.50 -handleTransEnd(VOMPSemReq *semReq, SlaveVP *requestingSlv, VOMPSemEnv*semEnv);
    8.51 +handleStartDataSingleton( VSsSemReq *semReq, SlaveVP *reqstingSlv,
    8.52 +                      VSsSemEnv *semEnv );
    8.53  inline void
    8.54 -handleTransStart( VOMPSemReq *semReq, SlaveVP *requestingSlv,
    8.55 -                  VOMPSemEnv *semEnv );
    8.56 -inline void
    8.57 -handleAtomic( VOMPSemReq *semReq, SlaveVP *requestingSlv, VOMPSemEnv *semEnv);
    8.58 -inline void
    8.59 -handleStartFnSingleton( VOMPSemReq *semReq, SlaveVP *reqstingSlv,
    8.60 -                      VOMPSemEnv *semEnv );
    8.61 -inline void
    8.62 -handleEndFnSingleton( VOMPSemReq *semReq, SlaveVP *requestingSlv,
    8.63 -                    VOMPSemEnv *semEnv );
    8.64 -inline void
    8.65 -handleStartDataSingleton( VOMPSemReq *semReq, SlaveVP *reqstingSlv,
    8.66 -                      VOMPSemEnv *semEnv );
    8.67 -inline void
    8.68 -handleEndDataSingleton( VOMPSemReq *semReq, SlaveVP *requestingSlv,
    8.69 -                    VOMPSemEnv *semEnv );
    8.70 +handleEndDataSingleton( VSsSemReq *semReq, SlaveVP *requestingSlv,
    8.71 +                    VSsSemEnv *semEnv );
    8.72  
    8.73 -#endif	/* _VOMP_REQ_H */
    8.74 +#endif	/* _VSs_REQ_H */
    8.75