changeset 16:1ffd5df22df9 dev_expl_VP_and_DKU

add CG instrumentation; still missing WaR hazard constraints
author Nina Engelhardt <nengel@mailbox.tu-berlin.de>
date Tue, 28 Aug 2012 15:33:16 +0200
parents 459055db7fc0
children f83fff8bd4b2
files Measurement/VSs_Counter_Recording.c Measurement/VSs_Counter_Recording.h Measurement/dependency.c Measurement/dependency.h VSs.c VSs.h VSs_PluginFns.c VSs_Request_Handlers.c
diffstat 8 files changed, 1148 insertions(+), 1119 deletions(-)
     1.1 --- a/Measurement/VSs_Counter_Recording.c	Thu Aug 23 03:21:03 2012 -0700
     1.2 +++ b/Measurement/VSs_Counter_Recording.c	Tue Aug 28 15:33:16 2012 +0200
     1.3 @@ -28,14 +28,14 @@
     1.4      list->next_free_index++; 
     1.5  }
     1.6  
     1.7 -void VSs__counter_handler(int evt_type, int vpid, int task, SlaveVP* pr, uint64 cycles, uint64 instrs)
     1.8 +void VSs__counter_handler(int evt_type, int vpid, int task, SlaveVP* pr, uint64 cycles, uint64 instrs, uint64 cachem)
     1.9  {
    1.10      
    1.11      if (pr->typeOfVP == Master || pr->typeOfVP == Shutdown)
    1.12       { //Only save values for application work, done in a SlaveVP
    1.13          return;
    1.14       }
    1.15 -
    1.16 +    
    1.17      VSsSemEnv *semanticEnv = _VMSMasterEnv->semanticEnv;
    1.18              
    1.19      CounterEvent e;
    1.20 @@ -45,6 +45,7 @@
    1.21      
    1.22      e.cycles = cycles;
    1.23      e.instrs = instrs;
    1.24 +    e.cachem = cachem;
    1.25      
    1.26      if(pr){
    1.27          e.coreID = pr->coreAnimatedBy;
    1.28 @@ -112,7 +113,7 @@
    1.29           default:
    1.30               fprintf(counterfile, "unknown event");
    1.31       }
    1.32 -     fprintf(counterfile,", %d, %d, %llu, %llu",e->vp,e->task,e->cycles,e->instrs);
    1.33 +     fprintf(counterfile,", %d, %d, %llu, %llu, %llu",e->vp,e->task,e->cycles,e->instrs,e->cachem);
    1.34       if(e->coreID >=0)
    1.35           fprintf(counterfile,", %d",e->coreID);
    1.36       fprintf(counterfile,"\n");
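
Net effect in this file: each counter event now carries a third hardware counter, cachem (presumably cache misses, alongside cycles and instructions), and the per-event CSV row gains one %llu column. A minimal sketch of a reader for the extended row format -- the column layout is taken from the fprintf calls above, but the function and variable names are illustrative:

    #include <stdio.h>

    /* Sketch: parse one row of the extended counter log.
     * Row layout, per the fprintf calls above:
     *   <event name>, vp, task, cycles, instrs, cachem[, coreID]
     * Returns 1 when the six mandatory fields were read. */
    int parse_counter_row(const char *row, int *vp, int *task,
                          unsigned long long *cycles,
                          unsigned long long *instrs,
                          unsigned long long *cachem, int *coreID)
    {
        char evtName[64];
        *coreID = -1; /* the coreID column is only written when >= 0 */
        int n = sscanf(row, "%63[^,], %d, %d, %llu, %llu, %llu, %d",
                       evtName, vp, task, cycles, instrs, cachem, coreID);
        return n >= 6;
    }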
     2.1 --- a/Measurement/VSs_Counter_Recording.h	Thu Aug 23 03:21:03 2012 -0700
     2.2 +++ b/Measurement/VSs_Counter_Recording.h	Tue Aug 28 15:33:16 2012 +0200
     2.3 @@ -18,13 +18,14 @@
     2.4     int task;
     2.5     uint64 cycles;
     2.6     uint64 instrs;
     2.7 +   uint64 cachem;
     2.8  } CounterEvent;
     2.9  
    2.10  FILE* counterfile;
    2.11  
    2.12  void VSs__init_counter_data_structs();
    2.13  
    2.14 -void VSs__counter_handler(int evt_type, int vpid, int task, SlaveVP* pr, uint64 cycles, uint64 instrs);
    2.15 +void VSs__counter_handler(int evt_type, int vpid, int task, SlaveVP* pr, uint64 cycles, uint64 instrs, uint64 cachem);
    2.16  
    2.17  void set_counter_file(FILE* f);
    2.18  
     3.1 --- a/Measurement/dependency.c	Thu Aug 23 03:21:03 2012 -0700
     3.2 +++ b/Measurement/dependency.c	Tue Aug 28 15:33:16 2012 +0200
     3.3 @@ -36,6 +36,18 @@
     3.4      fprintf(dependency_file,"commDep,%d,%d,%d,%d\n",dep->from_vp,dep->from_task,dep->to_vp,dep->to_task);
     3.5  }
     3.6  
     3.7 +void print_data_dependency_to_file(void* _dep){
     3.8 +    Dependency* dep = (Dependency*) _dep;
     3.9 +    if(!dep) return;
    3.10 +    fprintf(dependency_file,"dataDep,%d,%d,%d,%d\n",dep->from_vp,dep->from_task,dep->to_vp,dep->to_task);
    3.11 +}
    3.12 +
    3.13 +void print_singleton_dependency_to_file(void* _dep){
    3.14 +    Dependency* dep = (Dependency*) _dep;
    3.15 +    if(!dep) return;
    3.16 +    fprintf(dependency_file,"singDep,%d,%d,%d,%d\n",dep->from_vp,dep->from_task,dep->to_vp,dep->to_task);
    3.17 +}
    3.18 +
    3.19  void print_dyn_dependency_to_file(void* _dep){
    3.20      Dependency* dep = (Dependency*) _dep;
    3.21      if(!dep) return;
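
The two new printers repeat the exact shape of print_comm_dependency_to_file and print_dyn_dependency_to_file: cast, NULL-check, emit a tagged 4-tuple arc. A hypothetical shared helper that would collapse all four (not part of the changeset; the name is illustrative, and dependency_file is the module's existing output handle):

    /* Hypothetical common body for the arc printers; "tag" would be
     * "commDep", "dataDep", "singDep", or "dynDep". */
    static void print_tagged_dependency(const char *tag, void *_dep)
    {
        Dependency *dep = (Dependency *) _dep;
        if (!dep) return;
        fprintf(dependency_file, "%s,%d,%d,%d,%d\n", tag,
                dep->from_vp, dep->from_task, dep->to_vp, dep->to_task);
    }

Because forAllInListOfArraysDo passes only the element pointer, each tag would still need a thin per-tag wrapper around this helper, which is presumably why the changeset keeps four separate functions.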
     4.1 --- a/Measurement/dependency.h	Thu Aug 23 03:21:03 2012 -0700
     4.2 +++ b/Measurement/dependency.h	Tue Aug 28 15:33:16 2012 +0200
     4.3 @@ -42,6 +42,10 @@
     4.4  
     4.5  void print_comm_dependency_to_file(void* _dep);
     4.6  
     4.7 +void print_data_dependency_to_file(void* _dep);
     4.8 +
     4.9 +void print_singleton_dependency_to_file(void* _dep);
    4.10 +
    4.11  void print_dyn_dependency_to_file(void* _dep);
    4.12  
    4.13  void print_hw_dependency_to_file(void* _dep);
     5.1 --- a/VSs.c	Thu Aug 23 03:21:03 2012 -0700
     5.2 +++ b/VSs.c	Tue Aug 28 15:33:16 2012 +0200
     5.3 @@ -97,7 +97,8 @@
     5.4     parentTaskStub->isEnded = TRUE;
     5.5     parentTaskStub->numLiveChildThreads = 1; //so dissipate works for seed
     5.6     threadTaskStub->parentTaskStub = parentTaskStub;
     5.7 -   
     5.8 +   threadTaskStub->slaveAssignedTo = seedSlv;
     5.9 +
    5.10     semData = (VSsSemData *)seedSlv->semanticData;
    5.11        //seedVP is a thread, so has a permanent task
    5.12     semData->needsTaskAssigned = FALSE;
    5.13 @@ -260,6 +261,8 @@
    5.14     semanticEnv->ctlDependenciesList = makeListOfArrays(sizeof(Dependency),128);
    5.15     semanticEnv->commDependenciesList = makeListOfArrays(sizeof(Dependency),128);
    5.16     semanticEnv->dynDependenciesList = makeListOfArrays(sizeof(Dependency),128);
    5.17 +   semanticEnv->dataDependenciesList = makeListOfArrays(sizeof(Dependency),128);
    5.18 +   semanticEnv->singletonDependenciesList = makeListOfArrays(sizeof(Dependency),128);
    5.19     semanticEnv->ntonGroupsInfo = makePrivDynArrayOfSize((void***)&(semanticEnv->ntonGroups),8);
    5.20     
    5.21     semanticEnv->hwArcs = makeListOfArrays(sizeof(Dependency),128);
    5.22 @@ -304,6 +307,8 @@
    5.23          forAllInListOfArraysDo(semanticEnv->unitList, &print_unit_to_file);
    5.24          forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
    5.25          forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
    5.26 +        forAllInListOfArraysDo( semanticEnv->dataDependenciesList, &print_data_dependency_to_file );
    5.27 +        forAllInListOfArraysDo( semanticEnv->singletonDependenciesList, &print_singleton_dependency_to_file );
    5.28          forAllInDynArrayDo(semanticEnv->ntonGroupsInfo,&print_nton_to_file);
    5.29          //fprintf(output,"}\n");
    5.30          fflush(output);
    5.31 @@ -337,6 +342,8 @@
    5.32          forAllInListOfArraysDo( semanticEnv->unitList, &print_unit_to_file );
    5.33          forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
    5.34          forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
    5.35 +        forAllInListOfArraysDo( semanticEnv->dataDependenciesList, &print_data_dependency_to_file );
    5.36 +        forAllInListOfArraysDo( semanticEnv->singletonDependenciesList, &print_singleton_dependency_to_file );
    5.37          forAllInListOfArraysDo( semanticEnv->dynDependenciesList, &print_dyn_dependency_to_file );
    5.38          forAllInListOfArraysDo( semanticEnv->hwArcs, &print_hw_dependency_to_file );
    5.39          //fprintf(output,"}\n");
    5.40 @@ -353,6 +360,7 @@
    5.41     freeListOfArrays(semanticEnv->commDependenciesList);
    5.42     freeListOfArrays(semanticEnv->ctlDependenciesList);
    5.43     freeListOfArrays(semanticEnv->dynDependenciesList);
    5.44 +   freeListOfArrays(semanticEnv->dataDependenciesList);
    5.45     
    5.46     #endif
    5.47  #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS    
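
Both dump paths above now flush the two new lists, and initialization allocates both, but note the asymmetry in the teardown hunk: dataDependenciesList is freed while singletonDependenciesList is not, so its storage leaks at shutdown. The symmetric cleanup would presumably be:

       freeListOfArrays(semanticEnv->dataDependenciesList);
       freeListOfArrays(semanticEnv->singletonDependenciesList); /* absent from this changeset */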
     6.1 --- a/VSs.h	Thu Aug 23 03:21:03 2012 -0700
     6.2 +++ b/VSs.h	Tue Aug 28 15:33:16 2012 +0200
     6.3 @@ -62,6 +62,10 @@
     6.4     bool32       hasEnabledNonFinishedWriter;
     6.5     int32        numEnabledNonDoneReaders;
     6.6     PrivQueueStruc *waitersQ;
     6.7 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
     6.8 +   int lastWriterVP;
     6.9 +   int lastWriterTask;
    6.10 +#endif
    6.11   }
    6.12  VSsPointerEntry;
    6.13  
    6.14 @@ -111,6 +115,8 @@
    6.15     int32           hasBeenStarted;
    6.16     int32           hasFinished;
    6.17     PrivQueueStruc *waitQ;
    6.18 +   int executingVp;
    6.19 +   int executingTask;
    6.20   }
    6.21  VSsSingleton;
    6.22  
    6.23 @@ -197,9 +203,11 @@
    6.24     ListOfArrays* unitList;
    6.25     ListOfArrays* ctlDependenciesList;
    6.26     ListOfArrays* commDependenciesList;
    6.27 +   ListOfArrays* dataDependenciesList;
    6.28     NtoN** ntonGroups;
    6.29     PrivDynArrayInfo* ntonGroupsInfo;
    6.30     ListOfArrays* dynDependenciesList;
    6.31 +   ListOfArrays* singletonDependenciesList;
    6.32     Unit last_in_slot[NUM_CORES * NUM_ANIM_SLOTS];
    6.33     ListOfArrays* hwArcs;
    6.34     #endif
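
These fields give the recording code a place to keep the source end of the two new arc kinds: a VSsPointerEntry remembers which (VP, task) last wrote through that pointer, and a VSsSingleton remembers who executed it. A sketch of the intended use at the point a reader task is enabled on a pointer entry -- the handler itself is not among this changeset's visible hunks, so the locals ptrEntry and readerSlv are assumptions:

    #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
       /* Draw a read-after-write arc from the pointer's last writer to the
        * reader being enabled. Write-after-read (WaR) constraints are the
        * part the commit message says is still missing. */
       if (ptrEntry->lastWriterVP != 0)
        { Dependency newd;
          newd.from_vp   = ptrEntry->lastWriterVP;
          newd.from_task = ptrEntry->lastWriterTask;
          newd.to_vp     = readerSlv->slaveID;      /* assumed local */
          newd.to_task   = readerSlv->assignCount;  /* assumed local */
          addToListOfArrays(Dependency, newd, semEnv->dataDependenciesList);
        }
    #endif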
     7.1 --- a/VSs_PluginFns.c	Thu Aug 23 03:21:03 2012 -0700
     7.2 +++ b/VSs_PluginFns.c	Tue Aug 28 15:33:16 2012 +0200
     7.3 @@ -13,19 +13,20 @@
     7.4  
     7.5  //=========================== Local Fn Prototypes ===========================
     7.6  void
     7.7 -resume_slaveVP( SlaveVP *slave, VSsSemEnv *semEnv );
     7.8 +resume_slaveVP(SlaveVP *slave, VSsSemEnv *semEnv);
     7.9  
    7.10  inline void
    7.11 -handleSemReq( VMSReqst *req, SlaveVP *requestingSlv, VSsSemEnv *semEnv );
    7.12 +handleSemReq(VMSReqst *req, SlaveVP *requestingSlv, VSsSemEnv *semEnv);
    7.13  
    7.14  inline void
    7.15 -handleDissipate(                SlaveVP *requestingSlv, VSsSemEnv *semEnv );
    7.16 +handleDissipate(SlaveVP *requestingSlv, VSsSemEnv *semEnv);
    7.17  
    7.18  inline void
    7.19 -handleCreate(    VMSReqst *req, SlaveVP *requestingSlv, VSsSemEnv *semEnv );
    7.20 +handleCreate(VMSReqst *req, SlaveVP *requestingSlv, VSsSemEnv *semEnv);
    7.21  
    7.22  //============================== Assigner ==================================
    7.23  //
    7.24 +
    7.25  /*The assigner is complicated by having both tasks and explicitly created
    7.26   * VPs, and by tasks being able to suspend.
    7.27   *It can't use an explicit slave to animate a task because of stack
    7.28 @@ -49,359 +50,349 @@
    7.29   * the junk tasks are allowed to leave behind.
    7.30   */
    7.31  SlaveVP *
    7.32 -VSs__assign_slaveVP_to_slot( void *_semEnv, AnimSlot *slot )
    7.33 - { SlaveVP     *returnSlv;
    7.34 -   VSsSemEnv   *semEnv;
    7.35 -   VSsSemData  *semData;
    7.36 -   int32        coreNum, slotNum;
    7.37 -   VSsTaskStub *newTaskStub;
    7.38 -   SlaveVP     *extraSlv;
    7.39 -  
    7.40 -   coreNum = slot->coreSlotIsOn;
    7.41 -   slotNum = slot->slotIdx;
    7.42 -   
    7.43 -   semEnv  = (VSsSemEnv *)_semEnv;
    7.44 -   
    7.45 +VSs__assign_slaveVP_to_slot(void *_semEnv, AnimSlot *slot) {
    7.46 +    SlaveVP *returnSlv;
    7.47 +    VSsSemEnv *semEnv;
    7.48 +    VSsSemData *semData;
    7.49 +    int32 coreNum, slotNum;
    7.50 +    VSsTaskStub *newTaskStub;
    7.51 +    SlaveVP *extraSlv;
    7.52 +
    7.53 +    coreNum = slot->coreSlotIsOn;
    7.54 +    slotNum = slot->slotIdx;
    7.55 +
    7.56 +    semEnv = (VSsSemEnv *) _semEnv;
    7.57 +
    7.58        //Check for suspended slaves that are ready to resume
    7.59 -   returnSlv = readPrivQ( semEnv->slavesReadyToResumeQ );
    7.60 -   if( returnSlv != NULL )  //Yes, have a slave, so return it.
    7.61 +    returnSlv = readPrivQ(semEnv->slavesReadyToResumeQ);
    7.62 +    if (returnSlv != NULL) //Yes, have a slave, so return it.
    7.63      { returnSlv->coreAnimatedBy   = coreNum;
    7.64      
    7.65           //have work, so reset Done flag (when work generated on other core)
    7.66 -      if( semEnv->coreIsDone[coreNum] == TRUE ) //reads are higher perf
    7.67 -         semEnv->coreIsDone[coreNum] = FALSE;   //don't just write always
    7.68 -      goto ReturnTheSlv;
    7.69 +        if (semEnv->coreIsDone[coreNum] == TRUE) //reads are higher perf
    7.70 +            semEnv->coreIsDone[coreNum] = FALSE; //don't just write always
    7.71 +        goto ReturnTheSlv;
    7.72      }
    7.73     
    7.74        //If none, speculate will have a task, so get the slot slave
    7.75        //TODO: false sharing ?  (think not bad cause mostly read..)
    7.76 -   returnSlv = semEnv->slotTaskSlvs[coreNum][slotNum];
    7.77 -   
    7.78 -   semData = (VSsSemData *)returnSlv->semanticData;
    7.79 +    returnSlv = semEnv->slotTaskSlvs[coreNum][slotNum];
    7.80  
    7.81 -      //There is always a curr task slave, and it always needs a task
    7.82 -      // (task slaves that are resuming are in resumeQ)
    7.83 -   newTaskStub = readPrivQ( semEnv->taskReadyQ );
    7.84 -   if( newTaskStub != NULL )
    7.85 -    {    //point slave to task's function, and mark slave as having task
    7.86 -      VMS_int__reset_slaveVP_to_TopLvlFn( returnSlv, 
    7.87 -                          newTaskStub->taskType->fn, newTaskStub->args );
    7.88 -      semData->taskStub            = newTaskStub;
    7.89 -      newTaskStub->slaveAssignedTo = returnSlv;
    7.90 -      semData->needsTaskAssigned   = FALSE;
    7.91 +    semData = (VSsSemData *) returnSlv->semanticData;
    7.92 +
    7.93 +    //There is always a curr task slave, and it always needs a task
    7.94 +    // (task slaves that are resuming are in resumeQ)
    7.95 +    newTaskStub = readPrivQ(semEnv->taskReadyQ);
    7.96 +    if (newTaskStub != NULL) { //point slave to task's function, and mark slave as having task
    7.97 +        VMS_int__reset_slaveVP_to_TopLvlFn(returnSlv,
    7.98 +                newTaskStub->taskType->fn, newTaskStub->args);
    7.99 +        semData->taskStub = newTaskStub;
   7.100 +        newTaskStub->slaveAssignedTo = returnSlv;
   7.101 +        semData->needsTaskAssigned = FALSE;
   7.102        
   7.103           //have work, so reset Done flag, if was set
   7.104 -      if( semEnv->coreIsDone[coreNum] == TRUE ) //reads are higher perf
   7.105 -         semEnv->coreIsDone[coreNum] = FALSE;   //don't just write always
   7.106 -      goto ReturnTheSlv;
   7.107 -    }
   7.108 -   else
   7.109 -    {    //no task, so try to clean up unused extra task slaves
   7.110 -      extraSlv = readPrivQ( semEnv->freeExtraTaskSlvQ );
   7.111 -      if( extraSlv != NULL )
   7.112 +        if (semEnv->coreIsDone[coreNum] == TRUE) //reads are higher perf
   7.113 +            semEnv->coreIsDone[coreNum] = FALSE; //don't just write always
   7.114 +        goto ReturnTheSlv;
   7.115 +    } else { //no task, so try to clean up unused extra task slaves
   7.116 +        extraSlv = readPrivQ(semEnv->freeExtraTaskSlvQ);
   7.117 +        if (extraSlv != NULL)
   7.118         {    //have two slaves need tasks, so delete one
   7.119              //This both bounds the num extras, and delivers shutdown cond
   7.120 -         handleDissipate( extraSlv, semEnv );
   7.121 +            handleDissipate(extraSlv, semEnv);
   7.122              //then return NULL
   7.123 -         returnSlv = NULL;
   7.124 -         goto ReturnTheSlv;
   7.125 -       }
   7.126 -      else
   7.127 -       { //candidate for shutdown.. if all extras dissipated, and no tasks
   7.128 -         // and no ready to resume slaves, then no way to generate
   7.129 +            returnSlv = NULL;
   7.130 +            goto ReturnTheSlv;
   7.131 +        } else { //candidate for shutdown.. if all extras dissipated, and no tasks
   7.132 +            // and no ready to resume slaves, then no way to generate
   7.133           // more tasks (on this core -- other core might have task still)
   7.134 -         if( semEnv->numLiveExtraTaskSlvs == 0 && 
   7.135 -             semEnv->numLiveThreadSlvs == 0 )
   7.136 -          { //This core sees no way to generate more tasks, so say it
   7.137 -            if( semEnv->coreIsDone[coreNum] == FALSE )
   7.138 -             { semEnv->numCoresDone += 1;
   7.139 -               semEnv->coreIsDone[coreNum] = TRUE;
   7.140 -               #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
   7.141 -               semEnv->shutdownInitiated = TRUE;
   7.142 +            if (semEnv->numLiveExtraTaskSlvs == 0 &&
   7.143 +                    semEnv->numLiveThreadSlvs == 0) { //This core sees no way to generate more tasks, so say it
   7.144 +                if (semEnv->coreIsDone[coreNum] == FALSE) {
   7.145 +                    semEnv->numCoresDone += 1;
   7.146 +                    semEnv->coreIsDone[coreNum] = TRUE;
   7.147 +#ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
   7.148 +                    semEnv->shutdownInitiated = TRUE;
   7.149                 
   7.150 -               #else
   7.151 -               if( semEnv->numCoresDone == NUM_CORES )
   7.152 -                { //means no cores have work, and none can generate more
   7.153 -                  semEnv->shutdownInitiated = TRUE;
   7.154 +#else
   7.155 +                    if (semEnv->numCoresDone == NUM_CORES) { //means no cores have work, and none can generate more
   7.156 +                        semEnv->shutdownInitiated = TRUE;
   7.157 +                    }
   7.158 +#endif
   7.159                  }
   7.160 -               #endif
   7.161 -             }
   7.162 -          }
   7.163 +            }
   7.164              //return NULL.. no task and none to resume
   7.165 -         returnSlv = NULL;
   7.166 +            returnSlv = NULL;
   7.167              //except if shutdown has been initiated by this or other core
   7.168 -         if(semEnv->shutdownInitiated) 
   7.169 -          { returnSlv = VMS_SS__create_shutdown_slave();
   7.170 -          }
   7.171 -         goto ReturnTheSlv; //don't need, but completes pattern
   7.172 -       } //if( extraSlv != NULL )
   7.173 +            if (semEnv->shutdownInitiated) {
   7.174 +                returnSlv = VMS_SS__create_shutdown_slave();
   7.175 +            }
   7.176 +            goto ReturnTheSlv; //don't need, but completes pattern
   7.177 +        } //if( extraSlv != NULL )
   7.178      } //if( newTaskStub == NULL )
   7.179 -   //outcome: 1)slave was just pointed to task, 2)no tasks, so slave NULL
   7.180 +    //outcome: 1)slave was just pointed to task, 2)no tasks, so slave NULL
   7.181  
   7.182 -ReturnTheSlv:  //Nina, doing gotos to here should help with holistic..
   7.183 +ReturnTheSlv: //Nina, doing gotos to here should help with holistic..
   7.184  
   7.185 -   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   7.186 -   if( returnSlv == NULL )
   7.187 -    { returnSlv = semEnv->idleSlv[coreNum][slotNum]; 
   7.188 -    
   7.189 -         //things that would normally happen in resume(), but these VPs
   7.190 -         // never go there
   7.191 -      returnSlv->assignCount++; //Somewhere here!
   7.192 -      Unit newu;
   7.193 -      newu.vp = returnSlv->slaveID;
   7.194 -      newu.task = returnSlv->assignCount;
   7.195 -      addToListOfArrays(Unit,newu,semEnv->unitList);
   7.196  
   7.197 -      if (returnSlv->assignCount > 1)
   7.198 -       { Dependency newd;
   7.199 -         newd.from_vp = returnSlv->slaveID;
   7.200 -         newd.from_task = returnSlv->assignCount - 1;
   7.201 -         newd.to_vp = returnSlv->slaveID;
   7.202 -         newd.to_task = returnSlv->assignCount;
   7.203 -         addToListOfArrays(Dependency, newd ,semEnv->ctlDependenciesList);  
   7.204 -       }
   7.205 +#ifdef IDLE_SLAVES
   7.206 +    if (!returnSlv) {
   7.207 +        returnSlv = semEnv->idlePr[coreNum][slotNum];
   7.208 +
   7.209 +        if (semEnv->shutdownInitiated) {
   7.210 +            returnSlv = VMS_SS__create_shutdown_slave();
   7.211 +        }
   7.212      }
   7.213 -   #endif
   7.214 -   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   7.215 -   if( returnSlv != NULL )
   7.216 -    { //assignSlv->numTimesAssigned++;
   7.217 -      Unit prev_in_slot = 
   7.218 -         semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum];
   7.219 -      if(prev_in_slot.vp != 0)
   7.220 -       { Dependency newd;
   7.221 -         newd.from_vp = prev_in_slot.vp;
   7.222 -         newd.from_task = prev_in_slot.task;
   7.223 -         newd.to_vp = returnSlv->slaveID;
   7.224 -         newd.to_task = returnSlv->assignCount;
   7.225 -         addToListOfArrays(Dependency,newd,semEnv->hwArcs);   
   7.226 -       }
   7.227 -      prev_in_slot.vp = returnSlv->slaveID;
   7.228 -      prev_in_slot.task = returnSlv->assignCount;
   7.229 -      semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum] =
   7.230 -         prev_in_slot;        
   7.231 +#endif
   7.232 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   7.233 +    if (returnSlv && (returnSlv->typeOfVP == Slave || returnSlv->typeOfVP == Idle)) {
   7.234 +        returnSlv->assignCount++;
   7.235 +        
   7.236 +        Unit newu;
   7.237 +        newu.vp = returnSlv->slaveID;
   7.238 +        newu.task = returnSlv->assignCount;
   7.239 +        addToListOfArrays(Unit, newu, semEnv->unitList);
   7.240 +        
   7.241 +        if (returnSlv->assignCount > 1) {
    7.242 +            Dependency newd;
    7.243 +            newd.from_vp = returnSlv->slaveID;
    7.244 +            newd.from_task = returnSlv->assignCount - 1;
    7.245 +            newd.to_vp = returnSlv->slaveID;
    7.246 +            newd.to_task = returnSlv->assignCount;
    7.247 +            addToListOfArrays(Dependency, newd, semEnv->ctlDependenciesList);
   7.248 +        }
   7.249 +        
   7.250 +        Unit prev_in_slot = semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum];
   7.251 +        if (prev_in_slot.vp != 0) {
   7.252 +            Dependency newd;
   7.253 +            newd.from_vp = prev_in_slot.vp;
   7.254 +            newd.from_task = prev_in_slot.task;
   7.255 +            newd.to_vp = returnSlv->slaveID;
   7.256 +            newd.to_task = returnSlv->assignCount;
   7.257 +            addToListOfArrays(Dependency, newd, semEnv->hwArcs);
   7.258 +        }
   7.259 +        prev_in_slot.vp = returnSlv->slaveID;
   7.260 +        prev_in_slot.task = returnSlv->assignCount;
   7.261 +        semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum] = prev_in_slot;
   7.262      }
   7.263 -   #endif
   7.264 -   return( returnSlv );
   7.265 - }
   7.266 +#endif
    7.267 +    return (returnSlv);
   7.268 +}
   7.269  
   7.270  
   7.271  //===========================  Request Handler  ============================
   7.272  //
   7.273 +
   7.274  /*
   7.275   * (BTW not inline because invoked indirectly via a pointer)
   7.276   */
   7.277  void
   7.278 -VSs__Request_Handler( SlaveVP *requestingSlv, void *_semEnv )
   7.279 - { VSsSemEnv *semEnv;
   7.280 -   VMSReqst  *req;
   7.281 -   
   7.282 -   semEnv = (VSsSemEnv *)_semEnv;
   7.283 +VSs__Request_Handler(SlaveVP *requestingSlv, void *_semEnv) {
   7.284 +    VSsSemEnv *semEnv;
   7.285 +    VMSReqst *req;
   7.286  
   7.287 -   req    = VMS_PI__take_next_request_out_of( requestingSlv );
   7.288 +    semEnv = (VSsSemEnv *) _semEnv;
   7.289  
   7.290 -   while( req != NULL )
   7.291 -    {
   7.292 -      switch( req->reqType )
   7.293 -       { case semantic:     handleSemReq(        req, requestingSlv, semEnv);
   7.294 -            break;
   7.295 -         case createReq:    handleCreate(        req, requestingSlv, semEnv);
   7.296 -            break;
   7.297 -         case dissipate:    handleDissipate(          requestingSlv, semEnv);
   7.298 -            break;
   7.299 -         case VMSSemantic:  VMS_PI__handle_VMSSemReq(req, requestingSlv, semEnv,
   7.300 -                                           (ResumeSlvFnPtr) &resume_slaveVP);
   7.301 -            break;
   7.302 -         default:
   7.303 -            break;
   7.304 -       }
   7.305 -      
   7.306 -      req = VMS_PI__take_next_request_out_of( requestingSlv );
   7.307 +    req = VMS_PI__take_next_request_out_of(requestingSlv);
   7.308 +
   7.309 +    while (req != NULL) {
   7.310 +        switch (req->reqType) {
    7.311 +            case semantic: handleSemReq(req, requestingSlv, semEnv);
    7.312 +                break;
   7.313 +            case createReq: handleCreate(req, requestingSlv, semEnv);
   7.314 +                break;
   7.315 +            case dissipate: handleDissipate(requestingSlv, semEnv);
   7.316 +                break;
   7.317 +            case VMSSemantic: VMS_PI__handle_VMSSemReq(req, requestingSlv, semEnv,
    7.318 +                        (ResumeSlvFnPtr) &resume_slaveVP);
   7.319 +                break;
   7.320 +            default:
   7.321 +                break;
   7.322 +        }
   7.323 +
   7.324 +        req = VMS_PI__take_next_request_out_of(requestingSlv);
   7.325      } //while( req != NULL )
   7.326  
   7.327 - }
   7.328 -
   7.329 +}
   7.330  
   7.331  inline void
   7.332 -handleSemReq( VMSReqst *req, SlaveVP *reqSlv, VSsSemEnv *semEnv )
   7.333 - { VSsSemReq *semReq;
   7.334 +handleSemReq(VMSReqst *req, SlaveVP *reqSlv, VSsSemEnv *semEnv) {
   7.335 +    VSsSemReq *semReq;
   7.336  
   7.337 -   semReq = VMS_PI__take_sem_reqst_from(req);
   7.338 -   if( semReq == NULL ) return;
   7.339 -   switch( semReq->reqType )  //sem handlers are all in other file
   7.340 +    semReq = VMS_PI__take_sem_reqst_from(req);
   7.341 +    if (semReq == NULL) return;
   7.342 +    switch (semReq->reqType) //sem handlers are all in other file
   7.343      {
   7.344 -      case submit_task:     handleSubmitTask(   semReq,         semEnv);
   7.345 -         break; 
   7.346 -      case end_task:        handleEndTask(      semReq,         semEnv);
   7.347 -         break;
   7.348 -      case send_type_to:    handleSendTypeTo(   semReq,         semEnv);
   7.349 -         break;
   7.350 -      case send_from_to:    handleSendFromTo(   semReq,         semEnv);
   7.351 -         break;
   7.352 -      case receive_type_to: handleReceiveTypeTo(semReq,         semEnv);
   7.353 -         break;
   7.354 -      case receive_from_to: handleReceiveFromTo(semReq,         semEnv);
   7.355 -         break;
   7.356 -      case taskwait:        handleTaskwait(     semReq, reqSlv, semEnv);
   7.357 -           break;
   7.358 -         
   7.359 -      //====================================================================
   7.360 -      case malloc_req:      handleMalloc(       semReq, reqSlv, semEnv);
   7.361 -         break;
   7.362 -      case free_req:        handleFree(         semReq, reqSlv, semEnv);
   7.363 -         break;
   7.364 -      case singleton_fn_start:  handleStartFnSingleton(semReq, reqSlv, semEnv);
   7.365 -         break;
   7.366 -      case singleton_fn_end:    handleEndFnSingleton(  semReq, reqSlv, semEnv);
   7.367 -         break;
   7.368 -      case singleton_data_start:handleStartDataSingleton(semReq,reqSlv,semEnv);
   7.369 -         break;
   7.370 -      case singleton_data_end:  handleEndDataSingleton(semReq, reqSlv, semEnv);
   7.371 -         break;
   7.372 -      case atomic:          handleAtomic(       semReq, reqSlv, semEnv);
   7.373 -         break;
   7.374 -      case trans_start:     handleTransStart(   semReq, reqSlv, semEnv);
   7.375 -         break;
   7.376 -      case trans_end:       handleTransEnd(     semReq, reqSlv, semEnv);
   7.377 -         break;
   7.378 +        case submit_task: handleSubmitTask(semReq, semEnv);
   7.379 +            break;
   7.380 +        case end_task: handleEndTask(semReq, semEnv);
   7.381 +            break;
   7.382 +        case send_type_to: handleSendTypeTo(semReq, semEnv);
   7.383 +            break;
   7.384 +        case send_from_to: handleSendFromTo(semReq, semEnv);
   7.385 +            break;
   7.386 +        case receive_type_to: handleReceiveTypeTo(semReq, semEnv);
   7.387 +            break;
   7.388 +        case receive_from_to: handleReceiveFromTo(semReq, semEnv);
   7.389 +            break;
   7.390 +        case taskwait: handleTaskwait(semReq, reqSlv, semEnv);
   7.391 +            break;
   7.392 +
   7.393 +            //====================================================================
   7.394 +        case malloc_req: handleMalloc(semReq, reqSlv, semEnv);
   7.395 +            break;
   7.396 +        case free_req: handleFree(semReq, reqSlv, semEnv);
   7.397 +            break;
   7.398 +        case singleton_fn_start: handleStartFnSingleton(semReq, reqSlv, semEnv);
   7.399 +            break;
   7.400 +        case singleton_fn_end: handleEndFnSingleton(semReq, reqSlv, semEnv);
   7.401 +            break;
    7.402 +        case singleton_data_start: handleStartDataSingleton(semReq, reqSlv, semEnv);
   7.403 +            break;
   7.404 +        case singleton_data_end: handleEndDataSingleton(semReq, reqSlv, semEnv);
   7.405 +            break;
   7.406 +        case atomic: handleAtomic(semReq, reqSlv, semEnv);
   7.407 +            break;
   7.408 +        case trans_start: handleTransStart(semReq, reqSlv, semEnv);
   7.409 +            break;
   7.410 +        case trans_end: handleTransEnd(semReq, reqSlv, semEnv);
   7.411 +            break;
   7.412      }
   7.413 - }
   7.414 +}
   7.415  
   7.416  
   7.417  
   7.418  //=========================== VMS Request Handlers ==============================
   7.419 +
   7.420  /*SlaveVP dissipate -- this is NOT task-end!, only call this to get rid of
   7.421   * extra task slaves, and to end explicitly created threads
   7.422   */
   7.423  inline void
   7.424 -handleDissipate( SlaveVP *requestingSlv, VSsSemEnv *semEnv )
   7.425 - { VSsSemData  *semData;
   7.426 -   VSsTaskStub *parentTaskStub, *ownTaskStub;
   7.427 - 
   7.428 -         DEBUG__printf1(dbgRqstHdlr,"Dissipate request from processor %d",
   7.429 -                                                     requestingSlv->slaveID)
   7.430 -   semData = (VSsSemData *)requestingSlv->semanticData;
   7.431 +handleDissipate(SlaveVP *requestingSlv, VSsSemEnv *semEnv) {
   7.432 +    VSsSemData *semData;
   7.433 +    VSsTaskStub *parentTaskStub, *ownTaskStub;
   7.434  
   7.435 -   if( semData->slaveType == ExtraTaskSlv )
   7.436 -    { semEnv->numLiveExtraTaskSlvs -= 1; //for detecting shutdown condition
   7.437 -         //Has no task assigned, so no parents and no children, so free self
   7.438 -      goto FreeSlaveStateAndReturn;
    7.439 +    DEBUG__printf1(dbgRqstHdlr, "Dissipate request from processor %d",
    7.440 +            requestingSlv->slaveID)
    7.441 +    semData = (VSsSemData *) requestingSlv->semanticData;
   7.442 +
   7.443 +    if (semData->slaveType == ExtraTaskSlv) {
   7.444 +        semEnv->numLiveExtraTaskSlvs -= 1; //for detecting shutdown condition
   7.445 +        //Has no task assigned, so no parents and no children, so free self
   7.446 +        goto FreeSlaveStateAndReturn;
   7.447      }
   7.448  
   7.449 -   if( semData->slaveType == SlotTaskSlv )
   7.450 -    {    //should never call dissipate on a slot assigned slave
   7.451 -      VMS_PI__throw_exception( "dissipate a slot-assigned slave", requestingSlv, NULL );
   7.452 +    if (semData->slaveType == SlotTaskSlv) { //should never call dissipate on a slot assigned slave
   7.453 +        VMS_PI__throw_exception("dissipate a slot-assigned slave", requestingSlv, NULL);
   7.454      }
   7.455  
   7.456 -      //if make it to here, then is a thread slave ending
   7.457 -   semEnv->numLiveThreadSlvs -= 1; //for detecting shutdown condition
   7.458 +    //if make it to here, then is a thread slave ending
   7.459 +    semEnv->numLiveThreadSlvs -= 1; //for detecting shutdown condition
   7.460     
   7.461 -   ownTaskStub    = semData->taskStub;
   7.462 -   parentTaskStub = ownTaskStub->parentTaskStub;
   7.463 -   parentTaskStub->numLiveChildThreads -= 1;  //not freed, even if ended
   7.464 -   
   7.465 -      //if all children ended, then free this task's stub
   7.466 -      // else, keep stub around, and last child will free it (below)
   7.467 -   if( ownTaskStub->numLiveChildTasks   == 0 &&
   7.468 -       ownTaskStub->numLiveChildThreads == 0 )
   7.469 -      free_task_stub( ownTaskStub );
   7.470 -   else
   7.471 -      ownTaskStub->isEnded = TRUE; //for children to see when they end
   7.472 +    ownTaskStub = semData->taskStub;
   7.473 +    parentTaskStub = ownTaskStub->parentTaskStub;
   7.474 +    parentTaskStub->numLiveChildThreads -= 1; //not freed, even if ended
   7.475  
   7.476 -      //Now, check on parents waiting on child threads to end
   7.477 -   if( parentTaskStub->isWaitingForChildThreadsToEnd &&
   7.478 -       parentTaskStub->numLiveChildThreads == 0 )
   7.479 -    { parentTaskStub->isWaitingForChildThreadsToEnd = FALSE;
   7.480 -      if( parentTaskStub->isWaitingForChildTasksToEnd )
   7.481 -        return; //still waiting on tasks (should be impossible)
   7.482 -      else //parent free to resume
   7.483 -        resume_slaveVP( parentTaskStub->slaveAssignedTo, semEnv );
   7.484 -    }
   7.485 -   
   7.486 -      //check if this is last child of ended parent (note, not possible to
   7.487 -      // have more than one level of ancestor waiting to be freed)
   7.488 -   if( parentTaskStub->isEnded )
   7.489 -    { if( parentTaskStub->numLiveChildTasks   == 0 && 
   7.490 -          parentTaskStub->numLiveChildThreads == 0 )
   7.491 -         free_task_stub( parentTaskStub ); //just stub, semData already freed
   7.492 +    //if all children ended, then free this task's stub
   7.493 +    // else, keep stub around, and last child will free it (below)
   7.494 +    if (ownTaskStub->numLiveChildTasks == 0 &&
   7.495 +            ownTaskStub->numLiveChildThreads == 0)
   7.496 +        free_task_stub(ownTaskStub);
   7.497 +    else
   7.498 +        ownTaskStub->isEnded = TRUE; //for children to see when they end
   7.499 +
   7.500 +    //Now, check on parents waiting on child threads to end
   7.501 +    if (parentTaskStub->isWaitingForChildThreadsToEnd &&
   7.502 +            parentTaskStub->numLiveChildThreads == 0) {
   7.503 +        parentTaskStub->isWaitingForChildThreadsToEnd = FALSE;
   7.504 +        if (parentTaskStub->isWaitingForChildTasksToEnd)
   7.505 +            return; //still waiting on tasks (should be impossible)
   7.506 +        else //parent free to resume
   7.507 +            resume_slaveVP(parentTaskStub->slaveAssignedTo, semEnv);
   7.508      }
   7.509  
   7.510 -      //Free the semData and requesting slave's base state for all cases
   7.511 - FreeSlaveStateAndReturn:
   7.512 -   VMS_PI__free( semData );
   7.513 -   VMS_PI__dissipate_slaveVP( requestingSlv );
   7.514 -   return; 
   7.515 +    //check if this is last child of ended parent (note, not possible to
   7.516 +    // have more than one level of ancestor waiting to be freed)
   7.517 +    if (parentTaskStub->isEnded) {
   7.518 +        if (parentTaskStub->numLiveChildTasks == 0 &&
   7.519 +                parentTaskStub->numLiveChildThreads == 0)
   7.520 +            free_task_stub(parentTaskStub); //just stub, semData already freed
   7.521 +    }
   7.522 +
   7.523 +    //Free the semData and requesting slave's base state for all cases
   7.524 +FreeSlaveStateAndReturn:
   7.525 +    VMS_PI__free(semData);
   7.526 +    VMS_PI__dissipate_slaveVP(requestingSlv);
   7.527 +    return;
   7.528        //Note, this is not a location to check for shutdown because doesn't
   7.529        // say anything about work availability here.. check for shutdown in
   7.530        // places try to get work for the core (in the assigner)
   7.531 - }
   7.532 -
   7.533 -   
   7.534 +}
   7.535  
   7.536  /*Re-use this in the entry-point fn
   7.537   */
   7.538  inline SlaveVP *
   7.539 -VSs__create_slave_helper( TopLevelFnPtr fnPtr, void *initData,
   7.540 -                          VSsSemEnv *semEnv,    int32 coreToAssignOnto )
   7.541 - { SlaveVP    *newSlv;
   7.542 -   VSsSemData   *semData;
   7.543 +VSs__create_slave_helper(TopLevelFnPtr fnPtr, void *initData,
   7.544 +        VSsSemEnv *semEnv, int32 coreToAssignOnto) {
   7.545 +    SlaveVP *newSlv;
   7.546 +    VSsSemData *semData;
   7.547  
   7.548 -      //This is running in master, so use internal version
   7.549 -   newSlv = VMS_PI__create_slaveVP( fnPtr, initData );
   7.550 +    //This is running in master, so use internal version
   7.551 +    newSlv = VMS_PI__create_slaveVP(fnPtr, initData);
   7.552  
   7.553 -      //task slaves differ from thread slaves by the settings in the taskStub
   7.554 -      //so, don't create task stub here, only create semData, which is same
   7.555 -      // for all kinds of slaves
   7.556 -   semData = VMS_PI__malloc( sizeof(VSsSemData) );
   7.557 -   semData->highestTransEntered = -1;
   7.558 -   semData->lastTransEntered    = NULL;
   7.559 -   semData->needsTaskAssigned   = TRUE;
   7.560 -   semData->taskStub            = NULL;
   7.561 +    //task slaves differ from thread slaves by the settings in the taskStub
   7.562 +    //so, don't create task stub here, only create semData, which is same
   7.563 +    // for all kinds of slaves
   7.564 +    semData = VMS_PI__malloc(sizeof (VSsSemData));
   7.565 +    semData->highestTransEntered = -1;
   7.566 +    semData->lastTransEntered = NULL;
   7.567 +    semData->needsTaskAssigned = TRUE;
   7.568 +    semData->taskStub = NULL;
   7.569 +
   7.570 +    newSlv->semanticData = semData;
   7.571 +
   7.572 +    //=================== Assign new processor to a core =====================
   7.573 +#ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
   7.574 +    newSlv->coreAnimatedBy = 0;
   7.575 +
   7.576 +#else
   7.577 +    //Assigning slaves to cores is part of SSR code..
   7.578 +    if (coreToAssignOnto < 0 || coreToAssignOnto >= NUM_CORES) { //out-of-range, so round-robin assignment
   7.579 +        newSlv->coreAnimatedBy = semEnv->nextCoreToGetNewSlv;
   7.580 +
   7.581 +        if (semEnv->nextCoreToGetNewSlv >= NUM_CORES - 1)
   7.582 +            semEnv->nextCoreToGetNewSlv = 0;
   7.583 +        else
   7.584 +            semEnv->nextCoreToGetNewSlv += 1;
   7.585 +    } else //core num in-range, so use it
   7.586 +    {
   7.587 +        newSlv->coreAnimatedBy = coreToAssignOnto;
   7.588 +    }
   7.589 +#endif
   7.590 +    //========================================================================
   7.591     
   7.592 -   newSlv->semanticData = semData;
   7.593 -
   7.594 -   //=================== Assign new processor to a core =====================
   7.595 -   #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
   7.596 -   newSlv->coreAnimatedBy = 0;
   7.597 -
   7.598 -   #else
   7.599 -      //Assigning slaves to cores is part of SSR code..
   7.600 -   if(coreToAssignOnto < 0 || coreToAssignOnto >= NUM_CORES )
   7.601 -    {    //out-of-range, so round-robin assignment
   7.602 -      newSlv->coreAnimatedBy = semEnv->nextCoreToGetNewSlv;
   7.603 -
   7.604 -      if( semEnv->nextCoreToGetNewSlv >= NUM_CORES - 1 )
   7.605 -          semEnv->nextCoreToGetNewSlv  = 0;
   7.606 -      else
   7.607 -          semEnv->nextCoreToGetNewSlv += 1;
   7.608 -    }
   7.609 -   else //core num in-range, so use it
   7.610 -    { newSlv->coreAnimatedBy = coreToAssignOnto;
   7.611 -    }
   7.612 -   #endif
   7.613 -   //========================================================================
   7.614 -   
   7.615 -   return newSlv;
   7.616 - }
   7.617 +    return newSlv;
   7.618 +}
   7.619  
   7.620  VSsTaskStub *
   7.621 -create_thread_task_stub( void *initData )
   7.622 - { VSsTaskStub *newStub;
   7.623 -         
   7.624 -   newStub = VMS_PI__malloc( sizeof(VSsTaskStub) );
   7.625 -   newStub->numBlockingProp = 0;
   7.626 -   newStub->slaveAssignedTo = NULL; //set later
   7.627 -   newStub->taskType        = IS_A_THREAD;
   7.628 -   newStub->ptrEntries      = NULL;
   7.629 -   newStub->args            = initData;  
   7.630 -   newStub->numLiveChildTasks              = 0;
   7.631 -   newStub->numLiveChildThreads            = 0;
   7.632 -   newStub->parentTaskStub                = NULL;
   7.633 -   newStub->isWaitingForChildTasksToEnd    = FALSE;
   7.634 -   newStub->isWaitingForChildThreadsToEnd  = FALSE;
   7.635 -   newStub->taskID          = NULL;
   7.636 +create_thread_task_stub(void *initData) {
   7.637 +    VSsTaskStub *newStub;
   7.638  
   7.639 -   return newStub;
   7.640 - }
   7.641 +    newStub = VMS_PI__malloc(sizeof (VSsTaskStub));
   7.642 +    newStub->numBlockingProp = 0;
   7.643 +    newStub->slaveAssignedTo = NULL; //set later
   7.644 +    newStub->taskType = IS_A_THREAD;
   7.645 +    newStub->ptrEntries = NULL;
   7.646 +    newStub->args = initData;
   7.647 +    newStub->numLiveChildTasks = 0;
   7.648 +    newStub->numLiveChildThreads = 0;
   7.649 +    newStub->parentTaskStub = NULL;
   7.650 +    newStub->isWaitingForChildTasksToEnd = FALSE;
   7.651 +    newStub->isWaitingForChildThreadsToEnd = FALSE;
   7.652 +    newStub->taskID = NULL;
   7.653 +
   7.654 +    return newStub;
   7.655 +}
   7.656  
   7.657  /*Application invokes this when it explicitly creates a thread via the
   7.658   * "VSs__create_thread()" command.
   7.659 @@ -417,89 +408,91 @@
   7.660   *When the slave calls dissipate, have to recycle the task stub.
   7.661   */
   7.662  inline void
   7.663 -handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VSsSemEnv *semEnv )
   7.664 - { VSsSemReq  *semReq;
   7.665 -   SlaveVP    *newSlv;
   7.666 -   VSsSemData *semData, *parentSemData;
   7.667 -   
   7.668 -   semReq = VMS_PI__take_sem_reqst_from( req );
   7.669 +handleCreate(VMSReqst *req, SlaveVP *requestingSlv, VSsSemEnv *semEnv) {
   7.670 +    VSsSemReq *semReq;
   7.671 +    SlaveVP *newSlv;
   7.672 +    VSsSemData *semData, *parentSemData;
   7.673  
   7.674 -   semEnv->numLiveThreadSlvs += 1;
   7.675 -   
   7.676 -      //Deceptive -- must work when creator is a normal task, or seed,
   7.677 -      // or another thd.. think have valid sem data and task stub for all
   7.678 -      //This hdlr is NOT called when creating the seed slave
   7.679 -   parentSemData = (VSsSemData *)semReq->callingSlv->semanticData;
   7.680 -   parentSemData->taskStub->numLiveChildThreads += 1;
   7.681 +    semReq = VMS_PI__take_sem_reqst_from(req);
   7.682  
   7.683 -      //use an idle "extra" slave, if have one
   7.684 -   newSlv = readPrivQ( semEnv->freeExtraTaskSlvQ );
   7.685 -   if( newSlv != NULL ) //got an idle one, so reset it
   7.686 -    { semData = (VSsSemData *)newSlv->semanticData;
   7.687 -      semData->highestTransEntered = -1;
   7.688 -      semData->lastTransEntered    = NULL;
   7.689 -      VMS_int__reset_slaveVP_to_TopLvlFn( newSlv, semReq->fnPtr, 
   7.690 -                                                         semReq->initData );
   7.691 -    }
   7.692 -   else //no idle ones, create a new
   7.693 -    { newSlv = VSs__create_slave_helper( semReq->fnPtr, semReq->initData,
   7.694 -                                         semEnv, semReq->coreToAssignOnto ); 
   7.695 -      semData = (VSsSemData *)newSlv->semanticData;
   7.696 +    semEnv->numLiveThreadSlvs += 1;
   7.697 +
   7.698 +    //Deceptive -- must work when creator is a normal task, or seed,
   7.699 +    // or another thd.. think have valid sem data and task stub for all
   7.700 +    //This hdlr is NOT called when creating the seed slave
   7.701 +    parentSemData = (VSsSemData *) semReq->callingSlv->semanticData;
   7.702 +    parentSemData->taskStub->numLiveChildThreads += 1;
   7.703 +
   7.704 +    //use an idle "extra" slave, if have one
   7.705 +    newSlv = readPrivQ(semEnv->freeExtraTaskSlvQ);
   7.706 +    if (newSlv != NULL) //got an idle one, so reset it
   7.707 +    {
   7.708 +        semData = (VSsSemData *) newSlv->semanticData;
   7.709 +        semData->highestTransEntered = -1;
   7.710 +        semData->lastTransEntered = NULL;
   7.711 +        VMS_int__reset_slaveVP_to_TopLvlFn(newSlv, semReq->fnPtr,
   7.712 +                semReq->initData);
   7.713 +    } else //no idle ones, create a new
   7.714 +    {
   7.715 +        newSlv = VSs__create_slave_helper(semReq->fnPtr, semReq->initData,
   7.716 +                semEnv, semReq->coreToAssignOnto);
   7.717 +        semData = (VSsSemData *) newSlv->semanticData;
   7.718      }
   7.719  
   7.720 -      //now, create a new task and assign to the thread
   7.721 -   semData->needsTaskAssigned = FALSE;  //thread has a permanent task
   7.722 -   semData->taskStub = create_thread_task_stub( semReq->initData );
   7.723 -   semData->taskStub->parentTaskStub = parentSemData->taskStub;
   7.724 -   semData->slaveType = ThreadSlv; //this hdlr only creates thread slvs
   7.725 +    //now, create a new task and assign to the thread
   7.726 +    semData->needsTaskAssigned = FALSE; //thread has a permanent task
   7.727 +    semData->taskStub = create_thread_task_stub(semReq->initData);
   7.728 +    semData->taskStub->parentTaskStub = parentSemData->taskStub;
   7.729 +    semData->slaveType = ThreadSlv; //this hdlr only creates thread slvs
   7.730  
   7.731 -         DEBUG__printf2(dbgRqstHdlr,"Create from: %d, new VP: %d",
   7.732 -                                    requestingSlv->slaveID, newSlv->slaveID)
   7.733 +    DEBUG__printf2(dbgRqstHdlr, "Create from: %d, new VP: %d",
   7.734 +            requestingSlv->slaveID, newSlv->slaveID)
   7.735  
   7.736 -   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   7.737 -   Dependency newd;
   7.738 -   newd.from_vp = requestingSlv->slaveID;
   7.739 -   newd.from_task = requestingSlv->assignCount;
   7.740 -   newd.to_vp = newSlv->slaveID;
   7.741 -   newd.to_task = 1;
   7.742 -   addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);   
   7.743 -   #endif
   7.744 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    7.745 +    Dependency newd;
   7.746 +    newd.from_vp = requestingSlv->slaveID;
   7.747 +    newd.from_task = requestingSlv->assignCount;
   7.748 +    newd.to_vp = newSlv->slaveID;
   7.749 +    newd.to_task = 1;
   7.750 +    addToListOfArrays(Dependency, newd, semEnv->commDependenciesList);
   7.751 +#endif
   7.752  
   7.753 -      //For VSs, caller needs ptr to created thread returned to it
   7.754 -   requestingSlv->dataRetFromReq = newSlv;
   7.755 -   resume_slaveVP(requestingSlv , semEnv );
   7.756 -   resume_slaveVP( newSlv,        semEnv );
   7.757 - }
   7.758 +    //For VSs, caller needs ptr to created thread returned to it
   7.759 +    requestingSlv->dataRetFromReq = newSlv;
   7.760 +    resume_slaveVP(requestingSlv, semEnv);
   7.761 +    resume_slaveVP(newSlv, semEnv);
   7.762 +}
   7.763  
   7.764  
   7.765  //=========================== Helper ==============================
   7.766 +
   7.767  void
   7.768 -resume_slaveVP( SlaveVP *slave, VSsSemEnv *semEnv )
   7.769 - {
   7.770 -      //both suspended tasks and suspended explicit slaves resumed with this
   7.771 -   writePrivQ( slave, semEnv->slavesReadyToResumeQ );
   7.772 -   
   7.773 -   #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
   7.774 -/*
   7.775 -   int lastRecordIdx = slave->counter_history_array_info->numInArray -1;
   7.776 -   CounterRecord* lastRecord = slave->counter_history[lastRecordIdx];
   7.777 -   saveLowTimeStampCountInto(lastRecord->unblocked_timestamp);
   7.778 -*/
   7.779 -   #endif
   7.780 -   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   7.781 -   slave->assignCount++; //Somewhere here!
   7.782 -   Unit newu;
   7.783 -   newu.vp = slave->slaveID;
   7.784 -   newu.task = slave->assignCount;
   7.785 -   addToListOfArrays(Unit,newu,semEnv->unitList);
   7.786 -   
   7.787 -   if (slave->assignCount > 1){
   7.788 +resume_slaveVP(SlaveVP *slave, VSsSemEnv *semEnv) {
   7.789 +    //both suspended tasks and suspended explicit slaves resumed with this
   7.790 +    writePrivQ(slave, semEnv->slavesReadyToResumeQ);
   7.791 +
   7.792 +#ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
   7.793 +    /*
   7.794 +       int lastRecordIdx = slave->counter_history_array_info->numInArray -1;
   7.795 +       CounterRecord* lastRecord = slave->counter_history[lastRecordIdx];
   7.796 +       saveLowTimeStampCountInto(lastRecord->unblocked_timestamp);
   7.797 +     */
   7.798 +#endif
   7.799 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   7.800 +    /*
   7.801 +    slave->assignCount++; //Somewhere here!
   7.802 +    Unit newu;
   7.803 +    newu.vp = slave->slaveID;
   7.804 +    newu.task = slave->assignCount;
   7.805 +    addToListOfArrays(Unit, newu, semEnv->unitList);
   7.806 +
   7.807 +    if (slave->assignCount > 1) {
   7.808          Dependency newd;
   7.809          newd.from_vp = slave->slaveID;
   7.810          newd.from_task = slave->assignCount - 1;
   7.811          newd.to_vp = slave->slaveID;
   7.812          newd.to_task = slave->assignCount;
   7.813 -        addToListOfArrays(Dependency, newd ,semEnv->ctlDependenciesList);  
   7.814 -   }
   7.815 -   #endif
   7.816 - }
   7.817 +        addToListOfArrays(Dependency, newd, semEnv->ctlDependenciesList);
   7.818 +    }*/
   7.819 +#endif
   7.820 +}
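
Beyond the whitespace reflow, the substantive change in this file is that UCC bookkeeping now lives entirely in the assigner: assignCount is bumped once per assignment (and only for Slave and Idle VPs), a Unit is logged, a ctl arc links consecutive assignments of the same slave, and a hw arc chains onto whatever previously ran in the same (core, slot); the duplicate recording in resume_slaveVP is commented out so a unit is no longer counted twice. The slot-chaining idea in isolation, as a self-contained sketch (Unit and Dep mirror the structs used above; the sizes and the emit callback are illustrative):

    #define NUM_CORES      4   /* illustrative values */
    #define NUM_ANIM_SLOTS 2

    typedef struct { int vp, task; } Unit;
    typedef struct { int from_vp, from_task, to_vp, to_task; } Dep;

    static Unit last_in_slot[NUM_CORES * NUM_ANIM_SLOTS]; /* zeroed at start */

    /* Record a hardware arc from the previous unit animated in (core, slot)
     * to the current one, then make the current unit the new predecessor. */
    static void record_hw_arc(int coreNum, int slotNum, Unit curr,
                              void (*emit)(Dep))
    {
        Unit *prev = &last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum];
        if (prev->vp != 0) /* vp 0 means the slot has not run anything yet */
         { Dep d = { prev->vp, prev->task, curr.vp, curr.task };
           emit(d);
         }
        *prev = curr;
    }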
     8.1 --- a/VSs_Request_Handlers.c	Thu Aug 23 03:21:03 2012 -0700
     8.2 +++ b/VSs_Request_Handlers.c	Tue Aug 28 15:33:16 2012 +0200
     8.3 @@ -18,7 +18,7 @@
     8.4  
     8.5  //=========================== Local Fn Prototypes ===========================
     8.6  void
     8.7 -resume_slaveVP( SlaveVP *slave, VSsSemEnv *semEnv );
     8.8 +resume_slaveVP(SlaveVP *slave, VSsSemEnv *semEnv);
     8.9  
    8.10  
    8.11  
    8.12 @@ -29,94 +29,97 @@
    8.13  /*Only clone the elements of req used in these reqst handlers
    8.14   */
    8.15  VSsSemReq *
    8.16 -cloneReq( VSsSemReq *semReq )
    8.17 - { VSsSemReq *clonedReq;
    8.18 +cloneReq(VSsSemReq *semReq) {
    8.19 +    VSsSemReq *clonedReq;
    8.20  
    8.21 -   clonedReq             = VMS_PI__malloc( sizeof(VSsSemReq) );
    8.22 -   clonedReq->reqType    = semReq->reqType;
    8.23 -   clonedReq->senderSlv  = semReq->senderSlv;
    8.24 -   clonedReq->receiverSlv= semReq->receiverSlv;
    8.25 -   clonedReq->msg        = semReq->msg;
    8.26 -   clonedReq->nextReqInHashEntry = NULL;
    8.27 -   
    8.28 -   return clonedReq;
    8.29 - }
    8.30 +    clonedReq = VMS_PI__malloc(sizeof (VSsSemReq));
    8.31 +    clonedReq->reqType = semReq->reqType;
    8.32 +    clonedReq->senderSlv = semReq->senderSlv;
    8.33 +    clonedReq->receiverSlv = semReq->receiverSlv;
    8.34 +    clonedReq->msg = semReq->msg;
    8.35 +    clonedReq->nextReqInHashEntry = NULL;
    8.36  
    8.37 -
    8.38 +    return clonedReq;
    8.39 +}
    8.40  
    8.41  HashEntry *
    8.42 -giveEntryElseInsertReqst32( int32 *key, VSsSemReq *semReq,
    8.43 -                            HashTable   *commHashTbl )
    8.44 - { HashEntry    *entry;
    8.45 -   VSsSemReq    *waitingReq;
    8.46 +giveEntryElseInsertReqst32(int32 *key, VSsSemReq *semReq,
    8.47 +        HashTable *commHashTbl) {
    8.48 +    HashEntry *entry;
    8.49 +    VSsSemReq *waitingReq;
    8.50  
    8.51 -   entry = getEntryFromTable32( key, commHashTbl );
    8.52 -   if( entry == NULL )
    8.53 -    {    //no waiting sends or receives, so add this request and exit
    8.54 -         // note: have to clone the request because it's on stack of sender
    8.55 -      addValueIntoTable32( key, cloneReq( semReq ), commHashTbl );
    8.56 -      return NULL;
    8.57 +    entry = getEntryFromTable32(key, commHashTbl);
    8.58 +    if (entry == NULL) { //no waiting sends or receives, so add this request and exit
    8.59 +        // note: have to clone the request because it's on stack of sender
    8.60 +        addValueIntoTable32(key, cloneReq(semReq), commHashTbl);
    8.61 +        return NULL;
    8.62      }
    8.63 -   waitingReq = (VSsSemReq *)entry->content;
    8.64 -   if( waitingReq == NULL )  //might happen when last waiting gets paired
    8.65 -    {    //no waiting sends or receives, so add this request and exit
    8.66 -      entry->content = semReq;
    8.67 -      return NULL;
    8.68 +    waitingReq = (VSsSemReq *) entry->content;
    8.69 +    if (waitingReq == NULL) //might happen when last waiting gets paired
    8.70 +    { //no waiting sends or receives, so add this request and exit
    8.71 +        entry->content = semReq;
    8.72 +        return NULL;
    8.73      }
    8.74 -   return entry;
    8.75 - }
    8.76 +    return entry;
    8.77 +}
    8.78  
    8.79 -      
    8.80  inline VSsPointerEntry *
    8.81 -create_pointer_entry( )
    8.82 - { VSsPointerEntry *newEntry;
    8.83 -   
    8.84 -   newEntry = VMS_PI__malloc( sizeof(VSsPointerEntry) );
    8.85 -   newEntry->hasEnabledNonFinishedWriter = FALSE;
    8.86 -   newEntry->numEnabledNonDoneReaders    = 0;
    8.87 -   newEntry->waitersQ                    = makePrivQ();
    8.88 -      
    8.89 -   return newEntry;
    8.90 - }
    8.91 +create_pointer_entry() {
    8.92 +    VSsPointerEntry *newEntry;
    8.93 +
    8.94 +    newEntry = VMS_PI__malloc(sizeof (VSsPointerEntry));
    8.95 +    newEntry->hasEnabledNonFinishedWriter = FALSE;
    8.96 +    newEntry->numEnabledNonDoneReaders = 0;
    8.97 +    newEntry->waitersQ = makePrivQ();
    8.98 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    8.99 +    newEntry->lastWriterVP = 0;
   8.100 +    newEntry->lastWriterTask = 0;
   8.101 +#endif
   8.102 +    return newEntry;
   8.103 +}
   8.104  
   8.105  /*malloc's space and initializes fields -- and COPIES the arg values
   8.106   * to new space
   8.107   */
   8.108  inline VSsTaskStub *
   8.109 -create_task_stub( VSsTaskType *taskType, void **args )
   8.110 - { void **newArgs;
   8.111 -   VSsTaskStub* newStub = VMS_int__malloc( sizeof(VSsTaskStub) + taskType->sizeOfArgs );
   8.112 -   newStub->numBlockingProp = taskType->numCtldArgs;
   8.113 -   newStub->slaveAssignedTo = NULL;
   8.114 -   newStub->taskType   = taskType;
   8.115 -   newStub->ptrEntries = 
   8.116 -      VMS_int__malloc( taskType->numCtldArgs * sizeof(VSsPointerEntry *) );
   8.117 -   newArgs = (void **)( (uint8 *)newStub + sizeof(VSsTaskStub) );
   8.118 -   newStub->args = newArgs;
   8.119 -   newStub->numLiveChildTasks   = 0;
   8.120 -   newStub->numLiveChildThreads = 0;
   8.121 -   newStub->isEnded = FALSE;
   8.122 -   
   8.123 -      //Copy the arg-pointers.. can be more arguments than just the ones 
   8.124 -      // that StarSs uses to control ordering of task execution.
   8.125 -   memcpy( newArgs, args, taskType->sizeOfArgs );
   8.126 -   
   8.127 -   return newStub;
   8.128 - }
   8.129 +create_task_stub(VSsTaskType *taskType, void **args) {
   8.130 +    void **newArgs;
   8.131 +    VSsTaskStub* newStub = VMS_int__malloc(sizeof (VSsTaskStub) + taskType->sizeOfArgs);
   8.132 +    newStub->numBlockingProp = taskType->numCtldArgs;
   8.133 +    newStub->slaveAssignedTo = NULL;
   8.134 +    newStub->taskType = taskType;
   8.135 +    newStub->ptrEntries =
   8.136 +            VMS_int__malloc(taskType->numCtldArgs * sizeof (VSsPointerEntry *));
   8.137 +    newArgs = (void **) ((uint8 *) newStub + sizeof (VSsTaskStub));
   8.138 +    newStub->args = newArgs;
   8.139 +    newStub->numLiveChildTasks = 0;
   8.140 +    newStub->numLiveChildThreads = 0;
   8.141 +    newStub->isWaitingForChildTasksToEnd = FALSE;
   8.142 +    newStub->isWaitingForChildThreadsToEnd = FALSE;
   8.143 +    newStub->isEnded = FALSE;
   8.144 +    newStub->taskID = NULL;
   8.145 +    newStub->parentTaskStub = NULL;
   8.146 +    //Copy the arg-pointers.. can be more arguments than just the ones 
   8.147 +    // that StarSs uses to control ordering of task execution.
   8.148 +    memcpy(newArgs, args, taskType->sizeOfArgs);
   8.149 +
   8.150 +    return newStub;
   8.151 +}
   8.152  
   8.153  inline VSsTaskStubCarrier *
   8.154 -create_task_carrier( VSsTaskStub *taskStub, int32 argNum, int32 rdOrWrite )
   8.155 - { VSsTaskStubCarrier *newCarrier;
   8.156 - 
   8.157 -   newCarrier = VMS_PI__malloc( sizeof(VSsTaskStubCarrier) );
   8.158 -   newCarrier->taskStub = taskStub;
   8.159 -   newCarrier->argNum   = argNum;
   8.160 -   newCarrier->isReader = rdOrWrite == READER;
   8.161 - }
   8.162 +create_task_carrier(VSsTaskStub *taskStub, int32 argNum, int32 rdOrWrite) {
   8.163 +    VSsTaskStubCarrier *newCarrier;
   8.164 +
   8.165 +    newCarrier = VMS_PI__malloc(sizeof (VSsTaskStubCarrier));
   8.166 +    newCarrier->taskStub = taskStub;
   8.167 +    newCarrier->argNum = argNum;
   8.168 +    newCarrier->isReader = rdOrWrite == READER;
   8.169 +}
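
One pre-existing bug survives the reformatting: create_task_carrier fills in newCarrier but never returns it -- both the old and the new body fall off the end of a function declared to return VSsTaskStubCarrier *, which is undefined behavior once a caller uses the result. The corrected body would presumably be:

    inline VSsTaskStubCarrier *
    create_task_carrier(VSsTaskStub *taskStub, int32 argNum, int32 rdOrWrite) {
        VSsTaskStubCarrier *newCarrier;

        newCarrier = VMS_PI__malloc(sizeof (VSsTaskStubCarrier));
        newCarrier->taskStub = taskStub;
        newCarrier->argNum = argNum;
        newCarrier->isReader = rdOrWrite == READER;

        return newCarrier; /* the missing statement */
    }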
   8.170  
   8.171  //==========================================================================
   8.172  //
   8.173  //
   8.174 +
   8.175  /*Submit Task
   8.176   * 
   8.177   *Uses a hash table to match the arg-pointers to each other. So, an
   8.178 @@ -215,126 +218,115 @@
   8.179   *That should be it -- that should work.
   8.180   */
   8.181  inline void
   8.182 -handleSubmitTask( VSsSemReq *semReq, VSsSemEnv *semEnv )
   8.183 - { uint32           key[3];
   8.184 -   HashEntry       *rawHashEntry; //has char *, but use with uint32 *
   8.185 -   VSsPointerEntry *ptrEntry; //contents of hash table entry for an arg pointer
   8.186 -   void           **args;
   8.187 -   VSsTaskStub     *taskStub;
   8.188 -   VSsTaskType     *taskType;
   8.189 -   VSsTaskStubCarrier *taskCarrier;
   8.190 -   
   8.191 -   HashTable *
   8.192 -   argPtrHashTbl = semEnv->argPtrHashTbl;
   8.193 -   
   8.194 +handleSubmitTask(VSsSemReq *semReq, VSsSemEnv *semEnv) {
   8.195 +    uint32 key[3];
   8.196 +    HashEntry *rawHashEntry; //has char *, but use with uint32 *
   8.197 +    VSsPointerEntry *ptrEntry; //contents of hash table entry for an arg pointer
   8.198 +    void **args;
   8.199 +    VSsTaskStub *taskStub;
   8.200 +    VSsTaskType *taskType;
   8.201 +    VSsTaskStubCarrier *taskCarrier;
   8.202 +
   8.203 +    HashTable *
   8.204 +            argPtrHashTbl = semEnv->argPtrHashTbl;
   8.205 +
   8.206        //suspending a task always makes the slave into an extra slot slave,
   8.207        // because it ends up in the resumeQ, even when resumes immediately.
   8.208        //Eventually task_end will put the slave into the freeExtraTaskSlvQ
   8.209     replaceWithNewSlotSlvIfNeeded( semReq->callingSlv, semEnv );
   8.210 - 
   8.211 -   /* ==========================  creation  ========================== 
   8.212 -    * 
   8.213 -    *At creation, make a task-stub.  Set the count of blocking propendents
   8.214 -    * to the number of controlled arguments (a task can have
   8.215 -    * arguments that are not controlled by the language, like simple integer
   8.216 -    * inputs from the sequential portion. Note that all controlled arguments
   8.217 -    * are pointers, and marked as controlled in the application code).
   8.218 -    */
   8.219 -   args     = semReq->args;
   8.220 -   taskType = semReq->taskType;
   8.221 -   taskStub = create_task_stub( taskType, args );//copies arg ptrs
   8.222 -   taskStub->numBlockingProp = taskType->numCtldArgs;
   8.223 -   taskStub->taskID          = semReq->taskID; //may be NULL
   8.224 -   
   8.225 -   VSsSemData* 
   8.226 -   parentSemData = (VSsSemData*) semReq->callingSlv->semanticData;
   8.227 -   taskStub->parentTaskStub = (void*) parentSemData->taskStub;
   8.228 -   parentSemData->taskStub->numLiveChildTasks += 1;
   8.229 -   
   8.230 +
   8.231 +    /* ==========================  creation  ========================== 
   8.232 +     * 
   8.233 +     *At creation, make a task-stub.  Set the count of blocking propendents
   8.234 +     * to the number of controlled arguments (a task can have
   8.235 +     * arguments that are not controlled by the language, like simple integer
   8.236 +     * inputs from the sequential portion. Note that all controlled arguments
   8.237 +     * are pointers, and marked as controlled in the application code).
   8.238 +     */
   8.239 +    args = semReq->args;
   8.240 +    taskType = semReq->taskType;
   8.241 +    taskStub = create_task_stub(taskType, args); //copies arg ptrs
   8.242 +    taskStub->numBlockingProp = taskType->numCtldArgs;
   8.243 +    taskStub->taskID = semReq->taskID; //may be NULL
   8.244 +
   8.245 +    VSsSemData*
   8.246 +            parentSemData = (VSsSemData*) semReq->callingSlv->semanticData;
   8.247 +    taskStub->parentTaskStub = (void*) parentSemData->taskStub;
   8.248 +    parentSemData->taskStub->numLiveChildTasks += 1;
   8.249 +
   8.250           //DEBUG__printf3(dbgRqstHdlr,"Submit req from slaveID: %d, from task: %d, for task: %d", semReq->callingSlv->slaveID, parentSemData->taskStub->taskID[1], taskStub->taskID[1])
   8.251 -         DEBUG__printf2(dbgRqstHdlr,"Submit req from slaveID: %d, for task: %d", semReq->callingSlv->slaveID, taskStub->taskID[1])
   8.252 -          
   8.253 -   /*The controlled arguments are then processed one by one.
   8.254 -    *Processing an argument means getting the hash of the pointer.  Then,
   8.255 -    * looking up the hash entry.  (If none, create one).
   8.256 -    */
   8.257 -   int32 argNum;
   8.258 -   for( argNum = 0; argNum < taskType->numCtldArgs; argNum++ )
   8.259 -    { 
   8.260 -      key[0] = 2; //two 32b values in key
   8.261 -      *( (uint64*)&key[1]) = (uint64)args[argNum];  //write 64b into two 32b
   8.262 +       if(semReq->taskID) { DEBUG__printf2(dbgRqstHdlr,"Submit req from slaveID: %d, for task: %d", semReq->callingSlv->slaveID, taskStub->taskID[1]) }
   8.263 +       else { DEBUG__printf1(dbgRqstHdlr,"Submit req from slaveID: %d, for anonymous task", semReq->callingSlv->slaveID) }
   8.264 +    /*The controlled arguments are then processed one by one.
   8.265 +     *Processing an argument means getting the hash of the pointer.  Then,
   8.266 +     * looking up the hash entry.  (If none, create one).
   8.267 +     */
   8.268 +    int32 argNum;
   8.269 +    for (argNum = 0; argNum < taskType->numCtldArgs; argNum++) {
   8.270 +        key[0] = 2; //two 32b values in key
   8.271 +        *((uint64*) & key[1]) = (uint64) args[argNum]; //write 64b into two 32b
   8.272  
   8.273 -      /*If the hash entry was chained, put it at the
   8.274 -       * start of the chain.  (Means no-longer-used pointers accumulate
   8.275 -       * at end of chain, decide garbage collection later) */
   8.276 -      rawHashEntry = getEntryFromTable32( key, argPtrHashTbl );
   8.277 -      if( rawHashEntry == NULL )
   8.278 -       {    //adding a value auto-creates the hash-entry
   8.279 -         ptrEntry = create_pointer_entry();
   8.280 -         rawHashEntry = addValueIntoTable32( key, ptrEntry, argPtrHashTbl );
   8.281 -       }
   8.282 -      else
   8.283 -       { ptrEntry = (VSsPointerEntry *)rawHashEntry->content;
   8.284 -         if( ptrEntry == NULL )
   8.285 -          { ptrEntry = create_pointer_entry();
   8.286 +        /*If the hash entry was chained, put it at the
   8.287 +         * start of the chain.  (Means no-longer-used pointers accumulate
   8.288 +         * at end of chain, decide garbage collection later) */
   8.289 +        rawHashEntry = getEntryFromTable32(key, argPtrHashTbl);
   8.290 +        if (rawHashEntry == NULL) { //adding a value auto-creates the hash-entry
   8.291 +            ptrEntry = create_pointer_entry();
   8.292              rawHashEntry = addValueIntoTable32(key, ptrEntry, argPtrHashTbl);
   8.293 -          }
   8.294 -       }
   8.295 -      taskStub->ptrEntries[argNum] = ptrEntry;
   8.296 -      
   8.297 -      /*Have the hash entry.
   8.298 -       *If the arg is a reader and the entry does not have an enabled
   8.299 -       * non-finished writer, and the queue is empty. */
   8.300 -      if( taskType->argTypes[argNum] == READER )
   8.301 -       { if( !ptrEntry->hasEnabledNonFinishedWriter && 
   8.302 -             isEmptyPrivQ( ptrEntry->waitersQ ) )
   8.303 -          { /*The reader is free.  So, decrement the blocking-propendent
   8.304 +        } else {
   8.305 +            ptrEntry = (VSsPointerEntry *) rawHashEntry->content;
   8.306 +            if (ptrEntry == NULL) {
   8.307 +                ptrEntry = create_pointer_entry();
   8.308 +                rawHashEntry = addValueIntoTable32(key, ptrEntry, argPtrHashTbl);
   8.309 +            }
   8.310 +        }
   8.311 +        taskStub->ptrEntries[argNum] = ptrEntry;
   8.312 +
   8.313 +        /*Have the hash entry.
   8.314 +         *If the arg is a reader and the entry does not have an enabled
   8.315 +         * non-finished writer, and the queue is empty. */
   8.316 +        if (taskType->argTypes[argNum] == READER) {
   8.317 +            if (!ptrEntry->hasEnabledNonFinishedWriter &&
   8.318 +                    isEmptyPrivQ(ptrEntry->waitersQ)) { /*The reader is free.  So, decrement the blocking-propendent
   8.319               * count in the task-stub. If the count is zero, then put the
   8.320               * task-stub into the readyQ.  At the same time, increment
   8.321               * the hash-entry's count of enabled and non-finished readers.*/
   8.322 -            taskStub->numBlockingProp -= 1;
   8.323 -            if( taskStub->numBlockingProp == 0 )
   8.324 -             { writePrivQ( taskStub, semEnv->taskReadyQ );
   8.325 -             }
   8.326 -            ptrEntry->numEnabledNonDoneReaders += 1;
   8.327 -          }
   8.328 -         else
   8.329 -          { /*Otherwise, the reader is put into the hash-entry's Q of
   8.330 +                taskStub->numBlockingProp -= 1;
   8.331 +                if (taskStub->numBlockingProp == 0) {
   8.332 +                    writePrivQ(taskStub, semEnv->taskReadyQ);
   8.333 +                }
   8.334 +                ptrEntry->numEnabledNonDoneReaders += 1;
   8.335 +            } else { /*Otherwise, the reader is put into the hash-entry's Q of
   8.336               * waiters*/
   8.337 -            taskCarrier = create_task_carrier( taskStub, argNum, READER );
   8.338 -            writePrivQ( taskCarrier, ptrEntry->waitersQ );
   8.339 -          }
   8.340 -       }
   8.341 -      else //arg is a writer
   8.342 -       { /*the arg is a writer, plus the entry does not have a current
   8.343 +                taskCarrier = create_task_carrier(taskStub, argNum, READER);
   8.344 +                writePrivQ(taskCarrier, ptrEntry->waitersQ);
   8.345 +            }
   8.346 +        } else //arg is a writer
   8.347 +        { /*the arg is a writer, plus the entry does not have a current
   8.348            * writer, plus the number of enabled non-finished readers is
   8.349            * zero, (the Q must be empty, else bug!) then the writer is free*/
   8.350 -         if( !ptrEntry->hasEnabledNonFinishedWriter &&
   8.351 -              ptrEntry->numEnabledNonDoneReaders == 0 )
   8.352 -          { /*Mark the entry has having a enabled and non-finished writer.
   8.353 +            if (!ptrEntry->hasEnabledNonFinishedWriter &&
    8.354 +                    ptrEntry->numEnabledNonDoneReaders == 0) { /*Mark the entry as having an enabled and non-finished writer.
    8.355 +              * Decrement the blocking-propendent count in the writer's
   8.356                * task-stub. If the count is zero, then put the task-stub
   8.357                * into the readyQ.*/
   8.358 -            taskStub->numBlockingProp -= 1;
   8.359 -            if( taskStub->numBlockingProp == 0 )
   8.360 -             { writePrivQ( taskStub, semEnv->taskReadyQ );
   8.361 -             }
   8.362 -            ptrEntry->hasEnabledNonFinishedWriter = TRUE;
   8.363 -          }
   8.364 -         else
   8.365 -          {/*Otherwise, put the writer into the entry's Q of waiters.*/
   8.366 -            taskCarrier = create_task_carrier( taskStub, argNum, WRITER );
   8.367 -            writePrivQ( taskCarrier, ptrEntry->waitersQ );            
   8.368 -          }
   8.369 -       }
   8.370 +                taskStub->numBlockingProp -= 1;
   8.371 +                if (taskStub->numBlockingProp == 0) {
   8.372 +                    writePrivQ(taskStub, semEnv->taskReadyQ);
   8.373 +                }
   8.374 +                ptrEntry->hasEnabledNonFinishedWriter = TRUE;
   8.375 +            } else {/*Otherwise, put the writer into the entry's Q of waiters.*/
   8.376 +                taskCarrier = create_task_carrier(taskStub, argNum, WRITER);
   8.377 +                writePrivQ(taskCarrier, ptrEntry->waitersQ);
   8.378 +            }
   8.379 +        }
   8.380      } //for argNum
   8.381 -   
   8.382 -   
   8.383 -   resume_slaveVP( semReq->callingSlv, semEnv );
   8.384  
   8.385 -   return;
   8.386 - }
   8.387  
   8.388 +    resume_slaveVP(semReq->callingSlv, semEnv);
   8.389 +
   8.390 +    return;
   8.391 +}
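
The loop above enforces a readers-writer discipline per controlled arg. A
minimal restatement of its admission rule, as a sketch using the field names
from the hunk above (hypothetical helper, not code from the patch):

    /* TRUE when a newly submitted task may run immediately w.r.t. this arg */
    static inline int32
    arg_is_free_now(VSsPointerEntry *e, int32 argType) {
        if (argType == READER) //a reader waits only on a pending/active writer
            return !e->hasEnabledNonFinishedWriter && isEmptyPrivQ(e->waitersQ);
        //a writer needs exclusivity: no active writer and no active readers
        return !e->hasEnabledNonFinishedWriter &&
                e->numEnabledNonDoneReaders == 0;
    }

When every controlled arg is free, numBlockingProp counts down to zero and the
stub goes straight into taskReadyQ; each blocked arg instead parks a
task-carrier in that pointer's waitersQ.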
   8.392  
   8.393  /* ========================== end of task ===========================
   8.394   * 
   8.395 @@ -372,177 +364,193 @@
   8.396   * and no readers and no writers..
   8.397   */
   8.398  inline void
   8.399 -handleEndTask( VSsSemReq *semReq, VSsSemEnv *semEnv )
   8.400 - { VSsPointerEntry  *ptrEntry; //contents of hash table entry for an arg pointer
   8.401 -   void            **args;
   8.402 -   VSsSemData       *endingSlvSemData;
   8.403 -   VSsTaskStub      *endingTaskStub, *waitingTaskStub, *parent;
   8.404 -   VSsTaskType      *endingTaskType;
   8.405 -   VSsTaskStubCarrier *waitingTaskCarrier;
   8.406 -   VSsPointerEntry **ptrEntries;
   8.407 -         
   8.408 - 
   8.409 -   endingSlvSemData = (VSsSemData *)semReq->callingSlv->semanticData;
   8.410 -   endingTaskStub   = endingSlvSemData->taskStub;
   8.411 -   args             = endingTaskStub->args;
   8.412 -   endingTaskType   = endingTaskStub->taskType;
   8.413 -   ptrEntries       = endingTaskStub->ptrEntries; //saved in stub when create
   8.414 -   
   8.415 -         DEBUG__printf2(dbgRqstHdlr,"EndTask req from slaveID: %d, task: %d",semReq->callingSlv->slaveID, endingTaskStub->taskID[1])
   8.416 +handleEndTask(VSsSemReq *semReq, VSsSemEnv *semEnv) {
   8.417 +    VSsPointerEntry *ptrEntry; //contents of hash table entry for an arg pointer
   8.418 +    void **args;
   8.419 +    VSsSemData *endingSlvSemData;
   8.420 +    VSsTaskStub *endingTaskStub, *waitingTaskStub, *parent;
   8.421 +    VSsTaskType *endingTaskType;
   8.422 +    VSsTaskStubCarrier *waitingTaskCarrier;
   8.423 +    VSsPointerEntry **ptrEntries;
   8.424 +
   8.425 +
   8.426 +    endingSlvSemData = (VSsSemData *) semReq->callingSlv->semanticData;
   8.427 +    endingTaskStub = endingSlvSemData->taskStub;
   8.428 +    args = endingTaskStub->args;
   8.429 +    endingTaskType = endingTaskStub->taskType;
   8.430 +    ptrEntries = endingTaskStub->ptrEntries; //saved in stub when create
   8.431 +
    8.432 +    if(endingTaskStub->taskID) { DEBUG__printf2(dbgRqstHdlr,"EndTask req from slaveID: %d, task: %d",semReq->callingSlv->slaveID, endingTaskStub->taskID[1]) } //guard the field actually dereferenced
   8.433 +    else {DEBUG__printf1(dbgRqstHdlr,"EndTask req from slaveID: %d",semReq->callingSlv->slaveID)}
   8.434            
   8.435 -      //Check if parent was waiting on this task
   8.436 -   parent = (VSsTaskStub *) endingTaskStub->parentTaskStub;
   8.437 -   parent->numLiveChildTasks -= 1;
   8.438 -   if( parent->isWaitingForChildTasksToEnd && parent->numLiveChildTasks == 0)
   8.439 -    {
   8.440 -      parent->isWaitingForChildTasksToEnd = FALSE;
   8.441 -      resume_slaveVP( parent->slaveAssignedTo, semEnv );
   8.442 +    //Check if parent was waiting on this task
   8.443 +    parent = (VSsTaskStub *) endingTaskStub->parentTaskStub;
   8.444 +    parent->numLiveChildTasks -= 1;
   8.445 +    
    8.446 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    8.447 +    if (parent->isWaitingForChildTasksToEnd) {
    8.448 +        Dependency newd;
    8.449 +        newd.from_vp = semReq->callingSlv->slaveID;
    8.450 +        newd.from_task = semReq->callingSlv->assignCount;
    8.451 +        newd.to_vp = parent->slaveAssignedTo->slaveID;
    8.452 +        newd.to_task = parent->slaveAssignedTo->assignCount + 1;
    8.453 +        addToListOfArrays(Dependency, newd, semEnv->commDependenciesList);
    8.454 +    }
    8.455 +#endif
   8.456 +    
   8.457 +    if (parent->isWaitingForChildTasksToEnd && parent->numLiveChildTasks == 0) {
   8.458 +        parent->isWaitingForChildTasksToEnd = FALSE;
   8.459 +        resume_slaveVP(parent->slaveAssignedTo, semEnv);
   8.460      }
   8.461 -   
   8.462 -      //Check if parent ended, and this was last descendent, then free it
   8.463 -   if( parent->isEnded && parent->numLiveChildTasks == 0 )
   8.464 -    { VMS_PI__free( parent );
   8.465 +
   8.466 +    //Check if parent ended, and this was last descendent, then free it
   8.467 +    if (parent->isEnded && parent->numLiveChildTasks == 0) {
   8.468 +        VMS_PI__free(parent);
   8.469      }
   8.470 -   
   8.471 -   
   8.472 -      //Now, update state of dependents and start ready tasks
   8.473 -   /*The task's controlled arguments are processed one by one.
   8.474 -    *Processing an argument means getting arg-pointer's entry.
   8.475 -    */
   8.476 -   int32 argNum;
   8.477 -   for( argNum = 0; argNum < endingTaskType->numCtldArgs; argNum++ )
   8.478 -    { 
   8.479 -      /* commented out 'cause remembering entry ptr when create stub
   8.480 -      key[0] = 2; //says are 2 32b values in key
   8.481 -      *( (uint64*)&key[1] ) = args[argNum];  //write 64b ptr into two 32b
   8.482  
   8.483 -       /*If the hash entry was chained, put it at the
   8.484 -       * start of the chain.  (Means no-longer-used pointers accumulate
   8.485 -       * at end of chain, decide garbage collection later) 
   8.486 -       */
   8.487 -      /*NOTE: don't do hash lookups here, instead, have a pointer to the
   8.488 -       * hash entry inside task-stub, put there during task creation.
   8.489 -      rawHashEntry = getEntryFromTable32( key, ptrHashTbl );
   8.490 -      ptrEntry = (VSsPointerEntry *)rawHashEntry->content;
   8.491 -      if( ptrEntry == NULL ) 
   8.492 -          VMS_App__throw_exception("hash entry NULL", NULL, NULL);
   8.493 -      */ 
   8.494 -      
   8.495 -      ptrEntry = ptrEntries[argNum];
   8.496 -      /*check if the ending task was reader of this arg*/
   8.497 -      if( endingTaskType->argTypes[argNum] == READER )
   8.498 -       { /*then decrement the enabled and non-finished reader-count in
   8.499 -          * the hash-entry. */ 
   8.500 -         ptrEntry->numEnabledNonDoneReaders -= 1;
   8.501 -         
   8.502 -         /*If the count becomes zero, then take the next entry from the Q. 
   8.503 -          *It should be a writer, or else there's a bug in this algorithm.*/
   8.504 -         if( ptrEntry->numEnabledNonDoneReaders == 0 )
   8.505 -          { waitingTaskCarrier = readPrivQ( ptrEntry->waitersQ );
   8.506 -            if( waitingTaskCarrier == NULL ) 
   8.507 -             { //TODO: looks safe to delete the ptr entry at this point 
   8.508 -               continue; //next iter of loop
   8.509 -             }
   8.510 -            if( waitingTaskCarrier->isReader ) 
   8.511 -               VMS_App__throw_exception("READER waiting", NULL, NULL);
   8.512 -                   
   8.513 +
   8.514 +    //Now, update state of dependents and start ready tasks
   8.515 +    /*The task's controlled arguments are processed one by one.
   8.516 +     *Processing an argument means getting arg-pointer's entry.
   8.517 +     */
   8.518 +    int32 argNum;
   8.519 +    for (argNum = 0; argNum < endingTaskType->numCtldArgs; argNum++) {
   8.520 +        /* commented out 'cause remembering entry ptr when create stub
   8.521 +        key[0] = 2; //says are 2 32b values in key
   8.522 +         *( (uint64*)&key[1] ) = args[argNum];  //write 64b ptr into two 32b
   8.523 +
   8.524 +         /*If the hash entry was chained, put it at the
   8.525 +         * start of the chain.  (Means no-longer-used pointers accumulate
   8.526 +         * at end of chain, decide garbage collection later) 
   8.527 +         */
   8.528 +        /*NOTE: don't do hash lookups here, instead, have a pointer to the
   8.529 +         * hash entry inside task-stub, put there during task creation.
   8.530 +        rawHashEntry = getEntryFromTable32( key, ptrHashTbl );
   8.531 +        ptrEntry = (VSsPointerEntry *)rawHashEntry->content;
   8.532 +        if( ptrEntry == NULL ) 
   8.533 +            VMS_App__throw_exception("hash entry NULL", NULL, NULL);
   8.534 +         */
   8.535 +
   8.536 +        ptrEntry = ptrEntries[argNum];
   8.537 +        /*check if the ending task was reader of this arg*/
   8.538 +        if (endingTaskType->argTypes[argNum] == READER) { /*then decrement the enabled and non-finished reader-count in
   8.539 +          * the hash-entry. */
   8.540 +            ptrEntry->numEnabledNonDoneReaders -= 1;
   8.541 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   8.542 +            if (ptrEntry->lastWriterVP) {
   8.543 +                Dependency newd;
   8.544 +                newd.from_vp = ptrEntry->lastWriterVP;
   8.545 +                newd.from_task = ptrEntry->lastWriterTask;
   8.546 +                newd.to_vp = semReq->callingSlv->slaveID;
   8.547 +                newd.to_task = semReq->callingSlv->assignCount;
   8.548 +                addToListOfArrays(Dependency, newd, semEnv->dataDependenciesList);
   8.549 +            }
   8.550 +#endif
   8.551 +            /*If the count becomes zero, then take the next entry from the Q. 
   8.552 +             *It should be a writer, or else there's a bug in this algorithm.*/
   8.553 +            if (ptrEntry->numEnabledNonDoneReaders == 0) {
   8.554 +                waitingTaskCarrier = readPrivQ(ptrEntry->waitersQ);
   8.555 +                if (waitingTaskCarrier == NULL) { //TODO: looks safe to delete the ptr entry at this point 
   8.556 +                    continue; //next iter of loop
   8.557 +                }
   8.558 +                if (waitingTaskCarrier->isReader)
   8.559 +                    VMS_App__throw_exception("READER waiting", NULL, NULL);
   8.560 +
   8.561 +                waitingTaskStub = waitingTaskCarrier->taskStub;
   8.562 +
   8.563 +                /*Set the hash-entry to have an enabled non-finished writer.*/
   8.564 +                ptrEntry->hasEnabledNonFinishedWriter = TRUE;
   8.565 +
   8.566 +                /* Decrement the blocking-propendent-count of the writer's
   8.567 +                 * task-stub.  If the count has reached zero, then put the
   8.568 +                 * task-stub into the readyQ.*/
   8.569 +                waitingTaskStub->numBlockingProp -= 1;
   8.570 +                if (waitingTaskStub->numBlockingProp == 0) {
   8.571 +                    writePrivQ(waitingTaskStub, semEnv->taskReadyQ);
   8.572 +                }
   8.573 +            }
   8.574 +        } else /*the ending task is a writer of this arg*/ { /*clear the enabled non-finished writer flag of the hash-entry.*/
   8.575 +            ptrEntry->hasEnabledNonFinishedWriter = FALSE;
   8.576 +            #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   8.577 +            ptrEntry->lastWriterVP = semReq->callingSlv->slaveID;
   8.578 +            ptrEntry->lastWriterTask = semReq->callingSlv->assignCount;
   8.579 +            #endif
   8.580 +
   8.581 +            /*Take the next waiter from the hash-entry's Q.*/
   8.582 +            waitingTaskCarrier = readPrivQ(ptrEntry->waitersQ);
   8.583 +            if (waitingTaskCarrier == NULL) { //TODO: looks safe to delete ptr entry at this point
   8.584 +                continue; //go to next iter of loop, done here.
   8.585 +            }
   8.586              waitingTaskStub = waitingTaskCarrier->taskStub;
   8.587 -            
   8.588 -            /*Set the hash-entry to have an enabled non-finished writer.*/
   8.589 -            ptrEntry->hasEnabledNonFinishedWriter = TRUE;
   8.590 -            
   8.591 -            /* Decrement the blocking-propendent-count of the writer's
   8.592 -             * task-stub.  If the count has reached zero, then put the
   8.593 -             * task-stub into the readyQ.*/
   8.594 -            waitingTaskStub->numBlockingProp -= 1;
   8.595 -            if( waitingTaskStub->numBlockingProp == 0 )
   8.596 -             { writePrivQ( waitingTaskStub, semEnv->taskReadyQ );
   8.597 -             }
   8.598 -          }
   8.599 -       }
   8.600 -      else /*the ending task is a writer of this arg*/ 
   8.601 -       { /*clear the enabled non-finished writer flag of the hash-entry.*/
   8.602 -         ptrEntry->hasEnabledNonFinishedWriter = FALSE;
   8.603 -         
   8.604 -         /*Take the next waiter from the hash-entry's Q.*/
   8.605 -         waitingTaskCarrier = readPrivQ( ptrEntry->waitersQ );
   8.606 -         if( waitingTaskCarrier == NULL )
   8.607 -          { //TODO: looks safe to delete ptr entry at this point
   8.608 -            continue; //go to next iter of loop, done here.
   8.609 -          }
   8.610 -         waitingTaskStub = waitingTaskCarrier->taskStub;
   8.611 -         
   8.612 -         /*If task is a writer of this hash-entry's pointer*/
   8.613 -         if( !waitingTaskCarrier->isReader ) 
   8.614 -          { /* then turn the flag back on.*/
   8.615 -            ptrEntry->hasEnabledNonFinishedWriter = TRUE;
   8.616 -            /*Decrement the writer's blocking-propendent-count in task-stub
   8.617 -             * If it becomes zero, then put the task-stub into the readyQ.*/
   8.618 -            waitingTaskStub->numBlockingProp -= 1;
   8.619 -            if( waitingTaskStub->numBlockingProp == 0 )
   8.620 -             { writePrivQ( waitingTaskStub, semEnv->taskReadyQ );
   8.621 -             }
   8.622 -          }
   8.623 -         else
   8.624 -          { /*Waiting task is a reader, so do a loop, of all waiting readers
   8.625 +
   8.626 +            /*If task is a writer of this hash-entry's pointer*/
   8.627 +            if (!waitingTaskCarrier->isReader) { /* then turn the flag back on.*/
   8.628 +                ptrEntry->hasEnabledNonFinishedWriter = TRUE;
   8.629 +                /*Decrement the writer's blocking-propendent-count in task-stub
   8.630 +                 * If it becomes zero, then put the task-stub into the readyQ.*/
   8.631 +                waitingTaskStub->numBlockingProp -= 1;
   8.632 +                if (waitingTaskStub->numBlockingProp == 0) {
   8.633 +                    writePrivQ(waitingTaskStub, semEnv->taskReadyQ);
   8.634 +                }
   8.635 +            } else { /*Waiting task is a reader, so do a loop, of all waiting readers
   8.636               * until encounter a writer or waitersQ is empty*/
   8.637 -            while( TRUE ) /*The checks guarantee have a waiting reader*/
   8.638 -             { /*Increment the hash-entry's count of enabled non-finished
    8.639 +                while (TRUE) /*The checks above guarantee a waiting reader*/ { /*Increment the hash-entry's count of enabled non-finished
   8.640                  * readers.*/
   8.641 -               ptrEntry->numEnabledNonDoneReaders += 1;
   8.642 +                    ptrEntry->numEnabledNonDoneReaders += 1;
   8.643  
   8.644 -               /*Decrement the blocking propendents count of the reader's
   8.645 -                * task-stub.  If it reaches zero, then put the task-stub
   8.646 -                * into the readyQ.*/
   8.647 -               waitingTaskStub->numBlockingProp -= 1;
   8.648 -               if( waitingTaskStub->numBlockingProp == 0 )
   8.649 -                { writePrivQ( waitingTaskStub, semEnv->taskReadyQ );
   8.650 -                }
   8.651 -               /*Get next waiting task*/
   8.652 -               waitingTaskCarrier = peekPrivQ( ptrEntry->waitersQ );
   8.653 -               if( waitingTaskCarrier == NULL ) break;
   8.654 -               if( !waitingTaskCarrier->isReader ) break;
   8.655 -               waitingTaskCarrier = readPrivQ( ptrEntry->waitersQ );               
   8.656 -               waitingTaskStub = waitingTaskCarrier->taskStub;
   8.657 -             }//while waiter is a reader
   8.658 -          }//if-else, first waiting task is a reader
   8.659 -       }//if-else, check of ending task, whether writer or reader
   8.660 +                    /*Decrement the blocking propendents count of the reader's
   8.661 +                     * task-stub.  If it reaches zero, then put the task-stub
   8.662 +                     * into the readyQ.*/
   8.663 +                    waitingTaskStub->numBlockingProp -= 1;
   8.664 +                    if (waitingTaskStub->numBlockingProp == 0) {
   8.665 +                        writePrivQ(waitingTaskStub, semEnv->taskReadyQ);
   8.666 +                    }
   8.667 +                    /*Get next waiting task*/
   8.668 +                    waitingTaskCarrier = peekPrivQ(ptrEntry->waitersQ);
   8.669 +                    if (waitingTaskCarrier == NULL) break;
   8.670 +                    if (!waitingTaskCarrier->isReader) break;
   8.671 +                    waitingTaskCarrier = readPrivQ(ptrEntry->waitersQ);
   8.672 +                    waitingTaskStub = waitingTaskCarrier->taskStub;
   8.673 +                }//while waiter is a reader
   8.674 +            }//if-else, first waiting task is a reader
   8.675 +        }//if-else, check of ending task, whether writer or reader
   8.676      }//for argnum in ending task
   8.677 -   
   8.678 -   
   8.679 -      //done ending the task, now free the stub + args copy
   8.680 -      // if still has live children, then keep stub around
   8.681 -   if( endingTaskStub->numLiveChildTasks   == 0 &&
   8.682 -       endingTaskStub->numLiveChildThreads == 0 )
   8.683 -    { free_task_stub( endingTaskStub ); 
   8.684 +
   8.685 +
   8.686 +    //done ending the task, now free the stub + args copy
   8.687 +    // if still has live children, then keep stub around
   8.688 +    if (endingTaskStub->numLiveChildTasks == 0 &&
   8.689 +            endingTaskStub->numLiveChildThreads == 0) {
   8.690 +        free_task_stub(endingTaskStub);
   8.691      }
   8.692 -   
   8.693 -   
   8.694 -   endingSlvSemData->needsTaskAssigned = TRUE;
   8.695 -   
   8.696 -      //Check if the slave is an extra task slave, and put into free Q
   8.697 -   if( endingSlvSemData->slaveType == ExtraTaskSlv )
   8.698 -    { writePrivQ( semReq->callingSlv, semEnv->freeExtraTaskSlvQ );
   8.699 +
   8.700 +
   8.701 +    endingSlvSemData->needsTaskAssigned = TRUE;
   8.702 +
   8.703 +    //Check if the slave is an extra task slave, and put into free Q
   8.704 +    if (endingSlvSemData->slaveType == ExtraTaskSlv) {
   8.705 +        writePrivQ(semReq->callingSlv, semEnv->freeExtraTaskSlvQ);
   8.706      }
   8.707 -   
   8.708 -      //otherwise, it's a slot slave, so it will get used from matrix
   8.709 -      // so, do nothing with it, just return
   8.710 -   return; 
   8.711 - }
   8.712 +
   8.713 +    //otherwise, it's a slot slave, so it will get used from matrix
   8.714 +    // so, do nothing with it, just return
   8.715 +    return;
   8.716 +}
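
The waiter-drain policy above, when a writer ends, is: wake exactly one
waiting writer, or else a maximal run of waiting readers. Pulled out as a
hypothetical helper over the same queue primitives -- an illustration of the
policy only, since the patch inlines this logic alongside the UCC dependency
recording:

    static void
    release_waiters(VSsPointerEntry *e, VSsSemEnv *semEnv) {
        VSsTaskStubCarrier *c = readPrivQ(e->waitersQ);
        if (c == NULL) return; //nothing waiting on this pointer
        if (!c->isReader) { //head waiter is a writer: enable it alone
            e->hasEnabledNonFinishedWriter = TRUE;
            if (--c->taskStub->numBlockingProp == 0)
                writePrivQ(c->taskStub, semEnv->taskReadyQ);
            return;
        }
        while (TRUE) { //enable readers until a writer (or empty Q) ends the run
            e->numEnabledNonDoneReaders += 1;
            if (--c->taskStub->numBlockingProp == 0)
                writePrivQ(c->taskStub, semEnv->taskReadyQ);
            c = peekPrivQ(e->waitersQ);
            if (c == NULL || !c->isReader) break;
            c = readPrivQ(e->waitersQ);
        }
    }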
   8.717  
   8.718  inline void
   8.719 -free_task_stub( VSsTaskStub *stubToFree )
   8.720 +free_task_stub(VSsTaskStub *stubToFree) 
   8.721   { if(stubToFree->ptrEntries != NULL ) //a thread stub has NULL entry
   8.722      { VMS_PI__free( stubToFree->ptrEntries );
   8.723      }
   8.724     VMS_PI__free( stubToFree );
   8.725 - }
   8.726 +}
   8.727  
   8.728  //========================== Task Comm handlers ===========================
   8.729  
   8.730  
   8.731  
   8.732  //============================  Send Handlers ==============================
   8.733 +
   8.734  /*Send of Type -- The semantic request has the receiving task ID and Type
   8.735   *
   8.736   *Messages of a given Type have to be kept separate..  so need a separate
   8.737 @@ -554,16 +562,16 @@
   8.738   * receive task, so they will stack up.
   8.739   */
   8.740  inline void
   8.741 -handleSendTypeTo( VSsSemReq *semReq, VSsSemEnv *semEnv )
   8.742 - { SlaveVP    *senderSlv, *receiverSlv;
   8.743 -   int32      *senderID, *receiverID;
   8.744 -   int32      *key, keySz, receiverIDNumInt;
   8.745 -   VSsSemReq  *waitingReq;
   8.746 -   HashEntry  *entry;
   8.747 -   HashTable  *commHashTbl = semEnv->commHashTbl;
   8.748 -   
   8.749 -   receiverID  = semReq->receiverID; //For "send", know both send & recv procrs
   8.750 -   senderSlv   = semReq->senderSlv;
   8.751 +handleSendTypeTo(VSsSemReq *semReq, VSsSemEnv *semEnv) {
   8.752 +    SlaveVP *senderSlv, *receiverSlv;
   8.753 +    int32 *senderID, *receiverID;
   8.754 +    int32 *key, keySz, receiverIDNumInt;
   8.755 +    VSsSemReq *waitingReq;
   8.756 +    HashEntry *entry;
   8.757 +    HashTable *commHashTbl = semEnv->commHashTbl;
   8.758 +
   8.759 +    receiverID = semReq->receiverID; //For "send", know both send & recv procrs
   8.760 +    senderSlv = semReq->senderSlv;
   8.761  
   8.762           DEBUG__printf2(dbgRqstHdlr,"SendType req from sender slaveID: %d, recTask: %d", senderSlv->slaveID, receiverID[1])
   8.763            
   8.764 @@ -572,106 +580,105 @@
   8.765        //Eventually task_end will put the slave into the freeExtraTaskSlvQ
   8.766     replaceWithNewSlotSlvIfNeeded( senderSlv, semEnv );
   8.767           
   8.768 -   receiverIDNumInt = receiverID[0] + 1; //pos 0 doesn't include itself
   8.769 +    receiverIDNumInt = receiverID[0] + 1; //pos 0 doesn't include itself
   8.770     keySz = receiverIDNumInt * sizeof(int32) + 2 * sizeof(int32);
   8.771 -   key = VMS_PI__malloc( keySz );
   8.772 +    key = VMS_PI__malloc(keySz);
   8.773     key[0] = receiverIDNumInt + 1; //loc 0 is num int32 in key
   8.774     memcpy( &key[1], receiverID, receiverIDNumInt * sizeof(int32) );
   8.775     key[ 1 + receiverIDNumInt ] = semReq->msgType; 
   8.776 -   
   8.777 -   entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );
   8.778 -   if( entry == NULL ) //was just inserted, means task has to wait
   8.779 +
   8.780 +    entry = giveEntryElseInsertReqst32(key, semReq, commHashTbl);
   8.781 +    if (entry == NULL) //was just inserted, means task has to wait
   8.782      { return;
   8.783      }
   8.784  
   8.785 -      //if here, found a waiting request with same key
   8.786 -   waitingReq = (VSsSemReq *)entry->content;
   8.787 +    //if here, found a waiting request with same key
   8.788 +    waitingReq = (VSsSemReq *) entry->content;
   8.789  
   8.790 -      //At this point, know have waiting request(s) -- either sends or recv
   8.791 -      //Note, can only have max of one receive waiting, and cannot have both
   8.792 -      // sends and receives waiting (they would have paired off)
   8.793 -      // but can have multiple sends from diff sending VPs, all same msg-type
   8.794 -   if( waitingReq->reqType == send_type_to )
   8.795 -    {    //waiting request is another send, so stack this up on list
   8.796 -         // but first clone the sending request so it persists.
   8.797 -      VSsSemReq *clonedReq = cloneReq( semReq );
   8.798 -      clonedReq-> nextReqInHashEntry = waitingReq->nextReqInHashEntry;
   8.799 -      waitingReq->nextReqInHashEntry = clonedReq;
   8.800 -         DEBUG__printf2( dbgRqstHdlr, "linked requests: %p, %p ", clonedReq,\
   8.801 -                                                                 waitingReq )
   8.802 -      return;
   8.803 -    }
   8.804 -   else
   8.805 -    {    
   8.806 -       #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   8.807 +    //At this point, know have waiting request(s) -- either sends or recv
   8.808 +    //Note, can only have max of one receive waiting, and cannot have both
   8.809 +    // sends and receives waiting (they would have paired off)
   8.810 +    // but can have multiple sends from diff sending VPs, all same msg-type
   8.811 +    if (waitingReq->reqType == send_type_to) { //waiting request is another send, so stack this up on list
   8.812 +        // but first clone the sending request so it persists.
   8.813 +        VSsSemReq *clonedReq = cloneReq(semReq);
   8.814 +        clonedReq-> nextReqInHashEntry = waitingReq->nextReqInHashEntry;
   8.815 +        waitingReq->nextReqInHashEntry = clonedReq;
   8.816 +        DEBUG__printf2(dbgRqstHdlr, "linked requests: %p, %p ", clonedReq,\
   8.817 +                                                                 waitingReq)
   8.818 +        return;
   8.819 +    } else {
   8.820 +
   8.821 +
   8.822 +        //set receiver slave, from the waiting request
   8.823 +        receiverSlv = waitingReq->receiverSlv;
   8.824 +
   8.825 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   8.826          Dependency newd;
   8.827 -        newd.from_vp = senderID->slaveID;
   8.828 -        newd.from_task = senderID->assignCount;
   8.829 -        newd.to_vp = receiverID->slaveID;
   8.830 -        newd.to_task = receiverID->assignCount +1;
   8.831 +        newd.from_vp = senderSlv->slaveID;
   8.832 +        newd.from_task = senderSlv->assignCount;
   8.833 +        newd.to_vp = receiverSlv->slaveID;
   8.834 +        newd.to_task = receiverSlv->assignCount + 1;
   8.835          //(newd,semEnv->commDependenciesList);  
   8.836 -        addToListOfArrays(Dependency,newd,semEnv->dynDependenciesList);  
   8.837 -                int32 groupId = semReq->msgType;
   8.838 -        if(semEnv->ntonGroupsInfo->numInArray <= groupId){
   8.839 +        addToListOfArrays(Dependency, newd, semEnv->dynDependenciesList);
   8.840 +        int32 groupId = semReq->msgType;
   8.841 +        if (semEnv->ntonGroupsInfo->numInArray <= groupId) {
   8.842              makeHighestDynArrayIndexBeAtLeast(semEnv->ntonGroupsInfo, groupId);
   8.843          }
   8.844 -        if(semEnv->ntonGroups[groupId] == NULL){
   8.845 +        if (semEnv->ntonGroups[groupId] == NULL) {
   8.846              semEnv->ntonGroups[groupId] = new_NtoN(groupId);
   8.847          }
   8.848          Unit u;
   8.849 -        u.vp = senderID->slaveID;
   8.850 -        u.task = senderID->assignCount;
   8.851 -        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->senders);
   8.852 -        u.vp = receiverID->slaveID;
   8.853 -        u.task = receiverID->assignCount +1;
   8.854 -        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->receivers);
   8.855 -       #endif
   8.856 +        u.vp = senderSlv->slaveID;
   8.857 +        u.task = senderSlv->assignCount;
   8.858 +        addToListOfArrays(Unit, u, semEnv->ntonGroups[groupId]->senders);
   8.859 +        u.vp = receiverSlv->slaveID;
   8.860 +        u.task = receiverSlv->assignCount + 1;
   8.861 +        addToListOfArrays(Unit, u, semEnv->ntonGroups[groupId]->receivers);
   8.862 +#endif
   8.863  
   8.864 -         //set receiver slave, from the waiting request
   8.865 -      receiverSlv = waitingReq->receiverSlv;
   8.866 -      
   8.867 -         //waiting request is a receive_type_to, so it pairs to this send
   8.868 -         //First, remove the waiting receive request from the entry
   8.869 -      entry->content = waitingReq->nextReqInHashEntry;
   8.870 -      VMS_PI__free( waitingReq ); //Don't use contents -- so free it
   8.871 -      
   8.872 -      if( entry->content == NULL )
   8.873 -       {    //TODO: mod hash table to double-link, so can delete entry from
   8.874 +        //waiting request is a receive_type_to, so it pairs to this send
   8.875 +        //First, remove the waiting receive request from the entry
   8.876 +        entry->content = waitingReq->nextReqInHashEntry;
   8.877 +        VMS_PI__free(waitingReq); //Don't use contents -- so free it
   8.878 +
   8.879 +        if (entry->content == NULL) { //TODO: mod hash table to double-link, so can delete entry from
   8.880              // table without hashing the key and looking it up again
   8.881 -         deleteEntryFromTable32( (uint32*)entry->key, commHashTbl );  //frees hashEntry
   8.882 -       }
   8.883 -      
   8.884 -         //attach msg that's in this send request to receiving task's Slv
   8.885 -         // when comes back from suspend will have msg in dataRetFromReq
   8.886 -      receiverSlv->dataRetFromReq = semReq->msg;
   8.887 +            deleteEntryFromTable32((uint32*) entry->key, commHashTbl); //frees hashEntry
   8.888 +        }
   8.889  
   8.890 -         //bring both processors back from suspend
   8.891 -      resume_slaveVP( senderSlv,   semEnv );
   8.892 -      resume_slaveVP( receiverSlv, semEnv );
   8.893 +        //attach msg that's in this send request to receiving task's Slv
   8.894 +        // when comes back from suspend will have msg in dataRetFromReq
   8.895 +        receiverSlv->dataRetFromReq = semReq->msg;
   8.896  
   8.897 -      return;
   8.898 +        //bring both processors back from suspend
   8.899 +        resume_slaveVP(senderSlv, semEnv);
   8.900 +        resume_slaveVP(receiverSlv, semEnv);
   8.901 +
   8.902 +        return;
   8.903      }
   8.904 - }
   8.905 +}
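
The rendezvous key built above concatenates the receiver's hierarchical ID
with the message type, so each (receiver, type) pair gets its own hash slot.
The same construction, pulled out as a hypothetical helper for clarity:

    /* key layout: [ numInt32sFollowing | receiverID words | msgType ] */
    static int32 *
    make_type_to_key(int32 *receiverID, int32 msgType) {
        int32 numIDInts = receiverID[0] + 1; //word 0 doesn't count itself
        int32 *key = VMS_PI__malloc((numIDInts + 2) * sizeof (int32));
        key[0] = numIDInts + 1; //number of int32s that follow
        memcpy(&key[1], receiverID, numIDInts * sizeof (int32));
        key[1 + numIDInts] = msgType;
        return key;
    }

Whichever side arrives first (send or receive) parks its request in the
entry; the second arrival pairs with it, hands the message over through
dataRetFromReq, and resumes both slaves.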
   8.906  
   8.907  
   8.908  /*Looks like can make single handler for both sends..
   8.909   */
   8.910  //TODO: combine both send handlers into single handler
   8.911 +
   8.912  inline void
   8.913 -handleSendFromTo( VSsSemReq *semReq, VSsSemEnv *semEnv)
   8.914 - { SlaveVP     *senderSlv, *receiverSlv;
   8.915 -   int32       *senderID, *receiverID;
   8.916 -   int32       *key, keySz, receiverIDNumInt, senderIDNumInt;
   8.917 -   VSsSemReq   *waitingReq;
   8.918 -   HashEntry   *entry;
   8.919 -   HashTable   *commHashTbl = semEnv->commHashTbl;
   8.920 +handleSendFromTo(VSsSemReq *semReq, VSsSemEnv *semEnv) {
   8.921 +    SlaveVP *senderSlv, *receiverSlv;
   8.922 +    int32 *senderID, *receiverID;
   8.923 +    int32 *key, keySz, receiverIDNumInt, senderIDNumInt;
   8.924 +    VSsSemReq *waitingReq;
   8.925 +    HashEntry *entry;
   8.926 +    HashTable *commHashTbl = semEnv->commHashTbl;
   8.927  
   8.928           DEBUG__printf2(dbgRqstHdlr,"SendFromTo req from task %d to %d",
   8.929 -                        semReq->senderID[1],semReq->receiverID[1])
   8.930 -   
   8.931 -   receiverID  = semReq->receiverID; //For "send", know both send & recv procrs
   8.932 -   senderID    = semReq->senderID;
   8.933 -   senderSlv   = semReq->senderSlv;
   8.934 +            semReq->senderID[1], semReq->receiverID[1])
   8.935 +
    8.936 +    receiverID = semReq->receiverID; //For "send", know both send & recv procrs
   8.937 +    senderID = semReq->senderID;
   8.938 +    senderSlv = semReq->senderSlv;
   8.939  
   8.940        //suspending a task always makes the slave into an extra slot slave,
   8.941        // because it ends up in the resumeQ, even when resumes immediately.
   8.942 @@ -679,229 +686,224 @@
   8.943     replaceWithNewSlotSlvIfNeeded( senderSlv, semEnv );
   8.944     
   8.945     receiverIDNumInt = receiverID[0] + 1; //include the count in the key
   8.946 -   senderIDNumInt   = senderID[0] + 1;
   8.947 +    senderIDNumInt = senderID[0] + 1;
   8.948     keySz = (receiverIDNumInt + senderIDNumInt) * sizeof(int32) + sizeof(int32);
   8.949 -   key   = VMS_PI__malloc( keySz );
   8.950 +    key = VMS_PI__malloc(keySz);
   8.951     key[0] = receiverIDNumInt + senderIDNumInt;
   8.952     memcpy( &key[1], receiverID, receiverIDNumInt * sizeof(int32) );
   8.953     memcpy( &key[1 + receiverIDNumInt], senderID, senderIDNumInt * sizeof(int32) );
   8.954  
   8.955 -   entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );
   8.956 -   if( entry == NULL ) //was just inserted, means task has to wait
   8.957 +    entry = giveEntryElseInsertReqst32(key, semReq, commHashTbl);
   8.958 +    if (entry == NULL) //was just inserted, means task has to wait
   8.959      { return;
   8.960      }
   8.961  
   8.962 -   waitingReq = (VSsSemReq *)entry->content;
   8.963 +    waitingReq = (VSsSemReq *) entry->content;
   8.964  
   8.965 -      //At this point, know have waiting request(s) -- either sends or recv
   8.966 -   if( waitingReq->reqType == send_from_to )
   8.967 -    { printf("\n ERROR: shouldn't be two send-from-tos waiting \n");
   8.968 +    //At this point, know have waiting request(s) -- either sends or recv
   8.969 +    if (waitingReq->reqType == send_from_to) {
   8.970 +        printf("\n ERROR: shouldn't be two send-from-tos waiting \n");
   8.971 +    } else { //waiting request is a receive, so it completes pair with this send
   8.972 +
   8.973 +        //set receiver slave, from the waiting request
   8.974 +        receiverSlv = waitingReq->receiverSlv;
   8.975 +
   8.976 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   8.977 +        Dependency newd;
   8.978 +        newd.from_vp = senderSlv->slaveID;
   8.979 +        newd.from_task = senderSlv->assignCount;
   8.980 +        newd.to_vp = receiverSlv->slaveID;
   8.981 +        newd.to_task = receiverSlv->assignCount + 1;
   8.982 +        //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
   8.983 +        addToListOfArrays(Dependency, newd, semEnv->commDependenciesList);
   8.984 +#endif 
   8.985 +
   8.986 +        //First, remove the waiting receive request from the entry
   8.987 +        entry->content = waitingReq->nextReqInHashEntry;
   8.988 +        VMS_PI__free(waitingReq); //Don't use contents -- so free it
   8.989 +
   8.990 +        //can only be one waiting req for "from-to" semantics
   8.991 +        if (entry->content != NULL) {
   8.992 +            printf("\nERROR in handleSendFromTo\n");
   8.993 +        }
   8.994 +        deleteEntryFromTable32((uint32*) entry->key, commHashTbl); //frees HashEntry
   8.995 +
   8.996 +        //attach msg that's in this send request to receiving procr
   8.997 +        // when comes back from suspend, will have msg in dataRetFromReq
   8.998 +        receiverSlv->dataRetFromReq = semReq->msg;
   8.999 +
  8.1000 +        //bring both processors back from suspend
  8.1001 +        resume_slaveVP(senderSlv, semEnv);
  8.1002 +        resume_slaveVP(receiverSlv, semEnv);
  8.1003 +
  8.1004 +        return;
  8.1005      }
  8.1006 -   else
  8.1007 -    {    //waiting request is a receive, so it completes pair with this send
  8.1008 -      #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  8.1009 -        Dependency newd;
  8.1010 -        newd.from_vp = sendPr->slaveID;
  8.1011 -        newd.from_task = sendPr->assignCount;
  8.1012 -        newd.to_vp = receivePr->slaveID;
  8.1013 -        newd.to_task = receivePr->assignCount +1;
  8.1014 -        //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
  8.1015 -        addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);   
  8.1016 -      #endif 
  8.1017 -
  8.1018 -         //set receiver slave, from the waiting request
  8.1019 -      receiverSlv = waitingReq->receiverSlv;
  8.1020 -       
  8.1021 -         //First, remove the waiting receive request from the entry
  8.1022 -      entry->content = waitingReq->nextReqInHashEntry;
  8.1023 -      VMS_PI__free( waitingReq ); //Don't use contents -- so free it
  8.1024 -      
  8.1025 -         //can only be one waiting req for "from-to" semantics
  8.1026 -      if( entry->content != NULL )
  8.1027 -       {
  8.1028 -         printf("\nERROR in handleSendFromTo\n");
  8.1029 -       }
  8.1030 -      deleteEntryFromTable32( (uint32*)entry->key, commHashTbl );  //frees HashEntry
  8.1031 -
  8.1032 -         //attach msg that's in this send request to receiving procr
  8.1033 -         // when comes back from suspend, will have msg in dataRetFromReq
  8.1034 -      receiverSlv->dataRetFromReq = semReq->msg;
  8.1035 -
  8.1036 -         //bring both processors back from suspend
  8.1037 -      resume_slaveVP( senderSlv,   semEnv );
  8.1038 -      resume_slaveVP( receiverSlv, semEnv );
  8.1039 -            
  8.1040 -      return;
  8.1041 -    }
  8.1042 - }
  8.1043 +}
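
On the TODO above: both send handlers already share their tail once a waiting
receive is found. A sketch of that common tail as a hypothetical helper (the
from-to sanity check and the UCC dependency recording would stay with the
callers):

    static void
    pair_send_with_waiting_recv(VSsSemReq *sendReq, VSsSemReq *waitingReq,
            HashEntry *entry, VSsSemEnv *semEnv) {
        SlaveVP *receiverSlv = waitingReq->receiverSlv;
        entry->content = waitingReq->nextReqInHashEntry;
        VMS_PI__free(waitingReq); //the receive request is consumed by pairing
        if (entry->content == NULL)
            deleteEntryFromTable32((uint32 *) entry->key, semEnv->commHashTbl);
        receiverSlv->dataRetFromReq = sendReq->msg; //message changes hands here
        resume_slaveVP(sendReq->senderSlv, semEnv); //both sides leave suspend
        resume_slaveVP(receiverSlv, semEnv);
    }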
  8.1044  
  8.1045  
  8.1046  
  8.1047  //==============================  Receives  ===========================
  8.1048  //
  8.1049  
  8.1050 +inline void
  8.1051 +handleReceiveTypeTo(VSsSemReq *semReq, VSsSemEnv *semEnv) {
  8.1052 +    SlaveVP *senderSlv, *receiverSlv;
  8.1053 +    int32 *receiverID;
  8.1054 +    int32 *key, keySz, receiverIDNumInt;
  8.1055 +    VSsSemReq *waitingReq;
  8.1056 +    HashEntry *entry;
  8.1057 +    HashTable *commHashTbl = semEnv->commHashTbl;
  8.1058  
  8.1059 -inline void
  8.1060 -handleReceiveTypeTo( VSsSemReq *semReq, VSsSemEnv *semEnv)
  8.1061 - { SlaveVP    *senderSlv, *receiverSlv;
  8.1062 -   int32      *receiverID;
  8.1063 -   int32      *key, keySz, receiverIDNumInt;
  8.1064 -   VSsSemReq  *waitingReq;
  8.1065 -   HashEntry  *entry;
  8.1066 -   HashTable  *commHashTbl = semEnv->commHashTbl;
  8.1067 -   
  8.1068           DEBUG__printf2(dbgRqstHdlr,"ReceiveType req to ID: %d type: %d",semReq->receiverID[1], semReq->msgType)
  8.1069 - 
  8.1070 -   receiverID  = semReq->receiverID; //For "send", know both send & recv procrs
  8.1071 -   receiverSlv = semReq->receiverSlv;
  8.1072 +
  8.1073 +    receiverID = semReq->receiverID; //For "send", know both send & recv procrs
  8.1074 +    receiverSlv = semReq->receiverSlv;
  8.1075     
  8.1076        //suspending a task always makes the slave into an extra slot slave,
  8.1077        // because it ends up in the resumeQ, even when resumes immediately.
  8.1078        //Eventually task_end will put the slave into the freeExtraTaskSlvQ
  8.1079     replaceWithNewSlotSlvIfNeeded( receiverSlv, semEnv );
  8.1080  
  8.1081 -      //key is the receiverID plus the type -- have to copy them into key
  8.1082 -   receiverIDNumInt = receiverID[0] + 1; //pos 0 doesn't include itself
  8.1083 +    //key is the receiverID plus the type -- have to copy them into key
  8.1084 +    receiverIDNumInt = receiverID[0] + 1; //pos 0 doesn't include itself
  8.1085     keySz = receiverIDNumInt * sizeof(int32) + 2 * sizeof(int32);
  8.1086 -   key = VMS_PI__malloc( keySz );
  8.1087 +    key = VMS_PI__malloc(keySz);
  8.1088     key[0] = receiverIDNumInt + 1; //loc 0 is num int32s in key
  8.1089     memcpy( &key[1], receiverID, receiverIDNumInt * sizeof(int32) );
  8.1090     key[ 1 + receiverIDNumInt ] = semReq->msgType; 
  8.1091  
  8.1092 -   entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );//clones
  8.1093 -   if( entry == NULL ) //was just inserted, means task has to wait
  8.1094 +    entry = giveEntryElseInsertReqst32(key, semReq, commHashTbl); //clones
  8.1095 +    if (entry == NULL) //was just inserted, means task has to wait
  8.1096      { return;
  8.1097      }
  8.1098  
  8.1099 -   waitingReq = (VSsSemReq *)entry->content;  //previously cloned by insert
  8.1100 +    waitingReq = (VSsSemReq *) entry->content; //previously cloned by insert
  8.1101  
  8.1102 -      //At this point, know have waiting request(s) -- should be send(s)
  8.1103 -   if( waitingReq->reqType == send_type_to )
  8.1104 -    {    
  8.1105 -         //set sending slave  from the request
  8.1106 -      senderSlv = waitingReq->senderSlv;
  8.1107 -      
  8.1108 -         //waiting request is a send, so pair it with this receive
  8.1109 -         //first, remove the waiting send request from the list in entry
  8.1110 -      entry->content = waitingReq->nextReqInHashEntry;
  8.1111 -      if( entry->content == NULL )
  8.1112 -       { deleteEntryFromTable32( (uint32*)entry->key, commHashTbl );  //frees HashEntry
  8.1113 -       }
  8.1114 -      
  8.1115 -         //attach msg that's in the send request to receiving procr
  8.1116 -         // when comes back from suspend, will have msg in dataRetFromReq
  8.1117 -      receiverSlv->dataRetFromReq = waitingReq->msg;
  8.1118 +    //At this point, know have waiting request(s) -- should be send(s)
  8.1119 +    if (waitingReq->reqType == send_type_to) {
  8.1120 +        //set sending slave  from the request
  8.1121 +        senderSlv = waitingReq->senderSlv;
  8.1122  
  8.1123 -         //bring both processors back from suspend
  8.1124 -      VMS_PI__free( waitingReq );
  8.1125 +        //waiting request is a send, so pair it with this receive
  8.1126 +        //first, remove the waiting send request from the list in entry
  8.1127 +        entry->content = waitingReq->nextReqInHashEntry;
  8.1128 +        if (entry->content == NULL) {
  8.1129 +            deleteEntryFromTable32((uint32*) entry->key, commHashTbl); //frees HashEntry
  8.1130 +        }
  8.1131  
  8.1132 -       #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  8.1133 +        //attach msg that's in the send request to receiving procr
  8.1134 +        // when comes back from suspend, will have msg in dataRetFromReq
  8.1135 +        receiverSlv->dataRetFromReq = waitingReq->msg;
  8.1136 +
  8.1137 +        //bring both processors back from suspend
  8.1138 +        VMS_PI__free(waitingReq);
  8.1139 +
  8.1140 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  8.1141          Dependency newd;
  8.1142 -        newd.from_vp = sendPr->slaveID;
  8.1143 -        newd.from_task = sendPr->assignCount;
  8.1144 -        newd.to_vp = receivePr->slaveID;
  8.1145 -        newd.to_task = receivePr->assignCount +1;
  8.1146 +        newd.from_vp = senderSlv->slaveID;
  8.1147 +        newd.from_task = senderSlv->assignCount;
  8.1148 +        newd.to_vp = receiverSlv->slaveID;
  8.1149 +        newd.to_task = receiverSlv->assignCount + 1;
  8.1150          //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
  8.1151 -        addToListOfArrays(Dependency,newd,semEnv->dynDependenciesList); 
  8.1152 +        addToListOfArrays(Dependency, newd, semEnv->dynDependenciesList);
  8.1153          int32 groupId = semReq->msgType;
  8.1154 -        if(semEnv->ntonGroupsInfo->numInArray <= groupId){
  8.1155 +        if (semEnv->ntonGroupsInfo->numInArray <= groupId) {
  8.1156              makeHighestDynArrayIndexBeAtLeast(semEnv->ntonGroupsInfo, groupId);
  8.1157          }
  8.1158 -        if(semEnv->ntonGroups[groupId] == NULL){
  8.1159 +        if (semEnv->ntonGroups[groupId] == NULL) {
  8.1160              semEnv->ntonGroups[groupId] = new_NtoN(groupId);
  8.1161          }
  8.1162          Unit u;
  8.1163 -        u.vp = sendPr->slaveID;
  8.1164 -        u.task = sendPr->assignCount;
  8.1165 -        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->senders);
  8.1166 -        u.vp = receivePr->slaveID;
  8.1167 -        u.task = receivePr->assignCount +1;
  8.1168 -        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->receivers);
  8.1169 -       #endif
  8.1170 -      
  8.1171 -      resume_slaveVP( senderSlv,   semEnv );
  8.1172 -      resume_slaveVP( receiverSlv, semEnv );
  8.1173 +        u.vp = senderSlv->slaveID;
  8.1174 +        u.task = senderSlv->assignCount;
  8.1175 +        addToListOfArrays(Unit, u, semEnv->ntonGroups[groupId]->senders);
  8.1176 +        u.vp = receiverSlv->slaveID;
  8.1177 +        u.task = receiverSlv->assignCount + 1;
  8.1178 +        addToListOfArrays(Unit, u, semEnv->ntonGroups[groupId]->receivers);
  8.1179 +#endif
  8.1180  
  8.1181 -      return;
  8.1182 +        resume_slaveVP(senderSlv, semEnv);
  8.1183 +        resume_slaveVP(receiverSlv, semEnv);
  8.1184 +
  8.1185 +        return;
  8.1186      }
  8.1187 -   printf("\nLang Impl Error: Should never be two waiting receives!\n");
  8.1188 - }
  8.1189 -
  8.1190 +    printf("\nLang Impl Error: Should never be two waiting receives!\n");
  8.1191 +}
  8.1192  
  8.1193  /*
  8.1194   */
  8.1195  inline void
  8.1196 -handleReceiveFromTo( VSsSemReq *semReq, VSsSemEnv *semEnv)
  8.1197 - { SlaveVP     *senderSlv, *receiverSlv;
  8.1198 -   int32       *senderID,  *receiverID;
  8.1199 -   int32       *key, keySz, receiverIDNumInt, senderIDNumInt;
  8.1200 -   VSsSemReq   *waitingReq;
  8.1201 -   HashEntry   *entry;
  8.1202 -   HashTable   *commHashTbl = semEnv->commHashTbl;
  8.1203 +handleReceiveFromTo(VSsSemReq *semReq, VSsSemEnv *semEnv) {
  8.1204 +    SlaveVP *senderSlv, *receiverSlv;
  8.1205 +    int32 *senderID, *receiverID;
  8.1206 +    int32 *key, keySz, receiverIDNumInt, senderIDNumInt;
  8.1207 +    VSsSemReq *waitingReq;
  8.1208 +    HashEntry *entry;
  8.1209 +    HashTable *commHashTbl = semEnv->commHashTbl;
  8.1210  
  8.1211           DEBUG__printf2(dbgRqstHdlr,"RecFromTo req from ID: %d to ID: %d",semReq->senderID[1],semReq->receiverID[1])
  8.1212 -   
  8.1213 -   receiverID  = semReq->receiverID; //For "send", know both send & recv procrs
  8.1214 -   senderID    = semReq->senderID;
  8.1215 -   receiverSlv = semReq->receiverSlv;
  8.1216 +
  8.1217 +    receiverID = semReq->receiverID; //For "send", know both send & recv procrs
  8.1218 +    senderID = semReq->senderID;
  8.1219 +    receiverSlv = semReq->receiverSlv;
  8.1220     
  8.1221        //suspending a task always makes the slave into an extra slot slave,
  8.1222        // because it ends up in the resumeQ, even when resumes immediately.
  8.1223        //Eventually task_end will put the slave into the freeExtraTaskSlvQ
  8.1224     replaceWithNewSlotSlvIfNeeded( receiverSlv, semEnv );
  8.1225  
  8.1226 -   receiverIDNumInt = receiverID[0] + 1; //pos 0 doesn't include itself
  8.1227 -   senderIDNumInt   = senderID[0] + 1;
  8.1228 +    receiverIDNumInt = receiverID[0] + 1; //pos 0 doesn't include itself
  8.1229 +    senderIDNumInt = senderID[0] + 1;
  8.1230     keySz = (receiverIDNumInt + senderIDNumInt) * sizeof(int32) + sizeof(int32);
  8.1231 -   key = VMS_PI__malloc( keySz );
  8.1232 +    key = VMS_PI__malloc(keySz);
  8.1233     key[0] = receiverIDNumInt + senderIDNumInt; //loc 0 is num int32s in key
  8.1234     memcpy( &key[1], receiverID, receiverIDNumInt * sizeof(int32) );
  8.1235     memcpy( &key[1 + receiverIDNumInt], senderID, senderIDNumInt * sizeof(int32));
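         +    /* Layout sketch of the rendezvous key built above (illustrative
         +     * values): for receiverID = {1,7} and senderID = {1,3}, where slot 0
         +     * of each ID holds the count of ints that follow it,
         +     *   key = { 4,   1,7,   1,3 }
         +     *          #ints recv'r sender
         +     * so a send and a receive naming the same pair produce the same key
         +     * and meet in the same hash entry.
         +     */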
  8.1236  
  8.1237 -   entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );
  8.1238 -   if( entry == NULL ) //was just inserted, means task has to wait
  8.1239 +    entry = giveEntryElseInsertReqst32(key, semReq, commHashTbl);
  8.1240 +    if (entry == NULL) //was just inserted, means task has to wait
  8.1241      { return;
  8.1242      }
  8.1243  
  8.1244 -   waitingReq = (VSsSemReq *)entry->content;
  8.1245 +    waitingReq = (VSsSemReq *) entry->content;
  8.1246  
  8.1247 -      //At this point, know have a request to rendez-vous -- should be send
  8.1248 -   if( waitingReq->reqType == send_from_to )
  8.1249 -    {    //waiting request is a send, so pair it with this receive
  8.1250 -      #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  8.1251 +    //At this point, we know we have a request to rendezvous with -- it should be a send
  8.1252 +    if (waitingReq->reqType == send_from_to) { //waiting request is a send, so pair it with this receive
  8.1253 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  8.1254          Dependency newd;
  8.1255 -        newd.from_vp = sendPr->slaveID;
  8.1256 -        newd.from_task = sendPr->assignCount;
  8.1257 -        newd.to_vp = receivePr->slaveID;
  8.1258 -        newd.to_task = receivePr->assignCount +1;
  8.1259 +        newd.from_vp = waitingReq->senderSlv->slaveID; //note: senderSlv itself is only set below
  8.1260 +        newd.from_task = waitingReq->senderSlv->assignCount;
  8.1261 +        newd.to_vp = receiverSlv->slaveID;
  8.1262 +        newd.to_task = receiverSlv->assignCount + 1;
  8.1263          //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
  8.1264 -        addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);    
  8.1265 -      #endif
  8.1266 -      
  8.1267 -         //have receiver slave, now set sender slave
  8.1268 -      senderSlv = waitingReq->senderSlv;
  8.1269 -      
  8.1270 -         //For from-to, should only ever be a single reqst waiting tobe paird
  8.1271 -      entry->content = waitingReq->nextReqInHashEntry;
  8.1272 -      if( entry->content != NULL ) printf("\nERROR in handleRecvFromTo\n");
  8.1273 -      deleteEntryFromTable32( (uint32*)entry->key, commHashTbl );  //frees entry too
  8.1274 +        addToListOfArrays(Dependency, newd, semEnv->commDependenciesList);
  8.1275 +#endif
  8.1276  
  8.1277 -         //attach msg that's in the send request to receiving procr
  8.1278 -         // when comes back from suspend, will have msg in dataRetFromReq
  8.1279 -      receiverSlv->dataRetFromReq = waitingReq->msg;
  8.1280 +        //have receiver slave, now set sender slave
  8.1281 +        senderSlv = waitingReq->senderSlv;
  8.1282  
  8.1283 -         //bring both processors back from suspend
  8.1284 -      VMS_PI__free( waitingReq );
  8.1285 +        //For from-to, there should only ever be a single request waiting to be paired
  8.1286 +        entry->content = waitingReq->nextReqInHashEntry;
  8.1287 +        if (entry->content != NULL) printf("\nERROR in handleRecvFromTo\n");
  8.1288 +        deleteEntryFromTable32((uint32*) entry->key, commHashTbl); //frees entry too
  8.1289  
  8.1290 -      resume_slaveVP( senderSlv,   semEnv );
  8.1291 -      resume_slaveVP( receiverSlv, semEnv );
  8.1292 +        //attach msg that's in the send request to receiving procr
  8.1293 +        // when comes back from suspend, will have msg in dataRetFromReq
  8.1294 +        receiverSlv->dataRetFromReq = waitingReq->msg;
  8.1295  
  8.1296 -      return;
  8.1297 +        //bring both processors back from suspend
  8.1298 +        VMS_PI__free(waitingReq);
  8.1299 +
  8.1300 +        resume_slaveVP(senderSlv, semEnv);
  8.1301 +        resume_slaveVP(receiverSlv, semEnv);
  8.1302 +
  8.1303 +        return;
  8.1304      }
  8.1305 -   printf("\nLang Impl Error: Should never be two waiting receives!\n");
  8.1306 - }
  8.1307 +    printf("\nLang Impl Error: Should never be two waiting receives!\n");
  8.1308 +}
  8.1309  
  8.1310  //==========================================================================
  8.1311 +
  8.1312  inline void
  8.1313  replaceWithNewSlotSlvIfNeeded( SlaveVP *requestingSlv, VSsSemEnv *semEnv )
  8.1314   { SlaveVP *newSlotSlv;
  8.1315 @@ -911,202 +913,209 @@
  8.1316     if( reqSemData->slaveType != SlotTaskSlv )
  8.1317        return; //already replaced, so just return
  8.1318     
  8.1319 -      //get a new slave to be the slot slave
  8.1320 -   newSlotSlv     = readPrivQ( semEnv->freeExtraTaskSlvQ );
  8.1321 -   if( newSlotSlv == NULL )
  8.1322 -    { newSlotSlv  = VSs__create_slave_helper( &idle_fn, NULL, semEnv, 0);
  8.1323 +    //get a new slave to be the slot slave
  8.1324 +    newSlotSlv = readPrivQ(semEnv->freeExtraTaskSlvQ);
  8.1325 +    if (newSlotSlv == NULL) {
  8.1326 +        newSlotSlv = VSs__create_slave_helper(&idle_fn, NULL, semEnv, 0);
  8.1327           //just made a new extra task slave, so count it
  8.1328        semEnv->numLiveExtraTaskSlvs += 1;
  8.1329      }
  8.1330 -   
  8.1331 -      //set slave values to make it the slot slave
  8.1332 -   semData                        = newSlotSlv->semanticData;
  8.1333 -   semData->taskStub              = NULL;
  8.1334 -   semData->slaveType             = SlotTaskSlv;
  8.1335 -   semData->needsTaskAssigned     = TRUE;
  8.1336 +
  8.1337 +    //set slave values to make it the slot slave
  8.1338 +    semData = newSlotSlv->semanticData;
  8.1339 +    semData->taskStub = NULL;
  8.1340 +    semData->slaveType = SlotTaskSlv;
  8.1341 +    semData->needsTaskAssigned = TRUE;
  8.1342     
  8.1343        //a slot slave is pinned to a particular slot on a particular core
  8.1344 -   newSlotSlv->animSlotAssignedTo = requestingSlv->animSlotAssignedTo;
  8.1345 -   newSlotSlv->coreAnimatedBy     = requestingSlv->coreAnimatedBy;
  8.1346 -    
  8.1347 -      //put it into the slot slave matrix
  8.1348 -   int32 slotNum = requestingSlv->animSlotAssignedTo->slotIdx;
  8.1349 -   int32 coreNum = requestingSlv->coreAnimatedBy;
  8.1350 -   semEnv->slotTaskSlvs[coreNum][slotNum] = newSlotSlv;
  8.1351 +    newSlotSlv->animSlotAssignedTo = requestingSlv->animSlotAssignedTo;
  8.1352 +    newSlotSlv->coreAnimatedBy = requestingSlv->coreAnimatedBy;
  8.1353  
  8.1354 -      //Fix up requester, to be an extra slave now (but not a free one)
  8.1355 -      // because it's not free, doesn't go into freeExtraTaskSlvQ
  8.1356 -   semData = requestingSlv->semanticData;
  8.1357 -   semData->slaveType = ExtraTaskSlv;
  8.1358 - }
  8.1359 +    //put it into the slot slave matrix
  8.1360 +    int32 slotNum = requestingSlv->animSlotAssignedTo->slotIdx;
  8.1361 +    int32 coreNum = requestingSlv->coreAnimatedBy;
  8.1362 +    semEnv->slotTaskSlvs[coreNum][slotNum] = newSlotSlv;
  8.1363 +
  8.1364 +    //Fix up requester, to be an extra slave now (but not a free one)
  8.1365 +    // because it's not free, doesn't go into freeExtraTaskSlvQ
  8.1366 +    semData = requestingSlv->semanticData;
  8.1367 +    semData->slaveType = ExtraTaskSlv;
  8.1368 +}
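         +/* Net-effect sketch: the (core, slot) cell in slotTaskSlvs now points at
         + * a fresh slave ready for task assignment, while the requester lives on
         + * as an ExtraTaskSlv until task_end eventually returns it to the
         + * freeExtraTaskSlvQ.
         + */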
  8.1369  
  8.1370  inline void
  8.1371 -handleTaskwait( VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv)
  8.1372 - { VSsTaskStub* requestingTaskStub;
  8.1373 -   VSsSemData* semData;
  8.1374 -         DEBUG__printf1(dbgRqstHdlr,"Taskwait request from processor %d",
  8.1375 -                                                      requestingSlv->slaveID)
  8.1376 -    
  8.1377 -   semData = (VSsSemData *)semReq->callingSlv->semanticData;
  8.1378 -   requestingTaskStub = semData->taskStub;
  8.1379 -   
  8.1380 -   if( semData->taskStub->numLiveChildTasks == 0 )
  8.1381 -    {    //nobody to wait for, resume
  8.1382 -      resume_slaveVP( requestingSlv, semEnv );
  8.1383 +handleTaskwait(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) {
  8.1384 +    VSsTaskStub* requestingTaskStub;
  8.1385 +    VSsSemData* semData;
  8.1386 +    DEBUG__printf1(dbgRqstHdlr, "Taskwait request from processor %d",
  8.1387 +            requestingSlv->slaveID)
  8.1388 +
  8.1389 +    semData = (VSsSemData *) semReq->callingSlv->semanticData;
  8.1390 +    requestingTaskStub = semData->taskStub;
  8.1391 +
  8.1392 +    if (requestingTaskStub->numLiveChildTasks == 0) { //nobody to wait for, resume
  8.1393 +        resume_slaveVP(requestingSlv, semEnv);
  8.1394 +    } else //have to wait, replace requester with new slot slv & mark waiting
  8.1395 +    {
  8.1396 +        if (semData->slaveType == SlotTaskSlv) {
  8.1397 +         replaceWithNewSlotSlvIfNeeded( requestingSlv, semEnv );
  8.1398 +        }
  8.1399 +
  8.1400 +        requestingTaskStub->isWaitingForChildTasksToEnd = TRUE;
  8.1401      }
  8.1402 -   else  //have to wait, replace requester with new slot slv & mark waiting
  8.1403 -    { 
  8.1404 -       if(semData->slaveType == SlotTaskSlv){
  8.1405 -         replaceWithNewSlotSlvIfNeeded( requestingSlv, semEnv );
  8.1406 -       }
  8.1407 -       
  8.1408 -      requestingTaskStub->isWaitingForChildTasksToEnd = TRUE;
  8.1409 -    }    
  8.1410 - }
  8.1411 +}
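         +/* Presumed counterpart, not in this hunk: when a child's task-end
         + * handling drops numLiveChildTasks to zero and finds
         + * isWaitingForChildTasksToEnd set, it resumes the parent.  This handler
         + * only parks the parent and sets the flag.
         + */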
  8.1412  
  8.1413  
  8.1414  //==========================================================================
  8.1415 -/*
  8.1416 - */
  8.1417 -void
  8.1418 -handleMalloc( VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv )
  8.1419 - { void *ptr;
  8.1420 - 
  8.1421 -      DEBUG__printf1(dbgRqstHdlr,"Malloc request from processor %d",requestingSlv->slaveID)
  8.1422 -
  8.1423 -   ptr = VMS_PI__malloc( semReq->sizeToMalloc );
  8.1424 -   requestingSlv->dataRetFromReq = ptr;
  8.1425 -   resume_slaveVP( requestingSlv, semEnv );
  8.1426 - }
  8.1427  
  8.1428  /*
  8.1429   */
  8.1430  void
  8.1431 -handleFree( VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv )
  8.1432 - {
  8.1433 -         DEBUG__printf1(dbgRqstHdlr,"Free request from processor %d",requestingSlv->slaveID)
  8.1434 -   VMS_PI__free( semReq->ptrToFree );
  8.1435 -   resume_slaveVP( requestingSlv, semEnv );
  8.1436 - }
  8.1437 +handleMalloc(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) {
  8.1438 +    void *ptr;
  8.1439 +
  8.1440 +    DEBUG__printf1(dbgRqstHdlr, "Malloc request from processor %d", requestingSlv->slaveID)
  8.1441 +
  8.1442 +    ptr = VMS_PI__malloc(semReq->sizeToMalloc);
  8.1443 +    requestingSlv->dataRetFromReq = ptr;
  8.1444 +    resume_slaveVP(requestingSlv, semEnv);
  8.1445 +}
  8.1446 +
  8.1447 +/*
  8.1448 + */
  8.1449 +void
  8.1450 +handleFree(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) {
  8.1451 +    DEBUG__printf1(dbgRqstHdlr, "Free request from processor %d", requestingSlv->slaveID)
  8.1452 +    VMS_PI__free(semReq->ptrToFree);
  8.1453 +    resume_slaveVP(requestingSlv, semEnv);
  8.1454 +}
  8.1455  
  8.1456  
  8.1457  //===========================================================================
  8.1458  //
  8.1459 +
  8.1460  /*Uses ID as index into array of flags.  If flag already set, resumes from
  8.1461   * end-label.  Else, sets flag and resumes normally.
  8.1462   */
  8.1463  void inline
  8.1464 -handleStartSingleton_helper( VSsSingleton *singleton, SlaveVP *reqstingSlv,
  8.1465 -                             VSsSemEnv    *semEnv )
  8.1466 - {
  8.1467 -   if( singleton->hasFinished )
  8.1468 -    {    //the code that sets the flag to true first sets the end instr addr
  8.1469 -      reqstingSlv->dataRetFromReq = singleton->endInstrAddr;
  8.1470 -      resume_slaveVP( reqstingSlv, semEnv );
  8.1471 -      return;
  8.1472 +handleStartSingleton_helper(VSsSingleton *singleton, SlaveVP *reqstingSlv,
  8.1473 +        VSsSemEnv *semEnv) {
  8.1474 +    if (singleton->hasFinished) { //the code that sets the flag to true first sets the end instr addr
  8.1475 +        reqstingSlv->dataRetFromReq = singleton->endInstrAddr;
  8.1476 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  8.1477 +        Dependency newd;
  8.1478 +        newd.from_vp = singleton->executingVp;
  8.1479 +        newd.from_task = singleton->executingTask;
  8.1480 +        newd.to_vp = reqstingSlv->slaveID;
  8.1481 +        newd.to_task = reqstingSlv->assignCount + 1;
  8.1482 +        addToListOfArrays(Dependency, newd, semEnv->singletonDependenciesList);
  8.1483 +#endif  
  8.1484 +        resume_slaveVP(reqstingSlv, semEnv);
  8.1485 +        return;
  8.1486 +    } else if (singleton->hasBeenStarted) { //singleton is in-progress in a diff slave, so wait for it to finish
  8.1487 +        writePrivQ(reqstingSlv, singleton->waitQ);
  8.1488 +        return;
  8.1489 +    } else { //hasn't been started, so this is the first attempt at the singleton
  8.1490 +        singleton->hasBeenStarted = TRUE;
  8.1491 +        reqstingSlv->dataRetFromReq = 0x0;
  8.1492 +        resume_slaveVP(reqstingSlv, semEnv);
  8.1493 +        return;
  8.1494      }
  8.1495 -   else if( singleton->hasBeenStarted )
  8.1496 -    {    //singleton is in-progress in a diff slave, so wait for it to finish
  8.1497 -      writePrivQ(reqstingSlv, singleton->waitQ );
  8.1498 -      return;
  8.1499 -    }
  8.1500 -   else
  8.1501 -    {    //hasn't been started, so this is the first attempt at the singleton
  8.1502 -      singleton->hasBeenStarted = TRUE;
  8.1503 -      reqstingSlv->dataRetFromReq = 0x0;
  8.1504 -      resume_slaveVP( reqstingSlv, semEnv );
  8.1505 -      return;
  8.1506 -    }
  8.1507 - }
  8.1508 -void inline
  8.1509 -handleStartFnSingleton( VSsSemReq *semReq, SlaveVP *requestingSlv,
  8.1510 -                      VSsSemEnv *semEnv )
  8.1511 - { VSsSingleton *singleton;
  8.1512 -         DEBUG__printf1(dbgRqstHdlr,"StartFnSingleton request from processor %d",requestingSlv->slaveID)
  8.1513 -
  8.1514 -   singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
  8.1515 -   handleStartSingleton_helper( singleton, requestingSlv, semEnv );
  8.1516 - }
  8.1517 -void inline
  8.1518 -handleStartDataSingleton( VSsSemReq *semReq, SlaveVP *requestingSlv,
  8.1519 -                      VSsSemEnv *semEnv )
  8.1520 - { VSsSingleton *singleton;
  8.1521 -
  8.1522 -         DEBUG__printf1(dbgRqstHdlr,"StartDataSingleton request from processor %d",requestingSlv->slaveID)
  8.1523 -   if( *(semReq->singletonPtrAddr) == NULL )
  8.1524 -    { singleton                 = VMS_PI__malloc( sizeof(VSsSingleton) );
  8.1525 -      singleton->waitQ          = makeVMSQ();
  8.1526 -      singleton->endInstrAddr   = 0x0;
  8.1527 -      singleton->hasBeenStarted = FALSE;
  8.1528 -      singleton->hasFinished    = FALSE;
  8.1529 -      *(semReq->singletonPtrAddr)  = singleton;
  8.1530 -    }
  8.1531 -   else
  8.1532 -      singleton = *(semReq->singletonPtrAddr);
  8.1533 -   handleStartSingleton_helper( singleton, requestingSlv, semEnv );
  8.1534 - }
  8.1535 -
  8.1536 +}
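         +/* Protocol sketch, assuming three slaves hit the same singleton:
         + *   slave A arrives first -> hasBeenStarted = TRUE, A runs the body
         + *                            (dataRetFromReq == 0x0 signals "run it")
         + *   slave B arrives next  -> parked on singleton->waitQ
         + *   A runs end-singleton  -> hasFinished = TRUE, B resumes with
         + *                            endInstrAddr and jumps past the body
         + *   slave C arrives later -> resumes immediately with endInstrAddr
         + */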
  8.1537  
  8.1538  void inline
  8.1539 -handleEndSingleton_helper( VSsSingleton *singleton, SlaveVP *requestingSlv,
  8.1540 -                           VSsSemEnv    *semEnv )
  8.1541 - { PrivQueueStruc *waitQ;
  8.1542 -   int32           numWaiting, i;
  8.1543 -   SlaveVP      *resumingSlv;
  8.1544 +handleStartFnSingleton(VSsSemReq *semReq, SlaveVP *requestingSlv,
  8.1545 +        VSsSemEnv *semEnv) {
  8.1546 +    VSsSingleton *singleton;
  8.1547 +    DEBUG__printf1(dbgRqstHdlr, "StartFnSingleton request from processor %d", requestingSlv->slaveID)
  8.1548  
  8.1549 -   if( singleton->hasFinished )
  8.1550 -    { //by definition, only one slave should ever be able to run end singleton
  8.1551 -      // so if this is true, is an error
  8.1552 -      ERROR1( "singleton code ran twice", requestingSlv );
  8.1553 +    singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
  8.1554 +    handleStartSingleton_helper(singleton, requestingSlv, semEnv);
  8.1555 +}
  8.1556 +
  8.1557 +void inline
  8.1558 +handleStartDataSingleton(VSsSemReq *semReq, SlaveVP *requestingSlv,
  8.1559 +        VSsSemEnv *semEnv) {
  8.1560 +    VSsSingleton *singleton;
  8.1561 +
  8.1562 +    DEBUG__printf1(dbgRqstHdlr, "StartDataSingleton request from processor %d", requestingSlv->slaveID)
  8.1563 +    if (*(semReq->singletonPtrAddr) == NULL) {
  8.1564 +        singleton = VMS_PI__malloc(sizeof (VSsSingleton));
  8.1565 +        singleton->waitQ = makeVMSQ();
  8.1566 +        singleton->endInstrAddr = 0x0;
  8.1567 +        singleton->hasBeenStarted = FALSE;
  8.1568 +        singleton->hasFinished = FALSE;
  8.1569 +        *(semReq->singletonPtrAddr) = singleton;
  8.1570 +    } else
  8.1571 +        singleton = *(semReq->singletonPtrAddr);
  8.1572 +    handleStartSingleton_helper(singleton, requestingSlv, semEnv);
  8.1573 +}
  8.1574 +
  8.1575 +void inline
  8.1576 +handleEndSingleton_helper(VSsSingleton *singleton, SlaveVP *requestingSlv,
  8.1577 +        VSsSemEnv *semEnv) {
  8.1578 +    PrivQueueStruc *waitQ;
  8.1579 +    int32 numWaiting, i;
  8.1580 +    SlaveVP *resumingSlv;
  8.1581 +
  8.1582 +    if (singleton->hasFinished) { //by definition, only one slave should ever be able to run end singleton
  8.1583 +        // so if this is true, it is an error
  8.1584 +        ERROR1("singleton code ran twice", requestingSlv);
  8.1585      }
  8.1586  
  8.1587 -   singleton->hasFinished = TRUE;
  8.1588 -   waitQ = singleton->waitQ;
  8.1589 -   numWaiting = numInPrivQ( waitQ );
  8.1590 -   for( i = 0; i < numWaiting; i++ )
  8.1591 -    {    //they will resume inside start singleton, then jmp to end singleton
  8.1592 -      resumingSlv = readPrivQ( waitQ );
  8.1593 -      resumingSlv->dataRetFromReq = singleton->endInstrAddr;
  8.1594 -      resume_slaveVP( resumingSlv, semEnv );
  8.1595 +    singleton->hasFinished = TRUE;
  8.1596 +    singleton->executingVp = requestingSlv->slaveID;
  8.1597 +    singleton->executingTask = requestingSlv->assignCount;
  8.1598 +    waitQ = singleton->waitQ;
  8.1599 +    numWaiting = numInPrivQ(waitQ);
  8.1600 +    for (i = 0; i < numWaiting; i++) { //they will resume inside start singleton, then jmp to end singleton
  8.1601 +        resumingSlv = readPrivQ(waitQ);
  8.1602 +        resumingSlv->dataRetFromReq = singleton->endInstrAddr;
  8.1603 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
  8.1604 +        Dependency newd;
  8.1605 +        newd.from_vp = singleton->executingVp;
  8.1606 +        newd.from_task = singleton->executingTask;
  8.1607 +        newd.to_vp = resumingSlv->slaveID;
  8.1608 +        newd.to_task = resumingSlv->assignCount + 1;
  8.1609 +        addToListOfArrays(Dependency, newd, semEnv->singletonDependenciesList);
  8.1610 +#endif 
  8.1611 +        resume_slaveVP(resumingSlv, semEnv);
  8.1612      }
  8.1613  
  8.1614 -   resume_slaveVP( requestingSlv, semEnv );
  8.1615 +    resume_slaveVP(requestingSlv, semEnv);
  8.1616  
  8.1617  }
  8.1618 +
  8.1619  void inline
  8.1620 -handleEndFnSingleton( VSsSemReq *semReq, SlaveVP *requestingSlv,
  8.1621 -                        VSsSemEnv *semEnv )
  8.1622 - {
  8.1623 -   VSsSingleton   *singleton;
  8.1624 +handleEndFnSingleton(VSsSemReq *semReq, SlaveVP *requestingSlv,
  8.1625 +        VSsSemEnv *semEnv) {
  8.1626 +    VSsSingleton *singleton;
  8.1627  
  8.1628 -         DEBUG__printf1(dbgRqstHdlr,"EndFnSingleton request from processor %d",requestingSlv->slaveID)
  8.1629 -   
  8.1630 -   singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
  8.1631 -   handleEndSingleton_helper( singleton, requestingSlv, semEnv );
  8.1632 -  }
  8.1633 +    DEBUG__printf1(dbgRqstHdlr, "EndFnSingleton request from processor %d", requestingSlv->slaveID)
  8.1634 +
  8.1635 +    singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
  8.1636 +    handleEndSingleton_helper(singleton, requestingSlv, semEnv);
  8.1637 +}
  8.1638 +
  8.1639  void inline
  8.1640 -handleEndDataSingleton( VSsSemReq *semReq, SlaveVP *requestingSlv,
  8.1641 -                        VSsSemEnv *semEnv )
  8.1642 - {
  8.1643 -   VSsSingleton   *singleton;
  8.1644 +handleEndDataSingleton(VSsSemReq *semReq, SlaveVP *requestingSlv,
  8.1645 +        VSsSemEnv *semEnv) {
  8.1646 +    VSsSingleton *singleton;
  8.1647  
  8.1648 -         DEBUG__printf1(dbgRqstHdlr,"EndDataSingleton request from processor %d",requestingSlv->slaveID)
  8.1649 -   
  8.1650 -   singleton = *(semReq->singletonPtrAddr);
  8.1651 -   handleEndSingleton_helper( singleton, requestingSlv, semEnv );
  8.1652 -  }
  8.1653 +    DEBUG__printf1(dbgRqstHdlr, "EndDataSingleton request from processor %d", requestingSlv->slaveID)
  8.1654  
  8.1655 +    singleton = *(semReq->singletonPtrAddr);
  8.1656 +    handleEndSingleton_helper(singleton, requestingSlv, semEnv);
  8.1657 +}
  8.1658  
  8.1659  /*This executes the function in the masterVP: take the function
  8.1660  * pointer out of the request, call it, then resume the VP.
  8.1661   */
  8.1662  void
  8.1663 -handleAtomic( VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv )
  8.1664 - {
  8.1665 -         DEBUG__printf1(dbgRqstHdlr,"Atomic request from processor %d",requestingSlv->slaveID)
  8.1666 -   semReq->fnToExecInMaster( semReq->dataForFn );
  8.1667 -   resume_slaveVP( requestingSlv, semEnv );
  8.1668 - }
  8.1669 +handleAtomic(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) {
  8.1670 +    DEBUG__printf1(dbgRqstHdlr, "Atomic request from processor %d", requestingSlv->slaveID)
  8.1671 +    semReq->fnToExecInMaster(semReq->dataForFn);
  8.1672 +    resume_slaveVP(requestingSlv, semEnv);
  8.1673 +}
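         +/* Because every request handler runs in the masterVP, fnToExecInMaster
         + * is serialized against all other requests; a hypothetical dataForFn
         + * could be a pointer to a shared accumulator that the function updates
         + * without any lock.
         + */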
  8.1674  
  8.1675  /*First, it looks at the VP's semantic data, to see the highest transactionID
  8.1676   * that VP
  8.1677 @@ -1123,44 +1132,39 @@
  8.1678   *If NULL, then write requesting into the field and resume.
  8.1679   */
  8.1680  void
  8.1681 -handleTransStart( VSsSemReq *semReq, SlaveVP *requestingSlv,
  8.1682 -                  VSsSemEnv *semEnv )
  8.1683 - { VSsSemData *semData;
  8.1684 -   TransListElem *nextTransElem;
  8.1685 +handleTransStart(VSsSemReq *semReq, SlaveVP *requestingSlv,
  8.1686 +        VSsSemEnv *semEnv) {
  8.1687 +    VSsSemData *semData;
  8.1688 +    TransListElem *nextTransElem;
  8.1689  
  8.1690 -         DEBUG__printf1(dbgRqstHdlr,"TransStart request from processor %d",requestingSlv->slaveID)
  8.1691 -   
  8.1692 -      //check ordering of entering transactions is correct
  8.1693 -   semData = requestingSlv->semanticData;
  8.1694 -   if( semData->highestTransEntered > semReq->transID )
  8.1695 -    {    //throw VMS exception, which shuts down VMS.
  8.1696 -      VMS_PI__throw_exception( "transID smaller than prev", requestingSlv, NULL);
  8.1697 +    DEBUG__printf1(dbgRqstHdlr, "TransStart request from processor %d", requestingSlv->slaveID)
  8.1698 +
  8.1699 +    //check ordering of entering transactions is correct
  8.1700 +    semData = requestingSlv->semanticData;
  8.1701 +    if (semData->highestTransEntered > semReq->transID) { //throw VMS exception, which shuts down VMS.
  8.1702 +        VMS_PI__throw_exception("transID smaller than prev", requestingSlv, NULL);
  8.1703      }
  8.1704 -      //add this trans ID to the list of transactions entered -- check when
  8.1705 -      // end a transaction
  8.1706 -   semData->highestTransEntered = semReq->transID;
  8.1707 -   nextTransElem = VMS_PI__malloc( sizeof(TransListElem) );
  8.1708 -   nextTransElem->transID = semReq->transID;
  8.1709 -   nextTransElem->nextTrans = semData->lastTransEntered;
  8.1710 -   semData->lastTransEntered = nextTransElem;
  8.1711 +    //add this trans ID to the list of transactions entered -- check when
  8.1712 +    // end a transaction
  8.1713 +    semData->highestTransEntered = semReq->transID;
  8.1714 +    nextTransElem = VMS_PI__malloc(sizeof (TransListElem));
  8.1715 +    nextTransElem->transID = semReq->transID;
  8.1716 +    nextTransElem->nextTrans = semData->lastTransEntered;
  8.1717 +    semData->lastTransEntered = nextTransElem;
  8.1718  
  8.1719 -      //get the structure for this transaction ID
  8.1720 -   VSsTrans *
  8.1721 -   transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);
  8.1722 +    //get the structure for this transaction ID
  8.1723 +    VSsTrans *transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);
  8.1725  
  8.1726 -   if( transStruc->VPCurrentlyExecuting == NULL )
  8.1727 -    {
  8.1728 -      transStruc->VPCurrentlyExecuting = requestingSlv;
  8.1729 -      resume_slaveVP( requestingSlv, semEnv );
  8.1730 +    if (transStruc->VPCurrentlyExecuting == NULL) {
  8.1731 +        transStruc->VPCurrentlyExecuting = requestingSlv;
  8.1732 +        resume_slaveVP(requestingSlv, semEnv);
  8.1733 +    } else { //note: might be cleaner to save the request with the VP and
  8.1734 +        // add this trans ID to the linked list when it comes off the queue,
  8.1735 +        // but that isn't needed for now
  8.1736 +        writePrivQ(requestingSlv, transStruc->waitingVPQ);
  8.1737      }
  8.1738 -   else
  8.1739 -    {    //note, might make future things cleaner if save request with VP and
  8.1740 -         // add this trans ID to the linked list when gets out of queue.
  8.1741 -         // but don't need for now, and lazy..
  8.1742 -      writePrivQ( requestingSlv, transStruc->waitingVPQ );
  8.1743 -    }
  8.1744 - }
  8.1745 -
  8.1746 +}
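         +/* Queuing sketch for one transID, with illustrative slaves S1..S3:
         + *   S1 TransStart -> VPCurrentlyExecuting = S1, S1 resumes
         + *   S2 TransStart -> parked on waitingVPQ
         + *   S3 TransStart -> parked behind S2
         + *   S1 TransEnd   -> hands VPCurrentlyExecuting to S2 (see handleTransEnd)
         + */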
  8.1747  
  8.1748  /*Use the trans ID to get the transaction structure from the array.
  8.1749   *Look at VP_currently_executing to be sure it's same as requesting VP.
  8.1750 @@ -1176,38 +1180,36 @@
  8.1751   * resume both.
  8.1752   */
  8.1753  void
  8.1754 -handleTransEnd(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv)
  8.1755 - { VSsSemData    *semData;
  8.1756 -   SlaveVP     *waitingSlv;
  8.1757 -   VSsTrans      *transStruc;
  8.1758 -   TransListElem *lastTrans;
  8.1759 -   
  8.1760 -         DEBUG__printf1(dbgRqstHdlr,"TransEnd request from processor %d",requestingSlv->slaveID)
  8.1761 -   
  8.1762 -   transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);
  8.1763 +handleTransEnd(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) {
  8.1764 +    VSsSemData *semData;
  8.1765 +    SlaveVP *waitingSlv;
  8.1766 +    VSsTrans *transStruc;
  8.1767 +    TransListElem *lastTrans;
  8.1768  
  8.1769 -      //make sure transaction ended in same VP as started it.
  8.1770 -   if( transStruc->VPCurrentlyExecuting != requestingSlv )
  8.1771 -    {
  8.1772 -      VMS_PI__throw_exception( "trans ended in diff VP", requestingSlv, NULL );
  8.1773 +    DEBUG__printf1(dbgRqstHdlr, "TransEnd request from processor %d", requestingSlv->slaveID)
  8.1774 +
  8.1775 +    transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);
  8.1776 +
  8.1777 +    //make sure transaction ended in same VP as started it.
  8.1778 +    if (transStruc->VPCurrentlyExecuting != requestingSlv) {
  8.1779 +        VMS_PI__throw_exception("trans ended in diff VP", requestingSlv, NULL);
  8.1780      }
  8.1781  
  8.1782 -      //make sure nesting is correct -- last ID entered should == this ID
  8.1783 -   semData = requestingSlv->semanticData;
  8.1784 -   lastTrans = semData->lastTransEntered;
  8.1785 -   if( lastTrans->transID != semReq->transID )
  8.1786 -    {
  8.1787 -      VMS_PI__throw_exception( "trans incorrectly nested", requestingSlv, NULL );
  8.1788 +    //make sure nesting is correct -- last ID entered should == this ID
  8.1789 +    semData = requestingSlv->semanticData;
  8.1790 +    lastTrans = semData->lastTransEntered;
  8.1791 +    if (lastTrans->transID != semReq->transID) {
  8.1792 +        VMS_PI__throw_exception("trans incorrectly nested", requestingSlv, NULL);
  8.1793      }
  8.1794  
  8.1795 -   semData->lastTransEntered = semData->lastTransEntered->nextTrans;
  8.1796 +    semData->lastTransEntered = semData->lastTransEntered->nextTrans;
  8.1797  
  8.1798  
  8.1799 -   waitingSlv = readPrivQ( transStruc->waitingVPQ );
  8.1800 -   transStruc->VPCurrentlyExecuting = waitingSlv;
  8.1801 +    waitingSlv = readPrivQ(transStruc->waitingVPQ);
  8.1802 +    transStruc->VPCurrentlyExecuting = waitingSlv;
  8.1803  
  8.1804 -   if( waitingSlv != NULL )
  8.1805 -      resume_slaveVP( waitingSlv, semEnv );
  8.1806 +    if (waitingSlv != NULL)
  8.1807 +        resume_slaveVP(waitingSlv, semEnv);
  8.1808  
  8.1809 -   resume_slaveVP( requestingSlv, semEnv );
  8.1810 - }
  8.1811 +    resume_slaveVP(requestingSlv, semEnv);
  8.1812 +}
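         +/* When the waitQ is empty, readPrivQ evidently returns NULL (the check
         + * above guards the resume), so VPCurrentlyExecuting is cleared and the
         + * next TransStart on this ID takes the uncontended path again.
         + */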