changeset 21:feea343d202f dev_expl_VP_and_DKU

add support for more OmpSs features
author Nina Engelhardt <nengel@mailbox.tu-berlin.de>
date Mon, 29 Oct 2012 16:57:56 +0100
parents a7ca8f45c1c4
children b787a5234406
files VSs.c VSs.h VSs_PluginFns.c VSs_Request_Handlers.c VSs_Request_Handlers.h
diffstat 5 files changed, 242 insertions(+), 23 deletions(-) [+]
line diff
     1.1 --- a/VSs.c	Tue Sep 25 16:12:40 2012 +0200
     1.2 +++ b/VSs.c	Mon Oct 29 16:57:56 2012 +0100
     1.3 @@ -253,6 +253,12 @@
     1.4        semanticEnv->fnSingletons[i].hasFinished       = FALSE;
     1.5        semanticEnv->fnSingletons[i].waitQ             = makeVMSQ();
     1.6        semanticEnv->transactionStrucs[i].waitingVPQ   = makeVMSQ();
     1.7 +      semanticEnv->criticalSection[i].isOccupied     = FALSE;
     1.8 +      semanticEnv->criticalSection[i].waitQ          = makeVMSQ();
     1.9 +#ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
    1.10 +      semanticEnv->criticalSection[i].previous.vp    = 0;
    1.11 +      semanticEnv->criticalSection[i].previous.task  = 0;
    1.12 +#endif
    1.13      }
    1.14  
    1.15     semanticEnv->numLiveExtraTaskSlvs   = 0; //must be last
    1.16 @@ -543,7 +549,41 @@
    1.17     VMS_WL__send_sem_request( &reqData, animSlv );
    1.18  }
    1.19  
    1.20 +void
    1.21 +VSs__taskwait_on(SlaveVP *animSlv,void* ptr){
    1.22 +    VSsSemReq  reqData;
    1.23  
    1.24 +   reqData.reqType      = taskwait_on;
    1.25 +   reqData.callingSlv   = animSlv;
    1.26 +   
    1.27 +   reqData.args = ptr;
    1.28 +   
    1.29 +   VMS_WL__send_sem_request( &reqData, animSlv );
    1.30 +}
    1.31 +
    1.32 +void
    1.33 +VSs__start_critical(SlaveVP *animSlv,int32 name){
    1.34 +    VSsSemReq  reqData;
    1.35 +
    1.36 +   reqData.reqType      = critical_start;
    1.37 +   reqData.callingSlv   = animSlv;
    1.38 +   
    1.39 +   reqData.criticalID = name;
    1.40 +   
    1.41 +   VMS_WL__send_sem_request( &reqData, animSlv );
    1.42 +}
    1.43 +
    1.44 +void
    1.45 +VSs__end_critical(SlaveVP *animSlv,int32 name){
    1.46 +    VSsSemReq  reqData;
    1.47 +
    1.48 +   reqData.reqType      = critical_end;
    1.49 +   reqData.callingSlv   = animSlv;
    1.50 +   
    1.51 +   reqData.criticalID = name;
    1.52 +   
    1.53 +   VMS_WL__send_sem_request( &reqData, animSlv );
    1.54 +}
    1.55  
    1.56  //==========================  send and receive ============================
    1.57  //
     2.1 --- a/VSs.h	Tue Sep 25 16:12:40 2012 +0200
     2.2 +++ b/VSs.h	Mon Oct 29 16:57:56 2012 +0100
     2.3 @@ -92,6 +92,7 @@
     2.4     bool32       isEnded;
     2.5     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
     2.6     Unit parentUnit;
     2.7 +   Unit firstOfTask;
     2.8     #endif
     2.9   }
    2.10  VSsTaskStub;
    2.11 @@ -102,6 +103,7 @@
    2.12     VSsTaskStub *taskStub;
    2.13     int32        argNum;
    2.14     int32        isReader;
    2.15 +   bool32       isSuspended;
    2.16   }
    2.17  VSsTaskStubCarrier;
    2.18  
    2.19 @@ -131,6 +133,16 @@
    2.20   }
    2.21  VSsSingleton;
    2.22  
    2.23 +typedef struct
    2.24 + {
    2.25 +   int32           isOccupied;
    2.26 +   PrivQueueStruc *waitQ;
    2.27 +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    2.28 +   Unit            previous;
    2.29 +#endif
    2.30 + }
    2.31 +VSsCritical;
    2.32 +
    2.33  enum VSsReqType
    2.34   {
    2.35     submit_task = 1,
    2.36 @@ -145,6 +157,9 @@
    2.37     receive_from_to,
    2.38     //===============================
    2.39     taskwait,
    2.40 +   taskwait_on,
    2.41 +   critical_start,
    2.42 +   critical_end,
    2.43     malloc_req,
    2.44     free_req,
    2.45     singleton_fn_start,
    2.46 @@ -186,6 +201,7 @@
    2.47     void              *dataForFn;
    2.48  
    2.49     int32              transID;
    2.50 +   int32              criticalID;
    2.51   }
    2.52  /* VSsSemReq */;
    2.53  
    2.54 @@ -206,6 +222,7 @@
    2.55                         //fix limit on num with dynArray
    2.56     VSsSingleton     fnSingletons[NUM_STRUCS_IN_SEM_ENV];
    2.57     VSsTrans         transactionStrucs[NUM_STRUCS_IN_SEM_ENV];
    2.58 +   VSsCritical      criticalSection[NUM_STRUCS_IN_SEM_ENV];
    2.59  
    2.60     bool32          *coreIsDone;
    2.61     int32            numCoresDone;
    2.62 @@ -319,6 +336,14 @@
    2.63  void
    2.64  VSs__taskwait(SlaveVP *animSlv);
    2.65  
    2.66 +void
    2.67 +VSs__taskwait_on(SlaveVP *animSlv,void* ptr);
    2.68 +
    2.69 +void
    2.70 +VSs__start_critical(SlaveVP *animSlv,int32 name);
    2.71 +
    2.72 +void
    2.73 +VSs__end_critical(SlaveVP *animSlv,int32 name);
    2.74  
    2.75  int32 *
    2.76  VSs__give_self_taskID( SlaveVP *animSlv );
     3.1 --- a/VSs_PluginFns.c	Tue Sep 25 16:12:40 2012 +0200
     3.2 +++ b/VSs_PluginFns.c	Mon Oct 29 16:57:56 2012 +0100
     3.3 @@ -79,7 +79,9 @@
     3.4      }
     3.5     
     3.6  #ifdef EXTERNAL_SCHEDULER
     3.7 -    VSs__get_ready_tasks_from_ext(semEnv->taskReadyQ);
     3.8 +    if(isEmptyPrivQ(semEnv->taskReadyQ)){
     3.9 +        VSs__get_ready_tasks_from_ext(semEnv->taskReadyQ);
    3.10 +    }
    3.11  #endif
    3.12        //If none, speculate will have a task, so get the slot slave
    3.13        //TODO: false sharing ?  (think not bad cause mostly read..)
    3.14 @@ -97,6 +99,8 @@
    3.15          newTaskStub->slaveAssignedTo = returnSlv;
    3.16          semData->needsTaskAssigned = FALSE;
    3.17  #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    3.18 +        newTaskStub->firstOfTask.task = returnSlv->assignCount+1;
    3.19 +        newTaskStub->firstOfTask.vp = returnSlv->slaveID;
    3.20          Dependency newd;
    3.21          newd.from_vp = newTaskStub->parentUnit.vp;
    3.22          newd.from_task = newTaskStub->parentUnit.task;
    3.23 @@ -251,7 +255,12 @@
    3.24              break;
    3.25          case taskwait: handleTaskwait(semReq, reqSlv, semEnv);
    3.26              break;
    3.27 -
    3.28 +        case taskwait_on: handleTaskwaitOn(semReq, reqSlv, semEnv);
    3.29 +            break;
    3.30 +        case critical_start: handleCriticalStart(semReq, reqSlv, semEnv);
    3.31 +            break;
    3.32 +        case critical_end: handleCriticalEnd(semReq, reqSlv, semEnv);
    3.33 +            break;
    3.34              //====================================================================
    3.35          case malloc_req: handleMalloc(semReq, reqSlv, semEnv);
    3.36              break;
    3.37 @@ -271,7 +280,7 @@
    3.38              break;
    3.39          case trans_end: handleTransEnd(semReq, reqSlv, semEnv);
    3.40              break;
    3.41 -        default:
    3.42 +        default: VMS_PI__throw_exception("Unknown request type", reqSlv, NULL);
    3.43              break;
    3.44      }
    3.45  }
     4.1 --- a/VSs_Request_Handlers.c	Tue Sep 25 16:12:40 2012 +0200
     4.2 +++ b/VSs_Request_Handlers.c	Mon Oct 29 16:57:56 2012 +0100
     4.3 @@ -117,6 +117,7 @@
     4.4      newCarrier->taskStub = taskStub;
     4.5      newCarrier->argNum = argNum;
     4.6      newCarrier->isReader = rdOrWrite == READER;
     4.7 +    newCarrier->isSuspended = FALSE;
     4.8      
     4.9      return newCarrier;
    4.10  }
    4.11 @@ -224,7 +225,7 @@
    4.12   */
    4.13  void
    4.14  handleSubmitTask(VSsSemReq *semReq, VSsSemEnv *semEnv) {
    4.15 -    uint32 key[3];
    4.16 +    uint32 key[5];
    4.17      HashEntry *rawHashEntry; //has char *, but use with uint32 *
    4.18      VSsPointerEntry *ptrEntry; //contents of hash table entry for an arg pointer
    4.19      void **args;
    4.20 @@ -276,9 +277,10 @@
    4.21       */
    4.22      int32 argNum;
    4.23      for (argNum = 0; argNum < taskType->numCtldArgs; argNum++) {
    4.24 -        key[0] = 2; //two 32b values in key
    1.25 +        key[0] = 4; //four 32b values in key
    4.26          *((uint64*) & key[1]) = (uint64) args[argNum]; //write 64b into two 32b
    4.27 -
    4.28 +        *((uint64*) & key[3]) = (uint64) taskStub->parentTaskStub ;
    4.29 +        
    4.30          /*If the hash entry was chained, put it at the
    4.31           * start of the chain.  (Means no-longer-used pointers accumulate
    4.32           * at end of chain, decide garbage collection later) */
    4.33 @@ -305,8 +307,15 @@
    4.34               * task-stub into the readyQ.  At the same time, increment
    4.35               * the hash-entry's count of enabled and non-finished readers.*/
    4.36                  taskStub->numBlockingProp -= 1;
    4.37 +                if(taskStub->taskID) {  DEBUG__printf2(dbgRqstHdlr,"Reader %d now on ptrEntry %p; ",taskStub->taskID[1],ptrEntry) }
    4.38 +                else {DEBUG__printf2(dbgRqstHdlr,"Reader %p now on ptrEntry %p; ",taskStub,ptrEntry)}
    4.39                  if (taskStub->numBlockingProp == 0) {
    4.40                      writePrivQ(taskStub, semEnv->taskReadyQ);
    4.41 +                    if(taskStub->taskID) {   DEBUG__printf1(dbgRqstHdlr,"reader %d started; ",taskStub->taskID[1]) }
    4.42 +                    else { DEBUG__printf1(dbgRqstHdlr,"reader %p started; ",taskStub) }
    4.43 +                } else {
    4.44 +                    if(taskStub->taskID) {  DEBUG__printf2(dbgRqstHdlr,"reader %d still blocked on %d args; ",taskStub->taskID[1],taskStub->numBlockingProp) }
    4.45 +                    else {DEBUG__printf2(dbgRqstHdlr,"reader %p still blocked on %d args; ",taskStub,taskStub->numBlockingProp)}
    4.46                  }
    4.47                  ptrEntry->numEnabledNonDoneReaders += 1;
    4.48              } else { /*Otherwise, the reader is put into the hash-entry's Q of
    4.49 @@ -324,8 +333,15 @@
    4.50                * task-stub. If the count is zero, then put the task-stub
    4.51                * into the readyQ.*/
    4.52                  taskStub->numBlockingProp -= 1;
    4.53 +                if(taskStub->taskID) {   DEBUG__printf2(dbgRqstHdlr,"writer %d takes ptrEntry %p; ",taskStub->taskID[1],ptrEntry) }
    4.54 +                else { DEBUG__printf2(dbgRqstHdlr,"writer %p takes ptrEntry %p; ",taskStub,ptrEntry)}
    4.55                  if (taskStub->numBlockingProp == 0) {
    4.56                      writePrivQ(taskStub, semEnv->taskReadyQ);
    4.57 +                        if(taskStub->taskID) {   DEBUG__printf1(dbgRqstHdlr,"writer %d started; ",taskStub->taskID[1]) }
    4.58 +                        else { DEBUG__printf1(dbgRqstHdlr,"writer %p started; ",taskStub) }
    4.59 +                } else {
    4.60 +                    if(taskStub->taskID) {  DEBUG__printf2(dbgRqstHdlr,"writer %d still blocked on %d args; ",taskStub->taskID[1],taskStub->numBlockingProp) }
    4.61 +                    else {DEBUG__printf2(dbgRqstHdlr,"writer %p still blocked on %d args; ",taskStub,taskStub->numBlockingProp)}
    4.62                  }
    4.63                  ptrEntry->hasEnabledNonFinishedWriter = TRUE;
    4.64              } else {/*Otherwise, put the writer into the entry's Q of waiters.*/
    4.65 @@ -452,6 +468,7 @@
    4.66          if (endingTaskType->argTypes[argNum] == READER) { /*then decrement the enabled and non-finished reader-count in
    4.67            * the hash-entry. */
    4.68              ptrEntry->numEnabledNonDoneReaders -= 1;
    4.69 +            DEBUG__printf1(dbgRqstHdlr,"Releasing read on ptrEntry %p; ",ptrEntry)
    4.70          #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    4.71              Unit u;
    4.72              u.vp = semReq->callingSlv->slaveID;
    4.73 @@ -461,16 +478,18 @@
    4.74                  Dependency newd;
    4.75                  newd.from_vp = ptrEntry->lastWriter.vp;
    4.76                  newd.from_task = ptrEntry->lastWriter.task;
    4.77 -                newd.to_vp = semReq->callingSlv->slaveID;
    4.78 -                newd.to_task = semReq->callingSlv->assignCount;
    4.79 +                newd.to_vp = endingTaskStub->firstOfTask.vp;
    4.80 +                newd.to_task = endingTaskStub->firstOfTask.task;
    4.81                  addToListOfArrays(Dependency, newd, semEnv->dataDependenciesList);
    4.82              }
    4.83          #endif
    4.84              /*If the count becomes zero, then take the next entry from the Q. 
    4.85               *It should be a writer, or else there's a bug in this algorithm.*/
    4.86              if (ptrEntry->numEnabledNonDoneReaders == 0) {
    4.87 +                DEBUG__printf1(dbgRqstHdlr,"ptrEntry %p now free; ",ptrEntry)
    4.88                  waitingTaskCarrier = readPrivQ(ptrEntry->waitersQ);
    4.89 -                if (waitingTaskCarrier == NULL) { //TODO: looks safe to delete the ptr entry at this point 
    4.90 +                if (waitingTaskCarrier == NULL) { //TODO: looks safe to delete the ptr entry at this point
    4.91 +                    DEBUG__printf1(dbgRqstHdlr,"no waiting writer found for ptrEntry %p\n",ptrEntry)
    4.92                      continue; //next iter of loop
    4.93                  }
    4.94                  if (waitingTaskCarrier->isReader)
    4.95 @@ -485,10 +504,19 @@
    4.96                   * task-stub.  If the count has reached zero, then put the
    4.97                   * task-stub into the readyQ.*/
    4.98                  waitingTaskStub->numBlockingProp -= 1;
    4.99 +                if(waitingTaskStub->taskID) {   DEBUG__printf2(dbgRqstHdlr,"writer %d takes ptrEntry %p; ",waitingTaskStub->taskID[1],ptrEntry) }
   4.100 +                else { DEBUG__printf2(dbgRqstHdlr,"writer %p takes ptrEntry %p; ",waitingTaskStub,ptrEntry)}
   4.101                  if (waitingTaskStub->numBlockingProp == 0) {
   4.102 +                    if(waitingTaskStub->taskID) {   DEBUG__printf1(dbgRqstHdlr,"writer %d started; ",waitingTaskStub->taskID[1]) }
   4.103 +                    else { DEBUG__printf1(dbgRqstHdlr,"writer %p started; ",waitingTaskStub) }
   4.104                      writePrivQ(waitingTaskStub, semEnv->taskReadyQ);
   4.105 +                } else {
   4.106 +                    if(waitingTaskStub->taskID) {  DEBUG__printf2(dbgRqstHdlr,"writer %d still blocked on %d args; ",waitingTaskStub->taskID[1],waitingTaskStub->numBlockingProp) }
   4.107 +                    else {DEBUG__printf2(dbgRqstHdlr,"writer %p still blocked on %d args; ",waitingTaskStub,waitingTaskStub->numBlockingProp)}
   4.108                  }
   4.109 +                VMS_PI__free(waitingTaskCarrier);
   4.110              }
   4.111 +            
   4.112          } else /*the ending task is a writer of this arg*/ { /*clear the enabled non-finished writer flag of the hash-entry.*/
   4.113              ptrEntry->hasEnabledNonFinishedWriter = FALSE;
   4.114          #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   4.115 @@ -501,8 +529,8 @@
   4.116                          Dependency newd;
   4.117                          newd.from_vp = fragment[i].vp;
   4.118                          newd.from_task = fragment[i].task;
   4.119 -                        newd.to_vp = semReq->callingSlv->slaveID;
   4.120 -                        newd.to_task = semReq->callingSlv->assignCount;
   4.121 +                        newd.to_vp = endingTaskStub->firstOfTask.vp;
   4.122 +                        newd.to_task = endingTaskStub->firstOfTask.task;
   4.123                          addToListOfArrays(Dependency, newd, semEnv->warDependenciesList);
   4.124                      }
   4.125                  }
   4.126 @@ -513,8 +541,8 @@
   4.127                          Dependency newd;
   4.128                          newd.from_vp = fragment[i].vp;
   4.129                          newd.from_task = fragment[i].task;
   4.130 -                        newd.to_vp = semReq->callingSlv->slaveID;
   4.131 -                        newd.to_task = semReq->callingSlv->assignCount;
   4.132 +                        newd.to_vp = endingTaskStub->firstOfTask.vp;
   4.133 +                        newd.to_task = endingTaskStub->firstOfTask.task;
   4.134                          addToListOfArrays(Dependency, newd, semEnv->warDependenciesList);
   4.135                      }
   4.136                  }
   4.137 @@ -522,43 +550,70 @@
   4.138              clearListOfArrays(ptrEntry->readersSinceLastWriter);
   4.139              ptrEntry->lastWriter.vp = semReq->callingSlv->slaveID;
   4.140              ptrEntry->lastWriter.task = semReq->callingSlv->assignCount;
   4.141 -            #endif
   4.142 +        #endif
   4.143  
   4.144 +            DEBUG__printf1(dbgRqstHdlr,"Releasing write on ptrEntry %p; ",ptrEntry)
   4.145              /*Take the next waiter from the hash-entry's Q.*/
   4.146              waitingTaskCarrier = readPrivQ(ptrEntry->waitersQ);
   4.147              if (waitingTaskCarrier == NULL) { //TODO: looks safe to delete ptr entry at this point
   4.148 +                DEBUG__printf1(dbgRqstHdlr,"no waiting task on ptrEntry %p; ",ptrEntry)
   4.149                  continue; //go to next iter of loop, done here.
   4.150              }
   4.151              waitingTaskStub = waitingTaskCarrier->taskStub;
   4.152  
   4.153              /*If task is a writer of this hash-entry's pointer*/
   4.154              if (!waitingTaskCarrier->isReader) { /* then turn the flag back on.*/
   4.155 +                if(waitingTaskStub->taskID) {   DEBUG__printf2(dbgRqstHdlr,"writer %d takes ptrEntry %p; ",waitingTaskStub->taskID[1],ptrEntry) }
   4.156 +                else { DEBUG__printf2(dbgRqstHdlr,"Writer %p takes ptrEntry %p; ",waitingTaskStub,ptrEntry)}
   4.157                  ptrEntry->hasEnabledNonFinishedWriter = TRUE;
   4.158                  /*Decrement the writer's blocking-propendent-count in task-stub
   4.159                   * If it becomes zero, then put the task-stub into the readyQ.*/
   4.160                  waitingTaskStub->numBlockingProp -= 1;
   4.161                  if (waitingTaskStub->numBlockingProp == 0) {
   4.162 +                    if(waitingTaskStub->taskID) {   DEBUG__printf1(dbgRqstHdlr,"writer %d started; ",waitingTaskStub->taskID[1]) }
   4.163 +                    else {DEBUG__printf1(dbgRqstHdlr,"writer %p started; ",waitingTaskStub)}
   4.164                      writePrivQ(waitingTaskStub, semEnv->taskReadyQ);
   4.165 +                } else {
   4.166 +                    if(waitingTaskStub->taskID) {  DEBUG__printf2(dbgRqstHdlr,"writer %d still blocked on %d args; ",waitingTaskStub->taskID[1],waitingTaskStub->numBlockingProp) }
   4.167 +                    else {DEBUG__printf2(dbgRqstHdlr,"writer %p still blocked on %d args; ",waitingTaskStub,waitingTaskStub->numBlockingProp)}
   4.168                  }
   4.169 +                VMS_PI__free(waitingTaskCarrier);
   4.170              } else { /*Waiting task is a reader, so do a loop, of all waiting readers
   4.171               * until encounter a writer or waitersQ is empty*/
   4.172                  while (TRUE) /*The checks guarantee have a waiting reader*/ { /*Increment the hash-entry's count of enabled non-finished
   4.173                  * readers.*/
   4.174 -                    ptrEntry->numEnabledNonDoneReaders += 1;
   4.175 +                    //deal with tasks suspended by taskwait_on here - these don't count as a dependency but are otherwise treated like readers
   4.176 +                    if(waitingTaskCarrier->isSuspended){
   4.177 +                        if(waitingTaskStub->taskID) {  DEBUG__printf2(dbgRqstHdlr,"task %d taskwaiting on ptr %p resumed; ",waitingTaskStub->taskID[1],ptrEntry) }
   4.178 +                    else {DEBUG__printf2(dbgRqstHdlr,"task %p taskwaiting on ptr %p resumed; ",waitingTaskStub,ptrEntry)}
   4.179 +                        resume_slaveVP(waitingTaskStub->slaveAssignedTo, semEnv);
   4.180 +                    } else {
   4.181  
   4.182 -                    /*Decrement the blocking propendents count of the reader's
   4.183 -                     * task-stub.  If it reaches zero, then put the task-stub
   4.184 -                     * into the readyQ.*/
   4.185 -                    waitingTaskStub->numBlockingProp -= 1;
   4.186 -                    if (waitingTaskStub->numBlockingProp == 0) {
   4.187 -                        writePrivQ(waitingTaskStub, semEnv->taskReadyQ);
   4.188 -                    }
   4.189 +                        ptrEntry->numEnabledNonDoneReaders += 1;
   4.190 +                        if(waitingTaskStub->taskID) {  DEBUG__printf2(dbgRqstHdlr,"Reader %d now on ptrEntry %p; ",waitingTaskStub->taskID[1],ptrEntry) }
   4.191 +                        else {DEBUG__printf2(dbgRqstHdlr,"Reader %p now on ptrEntry %p; ",waitingTaskStub,ptrEntry)}
   4.192 +                        /*Decrement the blocking propendents count of the reader's
   4.193 +                         * task-stub.  If it reaches zero, then put the task-stub
   4.194 +                         * into the readyQ.*/
   4.195 +                        waitingTaskStub->numBlockingProp -= 1;
   4.196 +                        
   4.197 +                        if (waitingTaskStub->numBlockingProp == 0) {
   4.198 +                            if(waitingTaskStub->taskID) {   DEBUG__printf1(dbgRqstHdlr,"reader %d started; ",waitingTaskStub->taskID[1]) }
   4.199 +                            else {DEBUG__printf1(dbgRqstHdlr,"reader %p started; ",waitingTaskStub)}
   4.200 +                            writePrivQ(waitingTaskStub, semEnv->taskReadyQ);
   4.201 +                        } else {
   4.202 +                                if(waitingTaskStub->taskID) {  DEBUG__printf2(dbgRqstHdlr,"reader %d still blocked on %d args; ",waitingTaskStub->taskID[1],waitingTaskStub->numBlockingProp) }
   4.203 +                                else {DEBUG__printf2(dbgRqstHdlr,"reader %p still blocked on %d args; ",waitingTaskStub,waitingTaskStub->numBlockingProp)}
   4.204 +                        }
   4.205 +                    } //if-else, suspended or normal reader
   4.206 +                    //discard carrier
   4.207 +                    VMS_PI__free(waitingTaskCarrier);
   4.208                      /*Get next waiting task*/
   4.209                      waitingTaskCarrier = peekPrivQ(ptrEntry->waitersQ);
   4.210                      if (waitingTaskCarrier == NULL) break;
   4.211                      if (!waitingTaskCarrier->isReader) break;
   4.212                      waitingTaskCarrier = readPrivQ(ptrEntry->waitersQ);
   4.213 -                    waitingTaskStub = waitingTaskCarrier->taskStub;
   4.214 +                    waitingTaskStub = waitingTaskCarrier->taskStub;                   
   4.215                  }//while waiter is a reader
   4.216              }//if-else, first waiting task is a reader
   4.217          }//if-else, check of ending task, whether writer or reader
   4.218 @@ -1018,7 +1073,91 @@
   4.219      }
   4.220  }
   4.221  
   4.222 +void handleTaskwaitOn(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) {
   4.223 +    VSsTaskStub* requestingTaskStub;
   4.224 +    VSsSemData* semData;
   4.225 +    VSsPointerEntry* ptrEntry;
   4.226 +    VSsTaskStubCarrier *taskCarrier;
   4.227 +    uint32 key[5];
   4.228 +    HashEntry* rawHashEntry; //has char *, but use with uint32 *
   4.229 +    HashTable* argPtrHashTbl = semEnv->argPtrHashTbl;
   4.230  
   4.231 +
   4.232 +    semData = (VSsSemData *) semReq->callingSlv->semanticData;
   4.233 +    requestingTaskStub = semData->taskStub;
   4.234 +
   4.235 +    if(requestingTaskStub->taskID) {   DEBUG__printf2(dbgRqstHdlr,
   4.236 +            "TaskwaitOn request from processor %d, task: %d",requestingSlv->slaveID,
   4.237 +            requestingTaskStub->taskID[1]) }
   4.238 +    else {DEBUG__printf1(dbgRqstHdlr, "TaskwaitOn request from processor %d",
   4.239 +            requestingSlv->slaveID);}
   4.240 +    
   4.241 +    void* ptr = semReq->args;
   4.242 +
   4.243 +    key[0] = 4; //four 32b values in key
   4.244 +    *((uint64*) & key[1]) = (uint64) ptr; //write 64b into two 32b
   4.245 +    *((uint64*) & key[3]) = (uint64) requestingTaskStub->parentTaskStub;
   4.246 +    /*If the hash entry was chained, put it at the
   4.247 +     * start of the chain.  (Means no-longer-used pointers accumulate
   4.248 +     * at end of chain, decide garbage collection later) */
   4.249 +    rawHashEntry = getEntryFromTable32(key, argPtrHashTbl);
   4.250 +    if (rawHashEntry == NULL) { //adding a value auto-creates the hash-entry
   4.251 +        ptrEntry = create_pointer_entry();
   4.252 +        rawHashEntry = addValueIntoTable32(key, ptrEntry, argPtrHashTbl);
   4.253 +    } else {
   4.254 +        ptrEntry = (VSsPointerEntry *) rawHashEntry->content;
   4.255 +        if (ptrEntry == NULL) {
   4.256 +            ptrEntry = create_pointer_entry();
   4.257 +            rawHashEntry = addValueIntoTable32(key, ptrEntry, argPtrHashTbl);
   4.258 +        }
   4.259 +    }
   4.260 +
   4.261 +    if (!ptrEntry->hasEnabledNonFinishedWriter &&
   4.262 +            isEmptyPrivQ(ptrEntry->waitersQ)) { 
   4.263 +        resume_slaveVP(requestingSlv, semEnv);
   4.264 +    } else { /*Otherwise, the suspended task is put into the hash-entry's Q of
   4.265 +             * waiters*/
   4.266 +        taskCarrier = create_task_carrier(requestingTaskStub, -1, READER);
   4.267 +        taskCarrier->isSuspended = TRUE;
   4.268 +        writePrivQ(taskCarrier, ptrEntry->waitersQ);
   4.269 +    }
   4.270 +}
   4.271 +
   4.272 +void handleCriticalStart(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv){
   4.273 +    VSsSemData* semData;
   4.274 +    int32 criticalID;
   4.275 +    DEBUG__printf1(dbgRqstHdlr, "CriticalStart request from processor %d",
   4.276 +            requestingSlv->slaveID)
   4.277 +
   4.278 +    semData = (VSsSemData *) semReq->callingSlv->semanticData;
   4.279 +    
   4.280 +    criticalID = semReq->criticalID;
   4.281 +    if(!semEnv->criticalSection[criticalID].isOccupied){
   4.282 +        semEnv->criticalSection[criticalID].isOccupied = TRUE;
   4.283 +        resume_slaveVP(requestingSlv, semEnv);
   4.284 +    } else {
   4.285 +        writePrivQ(requestingSlv, semEnv->criticalSection[criticalID].waitQ);
   4.286 +    }
   4.287 +}
   4.288 +
   4.289 +void handleCriticalEnd(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv){
   4.290 +    VSsSemData* semData;
   4.291 +    SlaveVP *waitingSlv;
   4.292 +    int32 criticalID;
   4.293 +    DEBUG__printf1(dbgRqstHdlr, "CriticalEnd request from processor %d",
   4.294 +            requestingSlv->slaveID)
   4.295 +
   4.296 +    semData = (VSsSemData *) semReq->callingSlv->semanticData;
   4.297 +    
   4.298 +    criticalID = semReq->criticalID;
   4.299 +    semEnv->criticalSection[criticalID].isOccupied = FALSE;
   4.300 +    waitingSlv = readPrivQ(semEnv->criticalSection[criticalID].waitQ);
   4.301 +    if(waitingSlv!=NULL){
   4.302 +        semEnv->criticalSection[criticalID].isOccupied = TRUE;
   4.303 +        resume_slaveVP(waitingSlv, semEnv);
   4.304 +    }
   4.305 +    resume_slaveVP(requestingSlv, semEnv);
   4.306 +}
   4.307  //==========================================================================
   4.308  
   4.309  /*
     5.1 --- a/VSs_Request_Handlers.h	Tue Sep 25 16:12:40 2012 +0200
     5.2 +++ b/VSs_Request_Handlers.h	Mon Oct 29 16:57:56 2012 +0100
     5.3 @@ -28,6 +28,12 @@
     5.4  handleReceiveFromTo( VSsSemReq *semReq, VSsSemEnv *semEnv);
     5.5  void
     5.6  handleTaskwait(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv);
     5.7 +void
     5.8 +handleTaskwaitOn(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv);
     5.9 +void
    5.10 +handleCriticalStart(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv);
    5.11 +void
    5.12 +handleCriticalEnd(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv);
    5.13  
    5.14  void
    5.15  handleMalloc( VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv);