changeset 8:eb3d77ca9f59 dev_expl_VP_and_DKU

Code complete -- not debugged yet
author Sean Halle <seanhalle@yahoo.com>
date Thu, 02 Aug 2012 01:03:14 -0700
parents 3999b8429ddd
children 832bc715fbf2
files VSs.c VSs.h VSs_PluginFns.c VSs_Request_Handlers.c
diffstat 4 files changed, 290 insertions(+), 225 deletions(-) [+]
line diff
     1.1 --- a/VSs.c	Wed Aug 01 03:16:27 2012 -0700
     1.2 +++ b/VSs.c	Thu Aug 02 01:03:14 2012 -0700
     1.3 @@ -76,7 +76,7 @@
     1.4   { VSsSemEnv   *semEnv;
     1.5     SlaveVP     *seedSlv;
     1.6     VSsSemData  *semData;
     1.7 -   VSsTaskStub *explPrTaskStub;
     1.8 +   VSsTaskStub *threadTaskStub, *parentTaskStub;
     1.9  
    1.10     VSs__init();      //normal multi-thd
    1.11     
    1.12 @@ -87,14 +87,21 @@
    1.13     seedSlv = VSs__create_slave_helper( fnPtr, initData,
    1.14                                       semEnv, semEnv->nextCoreToGetNewSlv++ );
    1.15     
    1.16 -      //seed slave is an explicit processor, so make one of the special
    1.17 -      // task stubs for explicit processors, and attach it to the slave
    1.18 -   explPrTaskStub = create_expl_proc_task_stub( initData );
    1.19 +      //seed slave is a thread slave, so make a thread's task stub for it
    1.20 +      // and then make another to stand for the seed's parent task.  Make
    1.21 +      // the parent be already ended, and have one child (the seed).  This
    1.22 +      // will make the dissipate handler do the right thing when the seed
    1.23 +      // is dissipated.
    1.24 +   threadTaskStub = create_thread_task_stub( initData );
    1.25 +   parentTaskStub = create_thread_task_stub( NULL );
    1.26 +   parentTaskStub->isEnded = TRUE;
    1.27 +   parentTaskStub->numLiveChildThreads = 1; //so dissipate works for seed
    1.28 +   threadTaskStub->parentTasksStub = parentTaskStub;
    1.29     
    1.30     semData = (VSsSemData *)seedSlv->semanticData;
    1.31 -      //seedVP already has a permanent task
    1.32 +      //seedVP is a thread, so has a permanent task
    1.33     semData->needsTaskAssigned = FALSE;
    1.34 -   semData->taskStub = explPrTaskStub;
    1.35 +   semData->taskStub = threadTaskStub;
    1.36  
    1.37     resume_slaveVP( seedSlv, semEnv ); //returns right away, just queues Slv
    1.38     
    1.39 @@ -239,7 +246,8 @@
    1.40        semanticEnv->transactionStrucs[i].waitingVPQ   = makeVMSQ();
    1.41      }
    1.42  
    1.43 -   semanticEnv->numAdditionalSlvs   = 0; //must be last
    1.44 +   semanticEnv->numLiveExtraTaskSlvs   = 0; //must be last
    1.45 +   semanticEnv->numLiveThreadSlvs      = 1; //must be last, count the seed
    1.46  
    1.47     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    1.48     semanticEnv->unitList = makeListOfArrays(sizeof(Unit),128);
    1.49 @@ -415,18 +423,24 @@
    1.50   * this.
    1.51   * 
    1.52   *This must update the count of active sub-tasks (sub-threads) of parents,
    1.53 - * and the semantic data and task stub must stay 
    1.54 + * and the semantic data and task stub must stay.   
    1.55   */
    1.56  void
    1.57  VSs__end_thread( SlaveVP *thdToEnd )
    1.58 - {
    1.59 + { VSsSemData *semData;
    1.60 + 
    1.61        //check whether all sub-tasks have ended.. if not, don't free the
    1.62        // semantic data nor task stub of this thread.
    1.63 -   check_sub_tasks();
    1.64 +   semData = (VSsSemData *)thdToEnd->semanticData;
    1.65 +   if( semData->taskStub->numLiveChildTasks != 0 )
    1.66 +    { 
    1.67 +   fix_me();   
    1.68 +    }
    1.69     
    1.70        //Update the count of live sub-tasks in parent.  If parent was a
    1.71        // thread and has already ended, then if this was the last sub-task,
    1.72        // free the semantic data and task stub of the parent.
    1.73 +   
    1.74     VMS_WL__send_dissipate_req( thdToEnd );
    1.75   }
    1.76  
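A minimal standalone C sketch of the bookkeeping the seed-creation hunk above sets up (the struct and function names here are simplified stand-ins, not the changeset's own): the fabricated parent stub is created already ended with one live child thread, so the ordinary last-child check in the dissipate path frees it when the seed dissipates.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for VSsTaskStub: only the fields the trick relies on. */
typedef struct TaskStub
 { struct TaskStub *parentTasksStub;
   int              numLiveChildThreads;
   bool             isEnded;
 } TaskStub;

/* Mirrors the thread-dissipate bookkeeping: drop the parent's live-child
 * count, and free the parent's stub if it has already ended and this was
 * its last child.  (The real handler also checks child tasks and waiting
 * parents.) */
static void dissipate_thread( TaskStub *own )
 { TaskStub *parent = own->parentTasksStub;
   free( own );                            /* the seed has no children here */
   parent->numLiveChildThreads -= 1;
   if( parent->isEnded && parent->numLiveChildThreads == 0 )
    { free( parent );          /* fabricated parent freed by its last child */
      printf( "parent stub freed by its last child\n" );
    }
 }

int main( void )
 { TaskStub *seedStub   = calloc( 1, sizeof(TaskStub) );
   TaskStub *parentStub = calloc( 1, sizeof(TaskStub) );

      /* same setup the seed-creation code above performs */
   parentStub->isEnded             = true;
   parentStub->numLiveChildThreads = 1;    /* the seed */
   seedStub->parentTasksStub       = parentStub;

   dissipate_thread( seedStub );
   return 0;
 }
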
     2.1 --- a/VSs.h	Wed Aug 01 03:16:27 2012 -0700
     2.2 +++ b/VSs.h	Thu Aug 02 01:03:14 2012 -0700
     2.3 @@ -41,6 +41,10 @@
     2.4  #define READER  1  /*Trick -- READER same as IN*/
     2.5  #define WRITER  2  /*Trick -- WRITER same as OUT and INOUT*/
     2.6  
     2.7 +#define IS_A_THREAD NULL
     2.8 +#define IS_ENDED    NULL
     2.9 +#define SEED_SLV    NULL
    2.10 +
    2.11  typedef struct
    2.12   {
    2.13     VSsTaskFnPtr fn;
    2.14 @@ -69,22 +73,15 @@
    2.15     int32        numBlockingProp;
    2.16     SlaveVP     *slaveAssignedTo;
    2.17     VSsPointerEntry  **ptrEntries;
    2.18 -   void*        parent;
    2.19 -   bool32       parentIsTask;
    2.20 -   int32        numChildTasks;
    2.21 -   bool32       isWaiting;
    2.22 +   void*        parentTasksStub;
    2.23 +   int32        numLiveChildTasks;
    2.24 +   int32        numLiveChildThreads;
    2.25 +   bool32       isWaitingForChildTasksToEnd;
    2.26 +   bool32       isWaitingForChildThreadsToEnd;
    2.27 +   bool32       isEnded;
    2.28   }
    2.29  VSsTaskStub;
    2.30  
    2.31 -typedef struct 
    2.32 - {
    2.33 -    void* parent;
    2.34 -    bool32       parentIsTask;
    2.35 -    int32        numChildTasks;
    2.36 -    bool32       isWaiting;
    2.37 -   SlaveVP     *slaveAssignedTo;    
    2.38 - }
    2.39 -VSsThreadInfo;
    2.40  
    2.41  typedef struct
    2.42   {
    2.43 @@ -186,12 +183,13 @@
    2.44  typedef struct
    2.45   {
    2.46     PrivQueueStruc **slavesReadyToResumeQ; //Shared (slaves not pinned)
    2.47 -   PrivQueueStruc **extraTaskSlvQ;     //Shared
    2.48 +   PrivQueueStruc **freeExtraTaskSlvQ;     //Shared
    2.49     PrivQueueStruc  *taskReadyQ;        //Shared (tasks not pinned)
    2.50     SlaveVP         *currTaskSlvs[NUM_CORES][NUM_ANIM_SLOTS];
    2.51     HashTable       *argPtrHashTbl;
    2.52     HashTable       *commHashTbl;
    2.53 -   int32            numAdditionalSlvs;
    2.54 +   int32            numLiveExtraTaskSlvs;
    2.55 +   int32            numLiveThreadSlvs;
    2.56     int32            nextCoreToGetNewSlv;
    2.57     int32            primitiveStartTime;
    2.58  
    2.59 @@ -230,7 +228,11 @@
    2.60   };
    2.61  //TransListElem
    2.62   
    2.63 - 
    2.64 +enum VSsSlvType
    2.65 + { extraTaskSlv = 1,
    2.66 +   slotTaskSlv,
    2.67 +   threadSlv
    2.68 + };
    2.69   
    2.70  typedef struct
    2.71   {
    2.72 @@ -238,7 +240,7 @@
    2.73     TransListElem *lastTransEntered;
    2.74     bool32         needsTaskAssigned;
    2.75     VSsTaskStub   *taskStub;
    2.76 -   VSsThreadInfo *threadInfo;
    2.77 +   VSsSlvType     slaveType;
    2.78   }
    2.79  VSsSemData;
    2.80   
    2.81 @@ -357,7 +359,7 @@
    2.82                            VSsSemEnv *semEnv,    int32 coreToAssignOnto );
    2.83  
    2.84  VSsTaskStub *
    2.85 -create_expl_proc_task_stub( void *initData );
    2.86 +create_thread_task_stub( void *initData );
    2.87  
    2.88  
    2.89  SlaveVP *
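The header changes above fold VSsThreadInfo into VSsTaskStub: every slave's stub now reaches its parent through a single parentTasksStub pointer, and the live-child count is split into tasks versus threads. A small standalone sketch of the reclamation rule those fields imply, using stand-in names (not the changeset's own); the dissipate handler in VSs_PluginFns.c below checks both counts this way before freeing a stub.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in carrying just the new bookkeeping fields of VSsTaskStub. */
typedef struct
 { int    numLiveChildTasks;
   int    numLiveChildThreads;
   bool   isEnded;
 } StubCounts;

/* A stub may be reclaimed only once it has ended AND has no live
 * descendants of either kind; otherwise its last descendant frees it. */
static bool stub_can_be_freed( const StubCounts *s )
 { return s->isEnded
       && s->numLiveChildTasks   == 0
       && s->numLiveChildThreads == 0;
 }

int main( void )
 { StubCounts parent = { .numLiveChildTasks = 1, .isEnded = true };
   printf( "free now? %d\n", stub_can_be_freed( &parent ) ); /* 0: a child task is live */
   parent.numLiveChildTasks -= 1;                            /* last child task ends    */
   printf( "free now? %d\n", stub_can_be_freed( &parent ) ); /* 1: safe to free         */
   return 0;
 }
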
     3.1 --- a/VSs_PluginFns.c	Wed Aug 01 03:16:27 2012 -0700
     3.2 +++ b/VSs_PluginFns.c	Thu Aug 02 01:03:14 2012 -0700
     3.3 @@ -50,38 +50,29 @@
     3.4   */
     3.5  SlaveVP *
     3.6  VSs__assign_slaveVP_to_slot( void *_semEnv, AnimSlot *slot )
     3.7 - { SlaveVP    *returnSlv;
     3.8 -   VSsSemEnv  *semEnv;
     3.9 -   VSsSemData *semData;
    3.10 -   int32       coreNum, slotNum;
    3.11 -   
    3.12 + { SlaveVP     *returnSlv;
    3.13 +   VSsSemEnv   *semEnv;
    3.14 +   VSsSemData  *semData;
    3.15 +   int32        coreNum, slotNum;
    3.16 +   VSsTaskStub *newTaskStub;
    3.17 +   SlaveVP     *extraSlv;
    3.18 +  
    3.19     coreNum = slot->coreSlotIsOn;
    3.20     slotNum = slot->slotIdx;
    3.21     
    3.22     semEnv  = (VSsSemEnv *)_semEnv;
    3.23  
    3.24 +      //Speculatively set the return slave to the current taskSlave
    3.25 +      //TODO: false sharing ?  Always read..
    3.26 +   returnSlv = semEnv->currTaskSlvs[coreNum][slotNum];
    3.27     
    3.28 -   //Speculatively set the return slave to the current taskSlave
    3.29 -   //TODO: false sharing ?  Always read..
    3.30 -   returnSlv = semEnv->currTaskSlvs[coreNum][slotNum];
    3.31 -
    3.32 -/* request handlers do this now..  move it to there..
    3.33 -   if( returnSlv == NULL )
    3.34 -    {    //make a new slave to animate
    3.35 -         //This happens for the first task on the core and when all available
    3.36 -         //slaves are blocked by constructs like send, or mutex, and so on.
    3.37 -      returnSlv = VSs__create_slave_helper( NULL, NULL, semEnv, coreNum );
    3.38 -    }
    3.39 - */ 
    3.40     semData = (VSsSemData *)returnSlv->semanticData;
    3.41  
    3.42        //There is always a curr task slave, and it always needs a task
    3.43 -      //(task slaves that are resuming are in resumeQ)
    3.44 -   VSsTaskStub *newTaskStub;
    3.45 -   SlaveVP *extraSlv;
    3.46 +      // (task slaves that are resuming are in resumeQ)
    3.47     newTaskStub = readPrivQ( semEnv->taskReadyQ );
    3.48     if( newTaskStub != NULL )
    3.49 -    { //point slave to task's function, and mark slave as having task
    3.50 +    {    //point slave to task's function, and mark slave as having task
    3.51        VMS_int__reset_slaveVP_to_TopLvlFn( returnSlv, 
    3.52                            newTaskStub->taskType->fn, newTaskStub->args );
    3.53        semData->taskStub            = newTaskStub;
    3.54 @@ -92,7 +83,7 @@
    3.55        goto ReturnTheSlv;
    3.56      }
    3.57     else
    3.58 -    { //no task, so try to get a ready to resume slave
    3.59 +    {    //no task, so try to get a ready to resume slave
    3.60        returnSlv = readPrivQ( semEnv->slavesReadyToResumeQ );
    3.61        if( returnSlv != NULL )  //Yes, have a slave, so return it.
    3.62         { returnSlv->coreAnimatedBy   = coreNum;
    3.63 @@ -100,8 +91,8 @@
    3.64              semEnv->coreIsDone[coreNum] = FALSE;   //don't just write always
    3.65           goto ReturnTheSlv;
    3.66         }
    3.67 -      //If get here, then no task, so check if have extra free slaves
    3.68 -      extraSlv = readPrivQ( semEnv->extraTaskSlvQ );
    3.69 +         //If get here, then no task, so check if have extra free slaves
    3.70 +      extraSlv = readPrivQ( semEnv->freeExtraTaskSlvQ );
    3.71        if( extraSlv != NULL )
    3.72         {    //means have two slaves need tasks -- redundant, kill one
    3.73           handleDissipate( extraSlv, semEnv );
    3.74 @@ -111,9 +102,10 @@
    3.75         }
    3.76        else
    3.77         { //candidate for shutdown.. if all extras dissipated, and no tasks
    3.78 -         // and no ready to resume slaves, then then no way to generate
    3.79 +         // and no ready to resume slaves, then no way to generate
    3.80           // more tasks..
    3.81 -         if( semEnv->numAdditionalSlvs == 0 ) //means none suspended
    3.82 +         if( semEnv->numLiveExtraTaskSlvs == 0 && 
    3.83 +             semEnv->numLiveThreadSlvs == 0 )
    3.84            { //This core sees no way to generate more tasks, so say it
    3.85              if( semEnv->coreIsDone[coreNum] == FALSE )
    3.86               { semEnv->numCoresDone += 1;
    3.87 @@ -272,21 +264,67 @@
    3.88  
    3.89  
    3.90  //=========================== VMS Request Handlers ==============================
    3.91 -/*SlaveVP dissipate  (NOT task-end!)
    3.92 +/*SlaveVP dissipate -- this is NOT task-end!  Only call this to get rid of
    3.93 + * extra task slaves, and to end explicitly created threads
    3.94   */
    3.95  inline void
    3.96  handleDissipate( SlaveVP *requestingSlv, VSsSemEnv *semEnv )
    3.97 - {
    3.98 + { VSsSemData  *semData;
    3.99 +   VSsTaskStub *parentTaskStub, *ownTaskStub;
   3.100 + 
   3.101           DEBUG__printf1(dbgRqstHdlr,"Dissipate request from processor %d",
   3.102                                                       requestingSlv->slaveID)
   3.103 -       
   3.104 -   semEnv->numAdditionalSlvs -= 1;
   3.105 +   semData = (VSsSemData *)requestingSlv->semanticData;
   3.106 +
   3.107 +   if( semData->slaveType == extraTaskSlv )
   3.108 +    { semEnv->numLiveExtraTaskSlvs -= 1; //for detecting shutdown condition
   3.109 +         //Has no task assigned, so no parents and no children, so free self
   3.110 +      goto FreeSlaveStateAndReturn;
   3.111 +    }
   3.112 +
   3.113 +   if( semData->slaveType == slotTaskSlv )
   3.114 +    {    //should never call dissipate on a slot assigned slave
   3.115 +      VMS__throw_exception(); 
   3.116 +    }
   3.117 +
   3.118 +      //if make it to here, then is a thread slave
   3.119 +   semEnv->numLiveThreadSlvs -= 1; //for detecting shutdown condition
   3.120 +   ownTaskStub    = semData->taskStub;
   3.121 +   parentTaskStub = ownTaskStub->parentTasksStub;
   3.122     
   3.123 -      //free any semantic data allocated to the virt procr
   3.124 -   VMS_PI__free( requestingSlv->semanticData );
   3.125 +      //if all children ended, then free this task's stub
   3.126 +      // else, keep stub around, and last child will free it (below)
   3.127 +   if( ownTaskStub->numLiveChildTasks   == 0 &&
   3.128 +       ownTaskStub->numLiveChildThreads == 0 )
   3.129 +      free_task_stub( ownTaskStub );
   3.130 +   else
   3.131 +      ownTaskStub->isEnded = TRUE; //for children to see when they end
   3.132  
   3.133 -      //Now, call VMS to free_all AppVP state -- stack and so on
   3.134 +      //check if this is last child of ended parent
   3.135 +   parentTaskStub->numLiveChildThreads -= 1;  //parent stub cannot be NULL
   3.136 +   if( parentTaskStub->isEnded )
   3.137 +    { if( parentTaskStub->numLiveChildTasks   == 0 && 
   3.138 +          parentTaskStub->numLiveChildThreads == 0 )
   3.139 +         free_task_stub( parentTaskStub ); //just stub, semData already freed
   3.140 +    }
   3.141 + 
   3.142 +      //Now, check on waiting parents -- could be waiting on just tasks or
   3.143 +      // just threads or both.  Handle each case.
   3.144 +   if( parentTaskStub->isWaitingForChildThreadsToEnd )
   3.145 +    { if( parentTaskStub->numLiveChildThreads == 0 )
   3.146 +       { parentTaskStub->isWaitingForChildThreadsToEnd = FALSE;
   3.147 +         if( parentTaskStub->isWaitingForChildTasksToEnd )
   3.148 +            return; //still waiting on tasks, nothing to do
   3.149 +         else //parent free to resume
   3.150 +            resume_slaveVP( parentTaskStub->slaveAssignedTo, semEnv );
   3.151 +       }
   3.152 +    }
   3.153 +
   3.154 +      //Free the semData and requesting slave's base state for all cases
   3.155 + FreeSlaveStateAndReturn:
   3.156 +   VMS_PI__free( semData );
   3.157     VMS_PI__dissipate_slaveVP( requestingSlv );
   3.158 +   return; 
   3.159   }
   3.160  
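The tail of handleDissipate above resumes a waiting parent only under the right combination of flags. A standalone sketch of that resume rule with simplified stand-ins (names are illustrative, not the changeset's): the parent wakes only when the children it was waiting on have drained and it is not still waiting on the other kind.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in with just the waiting-related fields of VSsTaskStub. */
typedef struct
 { int  numLiveChildTasks;
   int  numLiveChildThreads;
   bool isWaitingForChildTasksToEnd;
   bool isWaitingForChildThreadsToEnd;
 } WaitState;

/* Called when a child thread of 'parent' dissipates; returns true when the
 * parent's slave should be resumed (mirrors the tail of handleDissipate). */
static bool child_thread_ended( WaitState *parent )
 { parent->numLiveChildThreads -= 1;
   if( !parent->isWaitingForChildThreadsToEnd ) return false;
   if( parent->numLiveChildThreads != 0 )       return false;
   parent->isWaitingForChildThreadsToEnd = false;
   return !parent->isWaitingForChildTasksToEnd;  /* still blocked on tasks? */
 }

int main( void )
 { WaitState waitsOnBoth = { .numLiveChildTasks = 2, .numLiveChildThreads = 1,
                             .isWaitingForChildTasksToEnd   = true,
                             .isWaitingForChildThreadsToEnd = true };
   WaitState waitsOnThds = { .numLiveChildThreads = 1,
                             .isWaitingForChildThreadsToEnd = true };
   printf( "resume? %d\n", child_thread_ended( &waitsOnBoth ) ); /* 0: tasks still live */
   printf( "resume? %d\n", child_thread_ended( &waitsOnThds ) ); /* 1: free to resume   */
   return 0;
 }
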
   3.161  /*Re-use this in the entry-point fn
   3.162 @@ -300,20 +338,14 @@
   3.163        //This is running in master, so use internal version
   3.164     newSlv = VMS_PI__create_slaveVP( fnPtr, initData );
   3.165  
   3.166 -   semEnv->numAdditionalSlvs += 1;
   3.167 -
   3.168 +      //task slaves differ from thread slaves by the settings in the taskStub
   3.169 +      //so, don't create task stub here, only create semData, which is same
   3.170 +      // for all kinds of slaves
   3.171     semData = VMS_PI__malloc( sizeof(VSsSemData) );
   3.172     semData->highestTransEntered = -1;
   3.173     semData->lastTransEntered    = NULL;
   3.174     semData->needsTaskAssigned   = TRUE;
   3.175     
   3.176 -   semData->threadInfo = VMS_PI__malloc( sizeof(VSsThreadInfo) );
   3.177 -   semData->threadInfo->isWaiting = FALSE;
   3.178 -   semData->threadInfo->numChildTasks = 0;
   3.179 -   semData->threadInfo->parent = NULL;
   3.180 -   semData->threadInfo->parentIsTask = FALSE;
   3.181 -   semData->threadInfo->slaveAssignedTo = newSlv;
   3.182 -   
   3.183     newSlv->semanticData = semData;
   3.184  
   3.185     //=================== Assign new processor to a core =====================
   3.186 @@ -340,64 +372,22 @@
   3.187     return newSlv;
   3.188   }
   3.189  
   3.190 -/*This has been removed, because have changed things.. the only way to 
   3.191 - * create a slaveVP now is to either do an explicit create in the app, or
   3.192 - * else for req hdlr to create it when a task suspends if no extras are
   3.193 - * free.
   3.194 - *So, only have handleExplCreate for now.. and have the req hdlrs use the
   3.195 - * helper
   3.196 - *SlaveVP create  (NOT task create!)
   3.197 - *
   3.198 - */
   3.199 -/*
   3.200 -inline void
   3.201 -handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VSsSemEnv *semEnv  )
   3.202 - { VSsSemReq *semReq;
   3.203 -   SlaveVP    *newSlv;
   3.204 -   
   3.205 -   
   3.206 -   semReq = VMS_PI__take_sem_reqst_from( req );
   3.207 - 
   3.208 -   newSlv = VSs__create_slave_helper( semReq->fnPtr, semReq->initData,
   3.209 -                                         semEnv, semReq->coreToAssignOnto );
   3.210 -   
   3.211 -   ((VSsSemData*)newSlv->semanticData)->threadInfo->parent = requestingSlv;
   3.212 -   
   3.213 -         DEBUG__printf2(dbgRqstHdlr,"Create from: %d, new VP: %d",
   3.214 -                                    requestingSlv->slaveID, newSlv->slaveID)
   3.215 -
   3.216 -   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   3.217 -   Dependency newd;
   3.218 -   newd.from_vp = requestingSlv->slaveID;
   3.219 -   newd.from_task = requestingSlv->assignCount;
   3.220 -   newd.to_vp = newSlv->slaveID;
   3.221 -   newd.to_task = 1;
   3.222 -   addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);   
   3.223 -   #endif
   3.224 -
   3.225 -      //For VSs, caller needs ptr to created processor returned to it
   3.226 -   requestingSlv->dataRetFromReq = newSlv;
   3.227 -
   3.228 -   resume_slaveVP( requestingSlv, semEnv );
   3.229 -   resume_slaveVP( newSlv,        semEnv );
   3.230 - }
   3.231 -*/
   3.232 -
   3.233  VSsTaskStub *
   3.234 -create_expl_proc_task_stub( void *initData )
   3.235 +create_thread_task_stub( void *initData )
   3.236   { VSsTaskStub *newStub;
   3.237           
   3.238     newStub = VMS_PI__malloc( sizeof(VSsTaskStub) );
   3.239     newStub->numBlockingProp = 0;
   3.240     newStub->slaveAssignedTo = NULL; //set later
   3.241 -   newStub->taskType        = NULL; //Identifies as an explicit processor
   3.242 +   newStub->taskType        = IS_A_THREAD;
   3.243     newStub->ptrEntries      = NULL;
   3.244     newStub->args            = initData;  
   3.245 -   newStub->numChildTasks   = 0;
   3.246 -   newStub->parent          = NULL;
   3.247 -   newStub->isWaiting       = FALSE;
   3.248 +   newStub->numLiveChildTasks              = 0;
   3.249 +   newStub->numLiveChildThreads            = 0;
   3.250 +   newStub->parentTasksStub                = NULL;
   3.251 +   newStub->isWaitingForChildTasksToEnd    = FALSE;
   3.252 +   newStub->isWaitingForChildThreadsToEnd  = FALSE;
   3.253     newStub->taskID          = NULL;
   3.254 -   newStub->parentIsTask    = FALSE;
   3.255  
   3.256     return newStub;
   3.257   }
   3.258 @@ -405,6 +395,8 @@
   3.259  /*Application invokes this when it explicitly creates a thread via the
   3.260   * "VSs__create_thread()" command.
   3.261   * 
   3.262 + *The request handlers create new task slaves directly, not via this hdlr.
   3.263 + * 
   3.264   *Make everything in VSs be a task.  An explicitly created VP is just a
   3.265   * suspendable task, and the seedVP is also a suspendable task. 
   3.266   *So, here, create a task Stub. 
   3.267 @@ -418,27 +410,38 @@
   3.268   { VSsSemReq  *semReq;
   3.269     SlaveVP    *newSlv;
   3.270     VSsSemData *semData, *parentSemData;
   3.271 -   VSsTaskStub *explPrTaskStub;
   3.272     
   3.273     semReq = VMS_PI__take_sem_reqst_from( req );
   3.274  
   3.275 -      //use an idle "extra" slave, if have one
   3.276 -   newSlv = readPrivQ( semEnv->extraTaskSlvQ );
   3.277 -   if( newSlv == NULL ) //or, create a new slave, if no extras
   3.278 -      newSlv = VSs__create_slave_helper( semReq->fnPtr, semReq->initData,
   3.279 -                                         semEnv, semReq->coreToAssignOnto );
   3.280 -      
   3.281 -   
   3.282 -   semData = ( (VSsSemData *)newSlv->semanticData );
   3.283 -   semData->needsTaskAssigned = FALSE;
   3.284 -   semData->taskStub = create_expl_proc_task_stub( semReq->initData );
   3.285 -   semData->taskStub->parent = semReq->callingSlv;
   3.286 +   semEnv->numLiveThreadSlvs += 1;
   3.287     
   3.288        //Deceptive -- must work when creator is a normal task, or seed,
   3.289        // or another thd.. think have valid sem data and task stub for all
   3.290 +      //This hdlr is NOT called when creating the seed slave
   3.291     parentSemData = (VSsSemData *)semReq->callingSlv->semanticData;
   3.292 -   parentSemData->taskStub->numChildTasks += 1;
   3.293 -   
   3.294 +   parentSemData->taskStub->numLiveChildThreads += 1;
   3.295 +
   3.296 +      //use an idle "extra" slave, if have one
   3.297 +   newSlv = readPrivQ( semEnv->freeExtraTaskSlvQ );
   3.298 +   if( newSlv != NULL ) //got an idle one, so reset it
   3.299 +    { semData = (VSsSemData *)newSlv->semanticData;
   3.300 +      semData->highestTransEntered = -1;
   3.301 +      semData->lastTransEntered    = NULL;
   3.302 +      VMS_int__reset_slaveVP_to_TopLvlFn( newSlv, semReq->fnPtr, 
   3.303 +                                                         semReq->initData );
   3.304 +    }
   3.305 +   else //no idle ones, create a new
   3.306 +    { newSlv = VSs__create_slave_helper( semReq->fnPtr, semReq->initData,
   3.307 +                                         semEnv, semReq->coreToAssignOnto ); 
   3.308 +      semData = (VSsSemData *)newSlv->semanticData;
   3.309 +    }
   3.310 +
   3.311 +      //now, create a new task and assign to the thread
   3.312 +   semData->needsTaskAssigned = FALSE;  //thread has a permanent task
   3.313 +   semData->taskStub = create_thread_task_stub( semReq->initData );
   3.314 +   semData->taskStub->parentTasksStub = parentSemData->taskStub;
   3.315 +   semData->slaveType = threadSlv; //this hdlr only creates thread slvs
   3.316 +
   3.317           DEBUG__printf2(dbgRqstHdlr,"Create from: %d, new VP: %d",
   3.318                                      requestingSlv->slaveID, newSlv->slaveID)
   3.319  
   3.320 @@ -451,7 +454,7 @@
   3.321     addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);   
   3.322     #endif
   3.323  
   3.324 -      //For VSs, caller needs ptr to created processor returned to it
   3.325 +      //For VSs, caller needs ptr to created thread returned to it
   3.326     requestingSlv->dataRetFromReq = newSlv;
   3.327  
   3.328     resume_slaveVP( requestingSlv, semEnv );
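The thread-creation handler above pulls an idle slave from freeExtraTaskSlvQ and resets it, creating a fresh one only when the queue is empty; task-end later returns finished extras to that queue. A standalone sketch of that reuse-or-create pool pattern, using a trivial free list and stand-in types (names here are illustrative, not the sources' own).

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins: a slave record and a LIFO free list standing in for the
 * shared freeExtraTaskSlvQ. */
typedef struct Slave { struct Slave *next; int id; } Slave;

static Slave *freeQ  = NULL;
static int    nextId = 0;

static Slave * acquire_slave( void )
 { Slave *s = freeQ;
   if( s != NULL )                      /* reuse an idle one (reset it)      */
    { freeQ = s->next;
      return s;
    }
   s = malloc( sizeof(Slave) );         /* none idle, so create a new one    */
   s->id = nextId++;
   return s;
 }

static void release_slave( Slave *s )   /* what task-end does with an extra  */
 { s->next = freeQ;
   freeQ   = s;
 }

int main( void )
 { Slave *a = acquire_slave();          /* pool empty: freshly created       */
   release_slave( a );
   Slave *b = acquire_slave();          /* pool non-empty: same slave reused */
   printf( "reused the same slave: %d\n", a == b );
   free( b );
   return 0;
 }
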
     4.1 --- a/VSs_Request_Handlers.c	Wed Aug 01 03:16:27 2012 -0700
     4.2 +++ b/VSs_Request_Handlers.c	Thu Aug 02 01:03:14 2012 -0700
     4.3 @@ -83,18 +83,18 @@
     4.4  inline VSsTaskStub *
     4.5  create_task_stub( VSsTaskType *taskType, void **args )
     4.6   { void **newArgs;
     4.7 -   int32  i, numArgs;
     4.8     VSsTaskStub *
     4.9     newStub = VMS_int__malloc( sizeof(VSsTaskStub) + taskType->sizeOfArgs );
    4.10     newStub->numBlockingProp = taskType->numCtldArgs;
    4.11     newStub->slaveAssignedTo = NULL;
    4.12 -   newStub->taskType = taskType;
    4.13 +   newStub->taskType   = taskType;
    4.14     newStub->ptrEntries = 
    4.15        VMS_int__malloc( taskType->numCtldArgs * sizeof(VSsPointerEntry *) );
    4.16     newArgs = (void **)( (uint8 *)newStub + sizeof(VSsTaskStub) );
    4.17     newStub->args = newArgs;
    4.18 -   newStub->numChildTasks = 0;
    4.19 -   newStub->parent = NULL;
    4.20 +   newStub->numLiveChildTasks   = 0;
    4.21 +   newStub->numLiveChildThreads = 0;
    4.22 +   
    4.23        //Copy the arg-pointers.. can be more arguments than just the ones 
    4.24        // that StarSs uses to control ordering of task execution.
    4.25     memcpy( newArgs, args, taskType->sizeOfArgs );
    4.26 @@ -239,20 +239,12 @@
    4.27     taskType = semReq->taskType;
    4.28     taskStub = create_task_stub( taskType, args );//copies arg ptrs
    4.29     taskStub->numBlockingProp = taskType->numCtldArgs;
    4.30 -   taskStub->taskID = semReq->taskID; //may be NULL
    4.31 -   taskStub->numChildTasks = 0;
    4.32 +   taskStub->taskID          = semReq->taskID; //may be NULL
    4.33     
    4.34 -   VSsSemData* parentSemData = (VSsSemData*) semReq->callingSlv->semanticData;
    4.35 -   if(parentSemData->taskStub != NULL){ //calling is task
    4.36 -       taskStub->parentIsTask = TRUE;
    4.37 -       taskStub->parent = (void*) parentSemData->taskStub;
    4.38 -       parentSemData->taskStub->numChildTasks++;
    4.39 -   } else {
    4.40 -       taskStub->parentIsTask = FALSE;
    4.41 -       taskStub->parent = (void*) parentSemData->threadInfo;
    4.42 -       parentSemData->threadInfo->numChildTasks++;
    4.43 -   }
    4.44 -   
    4.45 +   VSsSemData* 
    4.46 +   parentSemData = (VSsSemData*) semReq->callingSlv->semanticData;
    4.47 +   taskStub->parentTasksStub = (void*) parentSemData->taskStub;
    4.48 +   parentSemData->taskStub->numLiveChildTasks += 1;
    4.49     
    4.50     /*The controlled arguments are then processed one by one.
    4.51      *Processing an argument means getting the hash of the pointer.  Then,
    4.52 @@ -373,56 +365,45 @@
    4.53   */
    4.54  inline void
    4.55  handleEndTask( VSsSemReq *semReq, VSsSemEnv *semEnv )
    4.56 - { uint32            key[3];
    4.57 -   HashEntry        *rawHashEntry;
    4.58 -   VSsPointerEntry  *ptrEntry; //contents of hash table entry for an arg pointer
    4.59 + { VSsPointerEntry  *ptrEntry; //contents of hash table entry for an arg pointer
    4.60     void            **args;
    4.61 -   VSsTaskStub      *endingTaskStub, *waitingTaskStub;
    4.62 +   VSsSemData       *endingSlvSemData;
    4.63 +   VSsTaskStub      *endingTaskStub, *waitingTaskStub, *parent;
    4.64     VSsTaskType      *endingTaskType;
    4.65     VSsWaiterCarrier *waitingTaskCarrier;
    4.66     VSsPointerEntry **ptrEntries;
    4.67 -      
    4.68 -   HashTable *
    4.69 -   ptrHashTbl = semEnv->argPtrHashTbl;
    4.70 -   
    4.71 +         
    4.72           DEBUG__printf1(dbgRqstHdlr,"EndTask request from processor %d",semReq->callingSlv->slaveID)
    4.73   
    4.74 -   /* ========================== end of task ===========================
    4.75 -    *At the end of a task, the task-stub is sent in the request.
    4.76 -    */
    4.77 -   endingTaskStub =
    4.78 -                ((VSsSemData *)semReq->callingSlv->semanticData)->taskStub;
    4.79 -   args           = endingTaskStub->args;
    4.80 -   endingTaskType = endingTaskStub->taskType;
    4.81 -   ptrEntries     = endingTaskStub->ptrEntries; //saved in stub when create
    4.82 +   endingSlvSemData = (VSsSemData *)semReq->callingSlv->semanticData;
    4.83 +   endingTaskStub   = endingSlvSemData->taskStub;
    4.84 +   args             = endingTaskStub->args;
    4.85 +   endingTaskType   = endingTaskStub->taskType;
    4.86 +   ptrEntries       = endingTaskStub->ptrEntries; //saved in stub when create
    4.87     
    4.88 -   /* Check if parent was waiting on this task */
    4.89 -   if(endingTaskStub->parentIsTask)
    4.90 -    { VSsTaskStub* parent = (VSsTaskStub*) endingTaskStub->parent;
    4.91 -      parent->numChildTasks--;
    4.92 -      if(parent->isWaiting && parent->numChildTasks == 0)
    4.93 -       {
    4.94 -         parent->isWaiting = FALSE;
    4.95 -         resume_slaveVP( parent->slaveAssignedTo, semEnv );
    4.96 -       }
    4.97 -    } 
    4.98 -   else 
    4.99 -    { VSsThreadInfo* parent = (VSsThreadInfo*) endingTaskStub->parent;
   4.100 -      parent->numChildTasks--;
   4.101 -      if(parent->isWaiting && parent->numChildTasks == 0)
   4.102 -       {
   4.103 -         parent->isWaiting = FALSE;
   4.104 -         resume_slaveVP( parent->slaveAssignedTo, semEnv );
   4.105 -       }
   4.106 +      //Check if parent was waiting on this task
   4.107 +   parent = (VSsTaskStub *) endingTaskStub->parentTasksStub;
   4.108 +   parent->numLiveChildTasks -= 1;
   4.109 +   if( parent->isWaitingForChildTasksToEnd && parent->numLiveChildTasks == 0)
   4.110 +    {
   4.111 +      parent->isWaitingForChildTasksToEnd = FALSE;
   4.112 +      resume_slaveVP( parent->slaveAssignedTo, semEnv );
   4.113      }
   4.114     
   4.115 +      //Check if parent ended, and this was last descendent, then free it
   4.116 +   if( parent->isEnded && parent->numLiveChildTasks == 0 )
   4.117 +    { VMS_PI__free( parent );
   4.118 +    }
   4.119 +   
   4.120 +   
   4.121 +      //Now, update state of dependents and start ready tasks
   4.122     /*The task's controlled arguments are processed one by one.
   4.123      *Processing an argument means getting arg-pointer's entry.
   4.124      */
   4.125     int32 argNum;
   4.126     for( argNum = 0; argNum < endingTaskType->numCtldArgs; argNum++ )
   4.127      { 
   4.128 -      /* commented out 'cause saving entry ptr when create stub
   4.129 +      /* commented out 'cause remembering entry ptr when create stub
   4.130        key[0] = 2; //says are 2 32b values in key
   4.131        *( (uint64*)&key[1] ) = args[argNum];  //write 64b ptr into two 32b
   4.132  
   4.133 @@ -519,18 +500,32 @@
   4.134         }//if-else, check of ending task, whether writer or reader
   4.135      }//for argnum in ending task
   4.136     
   4.137 -   //done ending the task, now free the stub + args copy
   4.138 -   VMS_PI__free( endingTaskStub->ptrEntries );
   4.139 -   VMS_PI__free( endingTaskStub );
   4.140     
   4.141 -   //Resume the slave that animated the task -- assigner will give new task
   4.142 -   ((VSsSemData *)semReq->callingSlv->semanticData)->needsTaskAssigned =
   4.143 -      TRUE;
   4.144 -   resume_slaveVP( semReq->callingSlv, semEnv );
   4.145 -
   4.146 -   return;
   4.147 +      //done ending the task, now free the stub + args copy
   4.148 +      // if still has live children, then keep stub around
   4.149 +   if( endingTaskStub->numLiveChildTasks   == 0 &&
   4.150 +       endingTaskStub->numLiveChildThreads == 0 )
   4.151 +    { free_task_stub( endingTaskStub ); 
   4.152 +    }
   4.153 +   
   4.154 +   
   4.155 +   endingSlvSemData->needsTaskAssigned = TRUE;
   4.156 +   
   4.157 +      //Check if the slave is an extra task slave, and put into free Q
   4.158 +   if( endingSlvSemData->slaveType == extraTaskSlv )
   4.159 +    { writePrivQ( semReq->callingSlv, semEnv->freeExtraTaskSlvQ );
   4.160 +    }
   4.161 +   
   4.162 +      //otherwise, it's a slot slave, so it will get used from matrix
   4.163 +      // so, do nothing with it, just return
   4.164 +   return; 
   4.165   }
   4.166  
   4.167 +inline void
   4.168 +free_task_stub( VSsTaskStub *stubToFree )
   4.169 + { VMS_PI__free( stubToFree->ptrEntries );
   4.170 +   VMS_PI__free( stubToFree );
   4.171 + }
   4.172  
   4.173  //========================== Task Comm handlers ===========================
   4.174  
   4.175 @@ -568,7 +563,14 @@
   4.176     key[ receiverIDNumInt ] = semReq->msgType; //no +1 'cause starts at 0
   4.177     
   4.178     entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );
   4.179 -   if( entry == NULL ) return;  //was just inserted
   4.180 +   if( entry == NULL ) //was just inserted, means task has to wait
   4.181 +    {    //the task is in a slot slave, which stays suspended, so replace
   4.182 +         // it with a new slave (causes it to become an extraTaskSlv)
   4.183 +         //Once the waiting slave resumes and gets to task_end, the task_end
   4.184 +         // puts the slave into the freeExtraTaskSlvQ
   4.185 +      replaceWithNewSlotSlv( receiverSlv, semEnv );
   4.186 +      return;
   4.187 +    }
   4.188  
   4.189        //if here, found a waiting request with same key
   4.190     waitingReq = (VSsSemReq *)entry->content;
   4.191 @@ -667,7 +669,14 @@
   4.192     memcpy( &key[receiverIDNumInt], senderID, senderIDNumInt * sizeof(int32) );
   4.193  
   4.194     entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );
   4.195 -   if( entry == NULL ) return;  //was just inserted
   4.196 +   if( entry == NULL ) //was just inserted, means task has to wait
   4.197 +    {    //the task is in a slot slave, which stays suspended, so replace
   4.198 +         // it with a new slave (causes it to become an extraTaskSlv)
   4.199 +         //Once the waiting slave resumes and gets to task_end, the task_end
   4.200 +         // puts the slave into the freeExtraTaskSlvQ
   4.201 +      replaceWithNewSlotSlv( receiverSlv, semEnv );
   4.202 +      return;
   4.203 +    }
   4.204  
   4.205     waitingReq = (VSsSemReq *)entry->content;
   4.206  
   4.207 @@ -742,7 +751,14 @@
   4.208  
   4.209  
   4.210     entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );//clones
   4.211 -   if( entry == NULL ) return;  //was just inserted
   4.212 +   if( entry == NULL ) //was just inserted, means task has to wait
   4.213 +    {    //the task is in a slot slave, which stays suspended, so replace
   4.214 +         // it with a new slave (causes it to become an extraTaskSlv)
   4.215 +         //Once the waiting slave resumes and gets to task_end, the task_end
   4.216 +         // puts the slave into the freeExtraTaskSlvQ
   4.217 +      replaceWithNewSlotSlv( receiverSlv, semEnv );
   4.218 +      return;
   4.219 +    }
   4.220  
   4.221     waitingReq = (VSsSemReq *)entry->content;  //previously cloned by insert
   4.222  
   4.223 @@ -821,14 +837,21 @@
   4.224     keySz = (receiverIDNumInt + senderIDNumInt) * sizeof(int32);
   4.225     key = VMS_PI__malloc( keySz );
   4.226     memcpy(  key, receiverID, receiverIDNumInt * sizeof(int32) );
   4.227 -   memcpy( &key[receiverIDNumInt], senderID, senderIDNumInt * sizeof(int32) );
   4.228 +   memcpy( &key[receiverIDNumInt], senderID, senderIDNumInt * sizeof(int32));
   4.229  
   4.230     entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );
   4.231 -   if( entry == NULL ) return;  //was just inserted
   4.232 +   if( entry == NULL ) //was just inserted, means task has to wait
   4.233 +    {    //the task is in a slot slave, which stays suspended, so replace
   4.234 +         // it with a new slave (causes it to become an extraTaskSlv)
   4.235 +         //Once the waiting slave resumes and gets to task_end, the task_end
   4.236 +         // puts the slave into the freeExtraTaskSlvQ
   4.237 +      replaceWithNewSlotSlv( receiverSlv, semEnv );
   4.238 +      return;
   4.239 +    }
   4.240  
   4.241     waitingReq = (VSsSemReq *)entry->content;
   4.242  
   4.243 -      //At this point, know have waiting request(s) -- should be send(s)
   4.244 +      //At this point, know have a request to rendez-vous -- should be send
   4.245     if( waitingReq->reqType == send_from_to )
   4.246      {    //waiting request is a send, so pair it with this receive
   4.247        #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
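
The comm-handler hunks above all add the same move: when giveEntryElseInsertReqst32 returns NULL the request was just parked in the hash table, so the suspended task's slot slave gets swapped out via replaceWithNewSlotSlv, defined in the next hunk. A standalone sketch of that park-or-pair rendezvous, using a one-slot stand-in for the comm hash table (names are illustrative, not the sources' own).

#include <stdio.h>

/* One-slot stand-in for the comm hash table: holds one parked request. */
typedef struct { const char *payload; } Reqst;
static Reqst *parked = NULL;

/* Park-or-pair: the first arrival is stored (and its slot slave must be
 * replaced so the core keeps animating tasks); the second arrival finds
 * its partner and both sides can proceed.  The real handlers key on
 * sender/receiver IDs and message type. */
static Reqst * give_entry_else_insert( Reqst *r )
 { if( parked == NULL ) { parked = r; return NULL; }
   Reqst *partner = parked;
   parked = NULL;
   return partner;
 }

int main( void )
 { Reqst sendReq = { "hello" }, recvReq = { NULL };

   if( give_entry_else_insert( &sendReq ) == NULL )
      printf( "send parked; replace its slot slave and return\n" );

   Reqst *waiting = give_entry_else_insert( &recvReq );
   if( waiting != NULL )
      printf( "receive paired with parked send: %s\n", waiting->payload );
   return 0;
 }
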
   4.248 @@ -866,33 +889,56 @@
   4.249  
   4.250  //==========================================================================
   4.251  inline void
   4.252 +replaceWithNewSlotSlv( SlaveVP *requestingSlv, VSsSemEnv *semEnv )
   4.253 + { SlaveVP *newSlotSlv;
   4.254 +   VSsSemData *semData;
   4.255 +
   4.256 +      //get a new slave to be the slot slave
   4.257 +   newSlotSlv     = readPrivQ( semEnv->freeExtraTaskSlvQ );
   4.258 +   if( newSlotSlv == NULL )
   4.259 +    { newSlotSlv  = VMS_int__create_slaveVP( &idle_fn, NULL );
   4.260 +    }
   4.261 +   
   4.262 +      //set slave values to make it the slot slave
   4.263 +   semData                        = newSlotSlv->semanticData;
   4.264 +   semData->taskStub              = NULL;
   4.265 +   semData->slaveType             = slotTaskSlv;
   4.266 +   semData->needsTaskAssigned     = TRUE;
   4.267 +   newSlotSlv->animSlotAssignedTo = requestingSlv->animSlotAssignedTo;
   4.268 +   newSlotSlv->coreAnimatedBy     = requestingSlv->coreAnimatedBy;
   4.269 +    
   4.270 +      //put it into the slot slave matrix
   4.271 +   int32 slotNum = requestingSlv->animSlotAssignedTo->slotIdx;
   4.272 +   int32 coreNum = requestingSlv->coreAnimatedBy;
   4.273 +   semEnv->currTaskSlvs[coreNum][slotNum] = newSlotSlv;
   4.274 +
   4.275 +      //Fix up requester, to be an extra slave now (but not a free one)
   4.276 +      // because it's not free, doesn't go into freeExtraTaskSlvQ
   4.277 +   semData = requestingSlv->semanticData;
   4.278 +   semData->slaveType = extraTaskSlv;
   4.279 + }
   4.280 +
   4.281 +inline void
   4.282  handleTaskwait( VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv)
   4.283 - {
   4.284 -    VSsTaskStub* requestingTaskStub;
   4.285 + { VSsTaskStub* requestingTaskStub;
   4.286 +   VSsSemData* semData;
   4.287 +         DEBUG__printf1(dbgRqstHdlr,"Taskwait request from processor %d",
   4.288 +                                                      requestingSlv->slaveID)
   4.289      
   4.290 -    DEBUG__printf1(dbgRqstHdlr,"Taskwait request from processor %d",requestingSlv->slaveID)
   4.291 -    
   4.292 -    VSsSemData* semData = ((VSsSemData *)semReq->callingSlv->semanticData);
   4.293 -            
   4.294 -    requestingTaskStub = semData->taskStub;
   4.295 -            
   4.296 -    if(requestingTaskStub == NULL){ //calling VP is hosting a thread
   4.297 -        if(semData->threadInfo->numChildTasks == 0){ //nobody to wait for, proceed
   4.298 -            resume_slaveVP( requestingSlv,   semEnv );
   4.299 -        } else { //have to wait
   4.300 -            semData->threadInfo->isWaiting = TRUE;
   4.301 -            return;
   4.302 -        }
   4.303 -    } else { //calling VP is executing a task
   4.304 -        if(requestingTaskStub->numChildTasks == 0){
   4.305 -            resume_slaveVP( requestingSlv,   semEnv );
   4.306 -        } else { //have to wait
   4.307 -            requestingTaskStub->isWaiting = TRUE;
   4.308 -            return;
   4.309 -        }
   4.310 +   semData = (VSsSemData *)semReq->callingSlv->semanticData;
   4.311 +   requestingTaskStub = semData->taskStub;
   4.312 +   
   4.313 +   if( semData->taskStub->numLiveChildTasks == 0 )
   4.314 +    {    //nobody to wait for, resume
   4.315 +      resume_slaveVP( requestingSlv, semEnv );
   4.316      }
   4.317 -    
   4.318 -}
   4.319 +   else  //have to wait, replace requester with new slot slv & mark waiting
   4.320 +    { 
   4.321 +      replaceWithNewSlotSlv( requestingSlv, semEnv );
   4.322 +      
   4.323 +      requestingTaskStub->isWaitingForChildTasksToEnd = TRUE;
   4.324 +    }    
   4.325 + }
   4.326  
   4.327  
   4.328  //==========================================================================
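
For the replaceWithNewSlotSlv and handleTaskwait hunks above, a standalone sketch of what the slot-slave swap amounts to, with stand-in types (names are illustrative, not the sources' own): a replacement slave takes over the blocked slave's position in the per-core, per-slot matrix, and the blocked slave is demoted to an extra task slave so task-end can later recycle it.

#include <stdio.h>

#define NUM_CORES_      2
#define NUM_ANIM_SLOTS_ 2

/* Stand-ins for SlaveVP/VSsSemData with just the fields the swap touches. */
typedef enum { extra_ = 1, slot_, thread_ } Kind;
typedef struct { Kind kind; int core, slotIdx; } Slv;

static Slv *currTaskSlvs_[NUM_CORES_][NUM_ANIM_SLOTS_];

/* Swap a fresh slave into the blocked slave's position in the slot matrix
 * and demote the blocked one to an extra task slave (it stays suspended
 * until its request is satisfied, then is recycled at task-end). */
static void replace_slot_slv( Slv *blocked, Slv *fresh )
 { fresh->kind    = slot_;
   fresh->core    = blocked->core;
   fresh->slotIdx = blocked->slotIdx;
   currTaskSlvs_[fresh->core][fresh->slotIdx] = fresh;
   blocked->kind  = extra_;
 }

int main( void )
 { Slv blocked = { slot_, 0, 1 }, fresh = { extra_, -1, -1 };
   currTaskSlvs_[0][1] = &blocked;
   replace_slot_slv( &blocked, &fresh );
   printf( "slot holds fresh slave: %d, blocked is now extra: %d\n",
           currTaskSlvs_[0][1] == &fresh, blocked.kind == extra_ );
   return 0;
 }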