changeset 15:459055db7fc0 dev_expl_VP_and_DKU

bug fix -- shuts down correctly now -- counts extra task slaves correctly
author Sean Halle <seanhalle@yahoo.com>
date Thu, 23 Aug 2012 03:21:03 -0700
parents b2bc97318262
children 1ffd5df22df9 aad5a2e77163
files VSs.c VSs_PluginFns.c VSs_Request_Handlers.c
diffstat 3 files changed, 32 insertions(+), 24 deletions(-) [+]
line diff
     1.1 --- a/VSs.c	Thu Aug 23 01:27:26 2012 -0700
     1.2 +++ b/VSs.c	Thu Aug 23 03:21:03 2012 -0700
     1.3 @@ -191,9 +191,6 @@
     1.4        //Hook up the semantic layer's plug-ins to the Master virt procr
     1.5     _VMSMasterEnv->requestHandler = &VSs__Request_Handler;
     1.6     _VMSMasterEnv->slaveAssigner  = &VSs__assign_slaveVP_to_slot;
     1.7 -   #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
     1.8 -   _VMSMasterEnv->counterHandler = &VSs__counter_handler;
     1.9 -   #endif
    1.10  
    1.11        //create the semantic layer's environment (all its data) and add to
    1.12        // the master environment
    1.13 @@ -201,6 +198,7 @@
    1.14     _VMSMasterEnv->semanticEnv = semanticEnv;
    1.15     
    1.16     #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
    1.17 +   _VMSMasterEnv->counterHandler = &VSs__counter_handler;
    1.18     VSs__init_counter_data_structs();
    1.19     #endif
    1.20  
    1.21 @@ -255,7 +253,7 @@
    1.22      }
    1.23  
    1.24     semanticEnv->numLiveExtraTaskSlvs   = 0; //must be last
    1.25 -   semanticEnv->numLiveThreadSlvs      = 1; //must be last, count the seed
    1.26 +   semanticEnv->numLiveThreadSlvs      = 1; //must be last, counts the seed
    1.27  
    1.28     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    1.29     semanticEnv->unitList = makeListOfArrays(sizeof(Unit),128);
     2.1 --- a/VSs_PluginFns.c	Thu Aug 23 01:27:26 2012 -0700
     2.2 +++ b/VSs_PluginFns.c	Thu Aug 23 03:21:03 2012 -0700
     2.3 @@ -62,15 +62,19 @@
     2.4     
     2.5     semEnv  = (VSsSemEnv *)_semEnv;
     2.6     
     2.7 -        returnSlv = readPrivQ( semEnv->slavesReadyToResumeQ );
     2.8 -      if( returnSlv != NULL )  //Yes, have a slave, so return it.
     2.9 -       { returnSlv->coreAnimatedBy   = coreNum;
    2.10 -         if( semEnv->coreIsDone[coreNum] == TRUE ) //reads are higher perf
    2.11 -            semEnv->coreIsDone[coreNum] = FALSE;   //don't just write always
    2.12 -         goto ReturnTheSlv;
    2.13 -       }
    2.14 -      //Speculatively set the return slave to the slot taskSlave
    2.15 -      //TODO: false sharing ?  Always read..
    2.16 +      //Check for suspended slaves that are ready to resume
    2.17 +   returnSlv = readPrivQ( semEnv->slavesReadyToResumeQ );
    2.18 +   if( returnSlv != NULL )  //Yes, have a slave, so return it.
    2.19 +    { returnSlv->coreAnimatedBy   = coreNum;
    2.20 +    
    2.21 +         //have work, so reset Done flag (when work generated on other core)
    2.22 +      if( semEnv->coreIsDone[coreNum] == TRUE ) //reads are higher perf
    2.23 +         semEnv->coreIsDone[coreNum] = FALSE;   //don't just write always
    2.24 +      goto ReturnTheSlv;
    2.25 +    }
    2.26 +   
    2.27 +      //If none, speculate will have a task, so get the slot slave
    2.28 +      //TODO: false sharing ?  (think not bad cause mostly read..)
    2.29     returnSlv = semEnv->slotTaskSlvs[coreNum][slotNum];
    2.30     
    2.31     semData = (VSsSemData *)returnSlv->semanticData;
    2.32 @@ -85,17 +89,18 @@
    2.33        semData->taskStub            = newTaskStub;
    2.34        newTaskStub->slaveAssignedTo = returnSlv;
    2.35        semData->needsTaskAssigned   = FALSE;
    2.36 +      
    2.37 +         //have work, so reset Done flag, if was set
    2.38        if( semEnv->coreIsDone[coreNum] == TRUE ) //reads are higher perf
    2.39           semEnv->coreIsDone[coreNum] = FALSE;   //don't just write always
    2.40        goto ReturnTheSlv;
    2.41      }
    2.42     else
    2.43 -    {    //no task, so try to get a ready to resume slave
    2.44 -      
    2.45 -         //If get here, then no task, so check if have extra free slaves
    2.46 +    {    //no task, so try to clean up unused extra task slaves
    2.47        extraSlv = readPrivQ( semEnv->freeExtraTaskSlvQ );
    2.48        if( extraSlv != NULL )
    2.49 -       {    //means have two slaves need tasks -- redundant, kill one
    2.50 +       {    //have two slaves need tasks, so delete one
    2.51 +            //This both bounds the num extras, and delivers shutdown cond
    2.52           handleDissipate( extraSlv, semEnv );
    2.53              //then return NULL
    2.54           returnSlv = NULL;
    2.55 @@ -104,7 +109,7 @@
    2.56        else
    2.57         { //candidate for shutdown.. if all extras dissipated, and no tasks
    2.58           // and no ready to resume slaves, then no way to generate
    2.59 -         // more tasks..
    2.60 +         // more tasks (on this core -- other core might have task still)
    2.61           if( semEnv->numLiveExtraTaskSlvs == 0 && 
    2.62               semEnv->numLiveThreadSlvs == 0 )
    2.63            { //This core sees no way to generate more tasks, so say it
    2.64 @@ -113,6 +118,7 @@
    2.65                 semEnv->coreIsDone[coreNum] = TRUE;
    2.66                 #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
    2.67                 semEnv->shutdownInitiated = TRUE;
    2.68 +               
    2.69                 #else
    2.70                 if( semEnv->numCoresDone == NUM_CORES )
    2.71                  { //means no cores have work, and none can generate more
    2.72 @@ -137,9 +143,7 @@
    2.73     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    2.74     if( returnSlv == NULL )
    2.75      { returnSlv = semEnv->idleSlv[coreNum][slotNum]; 
    2.76 -      if(semEnv->shutdownInitiated) 
    2.77 -       { returnSlv = VMS_SS__create_shutdown_slave();
    2.78 -       }
    2.79 +    
    2.80           //things that would normally happen in resume(), but these VPs
    2.81           // never go there
    2.82        returnSlv->assignCount++; //Somewhere here!
    2.83 @@ -290,6 +294,7 @@
    2.84  
    2.85        //if make it to here, then is a thread slave ending
    2.86     semEnv->numLiveThreadSlvs -= 1; //for detecting shutdown condition
    2.87 +   
    2.88     ownTaskStub    = semData->taskStub;
    2.89     parentTaskStub = ownTaskStub->parentTaskStub;
    2.90     parentTaskStub->numLiveChildThreads -= 1;  //not freed, even if ended
    2.91 @@ -319,12 +324,15 @@
    2.92            parentTaskStub->numLiveChildThreads == 0 )
    2.93           free_task_stub( parentTaskStub ); //just stub, semData already freed
    2.94      }
    2.95 -    
    2.96 +
    2.97        //Free the semData and requesting slave's base state for all cases
    2.98   FreeSlaveStateAndReturn:
    2.99     VMS_PI__free( semData );
   2.100     VMS_PI__dissipate_slaveVP( requestingSlv );
   2.101     return; 
   2.102 +      //Note, this is not a location to check for shutdown because doesn't
   2.103 +      // say anything about work availability here.. check for shutdown in
   2.104 +      // places try to get work for the core (in the assigner)
   2.105   }
   2.106  
   2.107     
   2.108 @@ -347,7 +355,7 @@
   2.109     semData->highestTransEntered = -1;
   2.110     semData->lastTransEntered    = NULL;
   2.111     semData->needsTaskAssigned   = TRUE;
   2.112 -   semData->taskStub =NULL;
   2.113 +   semData->taskStub            = NULL;
   2.114     
   2.115     newSlv->semanticData = semData;
   2.116  
   2.117 @@ -371,7 +379,7 @@
   2.118      }
   2.119     #endif
   2.120     //========================================================================
   2.121 -
   2.122 +   
   2.123     return newSlv;
   2.124   }
   2.125  
     3.1 --- a/VSs_Request_Handlers.c	Thu Aug 23 01:27:26 2012 -0700
     3.2 +++ b/VSs_Request_Handlers.c	Thu Aug 23 03:21:03 2012 -0700
     3.3 @@ -915,6 +915,8 @@
     3.4     newSlotSlv     = readPrivQ( semEnv->freeExtraTaskSlvQ );
     3.5     if( newSlotSlv == NULL )
     3.6      { newSlotSlv  = VSs__create_slave_helper( &idle_fn, NULL, semEnv, 0);
     3.7 +         //just made a new extra task slave, so count it
     3.8 +      semEnv->numLiveExtraTaskSlvs += 1;
     3.9      }
    3.10     
    3.11        //set slave values to make it the slot slave