diff VMS.c @ 57:85b731b290f8

Merge between VCilk and SSR intermediate Nov 4
author Me
date Thu, 04 Nov 2010 18:27:27 -0700
parents 420a09d3f32a f8508572f3de
children 26d53313a8f2
line diff
     1.1 --- a/VMS.c	Thu Nov 04 17:57:39 2010 -0700
     1.2 +++ b/VMS.c	Thu Nov 04 18:27:27 2010 -0700
     1.3 @@ -33,6 +33,8 @@
     1.4  MallocProlog *
     1.5  create_free_list();
     1.6  
     1.7 +void
     1.8 +endOSThreadFn( void *initData, VirtProcr *animatingPr );
     1.9  
    1.10  pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER;
    1.11  pthread_cond_t  suspend_cond  = PTHREAD_COND_INITIALIZER;
    1.12 @@ -89,55 +91,55 @@
    1.13     int              coreIdx;
    1.14     VirtProcr      **masterVPs;
    1.15     SchedSlot     ***allSchedSlots; //ptr to array of ptrs
    1.16 -   
    1.17 +
    1.18 +
    1.19        //Make the master env, which holds everything else
    1.20     _VMSMasterEnv = malloc( sizeof(MasterEnv) );
    1.21 +
    1.22 +        //Very first thing put into the master env is the free-list, seeded
    1.23 +        // with a massive initial chunk of memory.
    1.24 +        //After this, all other mallocs are VMS__malloc.
    1.25 +   _VMSMasterEnv->freeListHead        = VMS_ext__create_free_list();
    1.26 +
    1.27 +   //===================== Only VMS__malloc after this ====================
    1.28     masterEnv     = _VMSMasterEnv;
    1.29 -      //Need to set start pt here 'cause used by seed procr, which is created
    1.30 -      // before the first core loop starts up. -- not sure how yet..
    1.31 -//   masterEnv->coreLoopStartPt = ;
    1.32 -//   masterEnv->coreLoopEndPt   = ;
    1.33     
    1.34        //Make a readyToAnimateQ for each core loop
    1.35 -   readyToAnimateQs = malloc( NUM_CORES * sizeof(SRSWQueueStruc *) );
    1.36 -   masterVPs        = malloc( NUM_CORES * sizeof(VirtProcr *) );
    1.37 +   readyToAnimateQs = VMS__malloc( NUM_CORES * sizeof(SRSWQueueStruc *) );
    1.38 +   masterVPs        = VMS__malloc( NUM_CORES * sizeof(VirtProcr *) );
    1.39  
    1.40        //One array for each core, 3 in array, core's masterVP scheds all
    1.41 -   allSchedSlots    = malloc( NUM_CORES * sizeof(SchedSlot *) );
    1.42 +   allSchedSlots    = VMS__malloc( NUM_CORES * sizeof(SchedSlot *) );
    1.43  
    1.44 +   _VMSMasterEnv->numProcrsCreated = 0;  //used by create procr
    1.45     for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    1.46 -    {    //running in main thread -- normal malloc inside makeSRSWQ
    1.47 +    {    
    1.48        readyToAnimateQs[ coreIdx ] = makeSRSWQ();
    1.49        
    1.50           //Q: should give masterVP core-specific info as its init data?
    1.51 -      masterVPs[ coreIdx ] = VMS_ext__create_procr( &masterLoop, masterEnv );
    1.52 +      masterVPs[ coreIdx ] = VMS__create_procr( &masterLoop, masterEnv );
    1.53        masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;
    1.54        allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core
    1.55 -      _VMSMasterEnv->numMasterInARow[ coreIdx ] = FALSE;
    1.56 +      _VMSMasterEnv->numMasterInARow[ coreIdx ] = 0;
    1.57      }
    1.58     _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs;
    1.59     _VMSMasterEnv->masterVPs        = masterVPs;
    1.60     _VMSMasterEnv->masterLock       = UNLOCKED;
    1.61     _VMSMasterEnv->allSchedSlots    = allSchedSlots;
    1.62 -   _VMSMasterEnv->numProcrsCreated = 0;
    1.63  
    1.64  
    1.65        //Aug 19, 2010:  no longer need to place initial masterVP into queue
    1.66        // because coreLoop now controls -- animates its masterVP when no work
    1.67  
    1.68 -   _VMSMasterEnv->freeListHead        = VMS__create_free_list();
    1.69 -   _VMSMasterEnv->amtOfOutstandingMem = 0; //none allocated yet
    1.70  
    1.71     //============================= MEASUREMENT STUFF ========================
    1.72     #ifdef STATS__TURN_ON_PROBES
    1.73 -      //creates intervalProbes array and sets pointer to it in masterEnv too
    1.74     _VMSMasterEnv->dynIntervalProbesInfo =
    1.75 -                  makeDynArrayOfSize( &(_VMSMasterEnv->intervalProbes), 20 );
    1.76 +              makePrivDynArrayOfSize( &(_VMSMasterEnv->intervalProbes), 200);
    1.77  
    1.78 -   _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, NULL );
    1.79 -   _VMSMasterEnv->masterCreateProbeID =
    1.80 -       VMS_ext__record_time_point_into_new_probe( "masterCreateProbe" );
    1.81 -      //Also put creation time directly into master env, for fast retrieval
    1.82 +   _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS__free );
    1.83 +   
    1.84 +      //put creation time directly into master env, for fast retrieval
    1.85     struct timeval timeStamp;
    1.86     gettimeofday( &(timeStamp), NULL);
    1.87     _VMSMasterEnv->createPtInSecs =
    1.88 @@ -152,11 +154,11 @@
    1.89   { SchedSlot  **schedSlots;
    1.90     int i;
    1.91  
    1.92 -   schedSlots  = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
    1.93 +   schedSlots  = VMS__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
    1.94  
    1.95     for( i = 0; i < NUM_SCHED_SLOTS; i++ )
    1.96      {
    1.97 -      schedSlots[i] = malloc( sizeof(SchedSlot) );
    1.98 +      schedSlots[i] = VMS__malloc( sizeof(SchedSlot) );
    1.99  
   1.100           //Set state to mean "handling requests done, slot needs filling"
   1.101        schedSlots[i]->workIsDone         = FALSE;
   1.102 @@ -171,9 +173,9 @@
   1.103   { int i;
   1.104     for( i = 0; i < NUM_SCHED_SLOTS; i++ )
   1.105      {
   1.106 -      free( schedSlots[i] );
   1.107 +      VMS__free( schedSlots[i] );
   1.108      }
   1.109 -   free( schedSlots );
   1.110 +   VMS__free( schedSlots );
   1.111   }
   1.112  
   1.113  
   1.114 @@ -191,7 +193,7 @@
   1.115  
   1.116        //Make the threads that animate the core loops
   1.117     for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
   1.118 -    { coreLoopThdParams[coreIdx]          = malloc( sizeof(ThdParams) );
   1.119 +    { coreLoopThdParams[coreIdx]          = VMS__malloc( sizeof(ThdParams) );
   1.120        coreLoopThdParams[coreIdx]->coreNum = coreIdx;
   1.121  
   1.122        retCode =
   1.123 @@ -263,11 +265,12 @@
   1.124   {
   1.125     char  *stackPtr;
   1.126  
   1.127 -   newPr->procrID     = _VMSMasterEnv->numProcrsCreated++;
   1.128 -   newPr->nextInstrPt = fnPtr;
   1.129 -   newPr->initialData = initialData;
   1.130 -   newPr->requests    = NULL;
   1.131 -   newPr->schedSlot   = NULL;
   1.132 +   newPr->startOfStack = stackLocs;
   1.133 +   newPr->procrID      = _VMSMasterEnv->numProcrsCreated++;
   1.134 +   newPr->nextInstrPt  = fnPtr;
   1.135 +   newPr->initialData  = initialData;
   1.136 +   newPr->requests     = NULL;
   1.137 +   newPr->schedSlot    = NULL;
   1.138  
   1.139        //fnPtr takes two params -- void *initData & void *animProcr
   1.140        //alloc stack locations, make stackPtr be the highest addr minus room
   1.141 @@ -285,7 +288,8 @@
   1.142     #ifdef STATS__TURN_ON_PROBES
   1.143     struct timeval timeStamp;
   1.144     gettimeofday( &(timeStamp), NULL);
   1.145 -   newPr->createPtInSecs = timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0);
   1.146 +   newPr->createPtInSecs = timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0) -
   1.147 +                                               _VMSMasterEnv->createPtInSecs;
   1.148     #endif
   1.149     //========================================================================
   1.150  
   1.151 @@ -301,7 +305,6 @@
   1.152     stackLocs  = VMS__malloc( VIRT_PROCR_STACK_SIZE );
   1.153     if( stackLocs == 0 )
   1.154      { perror("VMS__malloc stack"); exit(1); }
   1.155 -   newPr->startOfStack = stackLocs;
   1.156  
   1.157     return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
   1.158   }
   1.159 @@ -319,7 +322,6 @@
   1.160     stackLocs  = malloc( VIRT_PROCR_STACK_SIZE );
   1.161     if( stackLocs == 0 )
   1.162      { perror("malloc stack"); exit(1); }
   1.163 -   newPr->startOfStack = stackLocs;
   1.164  
   1.165     return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
   1.166   }
   1.167 @@ -477,18 +479,26 @@
   1.168  
   1.169  
   1.170  
   1.171 -/*This inserts the semantic-layer's request data into standard VMS carrier
   1.172 - * request data-struct is allocated on stack of this call & ptr to it sent
   1.173 - * to plugin
   1.174 +/*This call's name indicates that request is malloc'd -- so req handler
   1.175 + * has to free any extra requests tacked on before a send, using this.
   1.176 + *
   1.177 + * This inserts the semantic-layer's request data into standard VMS carrier
    1.178 + * request data-struct that is malloc'd.  The sem request doesn't need to
   1.179 + * be malloc'd if this is called inside the same call chain before the
   1.180 + * send of the last request is called.
   1.181 + *
   1.182 + *The request handler has to call VMS__free_VMSReq for any of these
   1.183   */
   1.184  inline void
   1.185 -VMS__add_sem_request( void *semReqData, VirtProcr *callingPr )
   1.186 - { VMSReqst req;
   1.187 +VMS__add_sem_request_in_mallocd_VMSReqst( void *semReqData,
   1.188 +                                          VirtProcr *callingPr )
   1.189 + { VMSReqst *req;
   1.190  
   1.191 -   req.reqType         = semantic;
   1.192 -   req.semReqData      = semReqData;
   1.193 -   req.nextReqst       = callingPr->requests;
   1.194 -   callingPr->requests = &req;
   1.195 +   req = VMS__malloc( sizeof(VMSReqst) );
   1.196 +   req->reqType         = semantic;
   1.197 +   req->semReqData      = semReqData;
   1.198 +   req->nextReqst       = callingPr->requests;
   1.199 +   callingPr->requests = req;
   1.200   }
   1.201  
   1.202  /*This inserts the semantic-layer's request data into standard VMS carrier
   1.203 @@ -573,10 +583,12 @@
   1.204     memcpy( newProbe->nameStr, semReq->nameStr, nameLen );
   1.205     newProbe->hist    = NULL;
   1.206     newProbe->schedChoiceWasRecorded = FALSE;
   1.207 +
   1.208 +      //This runs in masterVP, so no race-condition worries
   1.209     newProbe->probeID =
   1.210               addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo );
   1.211  
   1.212 -   requestingPr->dataReturnedFromReq = newProbe;
   1.213 +   requestingPr->dataRetFromReq = newProbe;
   1.214  
   1.215     (*resumePrFnPtr)( requestingPr, semEnv );
   1.216   }
   1.217 @@ -619,15 +631,13 @@
   1.218   }
   1.219  
   1.220  
   1.221 -//TODO: re-architect so that have clean separation between request handler
   1.222 +//TODO: look at architecting cleanest separation between request handler
   1.223  // and master loop, for dissipate, create, shutdown, and other non-semantic
   1.224  // requests.  Issue is chain: one removes requests from AppVP, one dispatches
   1.225  // on type of request, and one handles each type..  but some types require
   1.226  // action from both request handler and master loop -- maybe just give the
   1.227  // request handler calls like:  VMS__handle_X_request_type
   1.228  
   1.229 -void
   1.230 -endOSThreadFn( void *initData, VirtProcr *animatingPr );
   1.231  
   1.232  /*This is called by the semantic layer's request handler when it decides its
   1.233   * time to shut down the VMS system.  Calling this causes the core loop OS
   1.234 @@ -641,10 +651,9 @@
   1.235   * masterVP any AppVPs that might still be allocated and sitting in the
   1.236   * semantic environment, or have been orphaned in the _VMSWorkQ.
   1.237   * 
   1.238 - *NOTE: the semantic plug-in is expected to use VMS__malloc_to to get all the
   1.239 + *NOTE: the semantic plug-in is expected to use VMS__malloc to get all the
   1.240   * locations it needs, and give ownership to masterVP.  Then, they will be
   1.241 - * automatically freed when the masterVP is dissipated.  (This happens after
   1.242 - * the core loop threads have all exited)
   1.243 + * automatically freed.
   1.244   *
   1.245   *In here,create one core-loop shut-down processor for each core loop and put
   1.246   * them all directly into the readyToAnimateQ.
   1.247 @@ -655,7 +664,7 @@
   1.248   * point is it sure that all results have completed.
   1.249   */
   1.250  void
   1.251 -VMS__handle_shutdown_reqst( void *dummy, VirtProcr *animatingPr )
   1.252 +VMS__shutdown()
   1.253   { int coreIdx;
   1.254     VirtProcr *shutDownPr;
   1.255  
   1.256 @@ -703,19 +712,19 @@
   1.257   }
   1.258  
   1.259  
   1.260 -/*This is called after the threads have shut down and control has returned
   1.261 - * to the semantic layer, in the entry point function in the main thread.
   1.262 - * It has to free anything allocated during VMS_init, and any other alloc'd
   1.263 - * locations that might be left over.
   1.264 +/*This is called from the startup & shutdown
   1.265   */
   1.266  void
   1.267 -VMS__cleanup_after_shutdown()
   1.268 +VMS__cleanup_at_end_of_shutdown()
   1.269   { 
   1.270     SRSWQueueStruc **readyToAnimateQs;
   1.271     int              coreIdx;
   1.272     VirtProcr      **masterVPs;
   1.273     SchedSlot     ***allSchedSlots; //ptr to array of ptrs
   1.274  
    1.275 +      //All the environment data has been allocated with VMS__malloc, so just
    1.276 +      // free its internal big chunk and everything inside it disappears.
   1.277 +/*
   1.278     readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs;
   1.279     masterVPs        = _VMSMasterEnv->masterVPs;
   1.280     allSchedSlots    = _VMSMasterEnv->allSchedSlots;
   1.281 @@ -724,23 +733,40 @@
   1.282      {
   1.283        freeSRSWQ( readyToAnimateQs[ coreIdx ] );
   1.284           //master VPs were created external to VMS, so use external free
   1.285 -      VMS_ext__dissipate_procr( masterVPs[ coreIdx ] );
   1.286 +      VMS__dissipate_procr( masterVPs[ coreIdx ] );
   1.287        
   1.288        freeSchedSlots( allSchedSlots[ coreIdx ] );
   1.289      }
   1.290     
   1.291 -   free( _VMSMasterEnv->readyToAnimateQs );
   1.292 -   free( _VMSMasterEnv->masterVPs );
   1.293 -   free( _VMSMasterEnv->allSchedSlots );
   1.294 +   VMS__free( _VMSMasterEnv->readyToAnimateQs );
   1.295 +   VMS__free( _VMSMasterEnv->masterVPs );
   1.296 +   VMS__free( _VMSMasterEnv->allSchedSlots );
   1.297     
   1.298 -   VMS_ext__free_free_list( _VMSMasterEnv->freeListHead );
   1.299 -
   1.300     //============================= MEASUREMENT STUFF ========================
   1.301     #ifdef STATS__TURN_ON_PROBES
   1.302 -   freeDynArrayDeep( _VMSMasterEnv->dynIntervalProbesInfo, &free );
   1.303 +   freeDynArrayDeep( _VMSMasterEnv->dynIntervalProbesInfo, &VMS__free_probe);
   1.304     #endif
   1.305     //========================================================================
   1.306 -
   1.307 -   free( _VMSMasterEnv );
   1.308 +*/
   1.309 +      //These are the only two that use system free 
   1.310 +   VMS_ext__free_free_list( _VMSMasterEnv->freeListHead );
   1.311 +   free( (void *)_VMSMasterEnv );
   1.312   }
   1.313  
   1.314 +
   1.315 +//================================
   1.316 +
   1.317 +
   1.318 +/*Later, improve this -- for now, just exits the application after printing
   1.319 + * the error message.
   1.320 + */
   1.321 +void
   1.322 +VMS__throw_exception( char *msgStr, VirtProcr *reqstPr, VMSExcp *excpData )
   1.323 + {
    1.324 +   printf("%s", msgStr);
    1.325 +   fflush(stdout);
   1.326 +   exit(1);
   1.327 + }
   1.328 +
   1.329 +
   1.330 +