diff VMS.c @ 31:e69579a0e797

Works multi-core — pinned each VP to a per-core loop
author Me
date Wed, 01 Sep 2010 08:23:39 -0700
parents c8823e0bb2b4
children 17d20e5cf924
line diff
     1.1 --- a/VMS.c	Mon Aug 09 02:24:31 2010 -0700
     1.2 +++ b/VMS.c	Wed Sep 01 08:23:39 2010 -0700
     1.3 @@ -18,8 +18,8 @@
     1.4  void
     1.5  shutdownFn( void *dummy, VirtProcr *dummy2 );
     1.6  
     1.7 -void
     1.8 -create_sched_slots( MasterEnv *masterEnv );
     1.9 +SchedSlot **
    1.10 +create_sched_slots();
    1.11  
    1.12  void
    1.13  create_masterEnv();
    1.14 @@ -48,7 +48,7 @@
    1.15   * initial virt procrs, ready to schedule them to slots when the masterLoop
    1.16   * asks.  Without this pattern, the semantic layer's setup would
    1.17   * have to modify slots directly to assign the initial virt-procrs, and put
    1.18 - * them into the workQ itself, breaking the isolation completely.
    1.19 + * them into the readyToAnimateQ itself, breaking the isolation completely.
    1.20   *
    1.21   * 
    1.22   *The semantic layer creates the initial virt procr(s), and adds its
    1.23 @@ -77,29 +77,45 @@
    1.24  
    1.25  void
    1.26  create_masterEnv()
    1.27 - { MasterEnv  *masterEnv;
    1.28 -   VMSQueueStruc *workQ;
    1.29 -
    1.30 -      //Make the central work-queue
    1.31 -   _VMSWorkQ = makeVMSQ();
    1.32 -   workQ     = _VMSWorkQ;
    1.33 -
    1.34 + { MasterEnv       *masterEnv;
    1.35 +   SRSWQueueStruc **readyToAnimateQs;
    1.36 +   int              coreIdx;
    1.37 +   VirtProcr      **masterVPs;
    1.38 +   SchedSlot     ***allSchedSlots; //ptr to array of ptrs
    1.39 +   
    1.40 +      //Make the master env, which holds everything else
    1.41     _VMSMasterEnv = malloc( sizeof(MasterEnv) );
    1.42     masterEnv     = _VMSMasterEnv;
    1.43 +      //Need to set start pt here 'cause used by seed procr, which is created
    1.44 +      // before the first core loop starts up. -- not sure how yet..
    1.45 +//   masterEnv->coreLoopStartPt = ;
    1.46 +//   masterEnv->coreLoopEndPt   = ;
    1.47 +   
    1.48 +      //Make a readyToAnimateQ for each core loop
    1.49 +   readyToAnimateQs = malloc( NUM_CORES * sizeof(SRSWQueueStruc *) );
    1.50 +   masterVPs        = malloc( NUM_CORES * sizeof(VirtProcr *) );
    1.51  
    1.52 -      //create the master virtual processor
    1.53 -   masterEnv->masterVirtPr = VMS__create_procr( &masterLoop, masterEnv );
    1.54 +      //One array for each core, 3 in array, core's masterVP scheds all
    1.55 +   allSchedSlots    = malloc( NUM_CORES * sizeof(SchedSlot *) );
    1.56  
    1.57 -   create_sched_slots( masterEnv );
    1.58 +   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    1.59 +    {
    1.60 +      readyToAnimateQs[ coreIdx ] = makeSRSWQ();
    1.61 +      
     1.62 +         //Q: should give masterVP core-specific info as its init data?
    1.63 +      masterVPs[ coreIdx ] = VMS__create_procr( &masterLoop, masterEnv );
    1.64 +      masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;
    1.65 +      allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core
    1.66 +    }
    1.67 +   _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs;
    1.68 +   _VMSMasterEnv->masterVPs        = masterVPs;
    1.69 +   _VMSMasterEnv->allSchedSlots    = allSchedSlots;
    1.70  
    1.71 -   masterEnv->stillRunning = FALSE;
    1.72 -   masterEnv->numToPrecede = NUM_CORES;
    1.73  
    1.74 -      //First core loop to start up gets this, which will schedule seed Pr
    1.75 -      //TODO: debug: check address of masterVirtPr
    1.76 -   writeVMSQ( masterEnv->masterVirtPr, workQ );
    1.77  
    1.78 -   numProcrsCreated = 1;  //global counter for debugging
    1.79 +      //Aug 19, 2010:  no longer need to place initial masterVP into queue
    1.80 +      // because coreLoop now controls -- animates its masterVP when no work
    1.81 +
    1.82  
    1.83     //==================== malloc substitute ========================
    1.84     //
    1.85 @@ -143,15 +159,12 @@
    1.86   }
    1.87   */
    1.88  
    1.89 -void
    1.90 -create_sched_slots( MasterEnv *masterEnv )
    1.91 - { SchedSlot  **schedSlots, **filledSlots;
    1.92 +SchedSlot **
    1.93 +create_sched_slots()
    1.94 + { SchedSlot  **schedSlots;
    1.95     int i;
    1.96  
    1.97     schedSlots  = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
    1.98 -   filledSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
    1.99 -   masterEnv->schedSlots  = schedSlots;
   1.100 -   masterEnv->filledSlots = filledSlots;
   1.101  
   1.102     for( i = 0; i < NUM_SCHED_SLOTS; i++ )
   1.103      {
   1.104 @@ -161,6 +174,18 @@
   1.105        schedSlots[i]->workIsDone         = FALSE;
   1.106        schedSlots[i]->needsProcrAssigned = TRUE;
   1.107      }
   1.108 +   return schedSlots;
   1.109 + }
   1.110 +
   1.111 +
   1.112 +void
   1.113 +freeSchedSlots( SchedSlot **schedSlots )
   1.114 + { int i;
   1.115 +   for( i = 0; i < NUM_SCHED_SLOTS; i++ )
   1.116 +    {
   1.117 +      free( schedSlots[i] );
   1.118 +    }
   1.119 +   free( schedSlots );
   1.120   }
   1.121  
   1.122  
   1.123 @@ -267,6 +292,8 @@
   1.124     newPr->procrID     = numProcrsCreated++;
   1.125     newPr->nextInstrPt = fnPtr;
   1.126     newPr->initialData = initialData;
   1.127 +   newPr->requests    = NULL;
   1.128 +//   newPr->coreLoopStartPt = _VMSMasterEnv->coreLoopStartPt;
   1.129  
   1.130        //fnPtr takes two params -- void *initData & void *animProcr
   1.131        //alloc stack locations, make stackPtr be the highest addr minus room
   1.132 @@ -314,7 +341,7 @@
   1.133     stackPtrAddr      = &(callingPr->stackPtr);
   1.134     framePtrAddr      = &(callingPr->framePtr);
   1.135  
   1.136 -   jmpPt             = callingPr->coreLoopStartPt;
   1.137 +   jmpPt             = _VMSMasterEnv->coreLoopStartPt;
   1.138     coreLoopFramePtr  = callingPr->coreLoopFramePtr;//need this only
   1.139     coreLoopStackPtr  = callingPr->coreLoopStackPtr;//shouldn't need -- safety
   1.140  
   1.141 @@ -350,21 +377,6 @@
   1.142  
   1.143  
   1.144  
   1.145 -/*This is equivalent to "jump back to core loop" -- it's mainly only used
   1.146 - * just after adding dissipate request to a processor -- so the semantic
   1.147 - * layer is the only place it will be seen and/or used.
   1.148 - *
   1.149 - *It does almost the same thing as suspend, except don't need to save the
   1.150 - * stack nor set the nextInstrPt
   1.151 - *
   1.152 - *As of June 30, 2010  just implementing as a call to suspend -- just sugar
   1.153 - */
   1.154 -void
   1.155 -VMS__return_from_fn( VirtProcr *animatingPr )
   1.156 - {
   1.157 -   VMS__suspend_procr( animatingPr );
   1.158 - }
   1.159 -
   1.160  
   1.161  /*Not sure yet the form going to put "dissipate" in, so this is the third
   1.162   * possibility -- the semantic layer can just make a macro that looks like
   1.163 @@ -439,7 +451,7 @@
   1.164  //TODO: add a semantic-layer supplied "freer" for the semantic-data portion
   1.165  // of a request -- IE call with both a virt procr and a fn-ptr to request
   1.166  // freer (also maybe put sem request freer as a field in virt procr?)
   1.167 -//VMSHW relies right now on this only freeing VMS layer of request -- the
   1.168 +//SSR relies right now on this only freeing VMS layer of request -- the
   1.169  // semantic portion of request is alloc'd and freed by request handler
   1.170  void
   1.171  VMS__free_request( VMSReqst *req )
   1.172 @@ -453,11 +465,23 @@
   1.173  
   1.174     req = procrWithReq->requests;
   1.175     if( req == NULL ) return req;
   1.176 -   
   1.177 +
   1.178     procrWithReq->requests = procrWithReq->requests->nextReqst;
   1.179     return req;
   1.180   }
   1.181  
   1.182 +VMSReqst *
   1.183 +VMS__free_top_and_give_next_request_from( VirtProcr *procrWithReq )
   1.184 + { VMSReqst *req;
   1.185 +
   1.186 +   req = procrWithReq->requests;
   1.187 +   if( req == NULL ) return req;
   1.188 +
   1.189 +   procrWithReq->requests = procrWithReq->requests->nextReqst;
   1.190 +   VMS__free_request( req );
   1.191 +   return procrWithReq->requests;
   1.192 + }
   1.193 +
   1.194  inline int
   1.195  VMS__isSemanticReqst( VMSReqst *req )
   1.196   {
   1.197 @@ -562,7 +586,7 @@
   1.198   * the core loop threads have all exited)
   1.199   *
   1.200   *In here,create one core-loop shut-down processor for each core loop and put
   1.201 - * them all directly into the workQ.
   1.202 + * them all directly into the readyToAnimateQ.
   1.203   *Note, this function can ONLY be called after the semantic environment no
   1.204   * longer cares if AppVPs get animated after the point this is called.  In
   1.205   * other words, this can be used as an abort, or else it should only be
   1.206 @@ -573,15 +597,13 @@
   1.207  VMS__handle_shutdown_reqst( void *dummy, VirtProcr *animatingPr )
   1.208   { int coreIdx;
   1.209     VirtProcr *shutDownPr;
   1.210 -   VMSQueueStruc *workQ = _VMSWorkQ;
   1.211  
   1.212        //create the shutdown processors, one for each core loop -- put them
   1.213 -      // directly into _VMSWorkQ -- each core will die when gets one, so
   1.214 -      // the system distributes them evenly itself.
   1.215 +      // directly into the Q -- each core will die when gets one
   1.216     for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
   1.217      {
   1.218        shutDownPr = VMS__create_procr( &endOSThreadFn, NULL );
   1.219 -      writeVMSQ( shutDownPr, workQ );
   1.220 +      writeSRSWQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] );
   1.221      }
   1.222  
   1.223   }
   1.224 @@ -620,26 +642,36 @@
   1.225   }
   1.226  
   1.227  
   1.228 -
   1.229 -/*This is called after the threads have shut down and control as returned
   1.230 +/*This is called after the threads have shut down and control has returned
   1.231   * to the semantic layer, in the entry point function in the main thread.
   1.232   * It has to free anything allocated during VMS_init, and any other alloc'd
   1.233   * locations that might be left over.
   1.234   */
   1.235  void
   1.236  VMS__cleanup_after_shutdown()
   1.237 - { int i;
   1.238 - 
   1.239 -   free( _VMSWorkQ );
   1.240 -   free( _VMSMasterEnv->filledSlots );
   1.241 -   for( i = 0; i < NUM_SCHED_SLOTS; i++ )
   1.242 + { 
   1.243 +   SRSWQueueStruc **readyToAnimateQs;
   1.244 +   int              coreIdx;
   1.245 +   VirtProcr      **masterVPs;
   1.246 +   SchedSlot     ***allSchedSlots; //ptr to array of ptrs
   1.247 +
   1.248 +   readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs;
   1.249 +   masterVPs        = _VMSMasterEnv->masterVPs;
   1.250 +   allSchedSlots    = _VMSMasterEnv->allSchedSlots;
   1.251 +   
   1.252 +   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
   1.253      {
   1.254 -      free( _VMSMasterEnv->schedSlots[i] );
   1.255 +      freeSRSWQ( readyToAnimateQs[ coreIdx ] );
   1.256 +
   1.257 +      VMS__handle_dissipate_reqst( masterVPs[ coreIdx ] );
   1.258 +      
   1.259 +      freeSchedSlots( allSchedSlots[ coreIdx ] );
   1.260      }
   1.261 +   
   1.262 +   free( _VMSMasterEnv->readyToAnimateQs );
   1.263 +   free( _VMSMasterEnv->masterVPs );
   1.264 +   free( _VMSMasterEnv->allSchedSlots );
   1.265  
   1.266 -   free( _VMSMasterEnv->schedSlots);
   1.267 -   VMS__handle_dissipate_reqst( _VMSMasterEnv->masterVirtPr );
   1.268 -   
   1.269     free( _VMSMasterEnv );
   1.270   }
   1.271