diff VMS.c @ 22:1dbc7f6e3e67

Full VMS test -- works
author Me
date Wed, 30 Jun 2010 13:10:59 -0700
parents 734c665500e4
children 2b161e1a50ee
line diff
     1.1 --- a/VMS.c	Wed Jun 30 13:10:34 2010 -0700
     1.2 +++ b/VMS.c	Wed Jun 30 13:10:59 2010 -0700
     1.3 @@ -12,6 +12,15 @@
     1.4  #include "Queue_impl/BlockingQueue.h"
     1.5  
     1.6  
     1.7 +//===========================================================================
     1.8 +void
     1.9 +shutdownFn( void *dummy, VirtProcr *dummy2 );
    1.10 +
    1.11 +void
    1.12 +create_sched_slots( MasterEnv *masterEnv );
    1.13 +
    1.14 +//===========================================================================
    1.15 +
    1.16  /*Setup has two phases:
    1.17   * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
    1.18   *    the master virt procr into the work-queue, ready for first "call"
    1.19 @@ -36,10 +45,6 @@
    1.20   * the requestHandler and slaveScheduler plug-in functions
    1.21   */
    1.22  
    1.23 -void
    1.24 -create_sched_slots( MasterEnv *masterEnv );
    1.25 -
    1.26 -
    1.27  /*This allocates VMS data structures, populates the master VMSProc,
    1.28   * and master environment, and returns the master environment to the semantic
    1.29   * layer.
    1.30 @@ -65,11 +70,11 @@
    1.31     masterEnv->schedSlots[0]->needsProcrAssigned  = FALSE;  //says don't touch
    1.32     masterEnv->schedSlots[0]->workIsDone          = FALSE;  //says don't touch
    1.33     masterEnv->schedSlots[0]->procrAssignedToSlot = masterEnv->masterVirtPr;
    1.34 -
    1.35 +   masterEnv->masterVirtPr->schedSlot = masterEnv->schedSlots[0];
    1.36 +   
    1.37        //First core loop to start up gets this, which will schedule seed Pr
    1.38        //TODO: debug: check address of masterVirtPr
    1.39 -//TODO: commented out for debugging -- put it back in!!
    1.40 -//   writeCASQ( masterEnv->masterVirtPr, workQ );
    1.41 +   writeCASQ( masterEnv->masterVirtPr, workQ );
    1.42  
    1.43     numProcrsCreated = 1;
    1.44   }
    1.45 @@ -150,10 +155,11 @@
    1.46        //alloc stack locations, make stackPtr be the highest addr minus room
    1.47        // for 2 params + return addr.  Return addr (NULL) is in loc pointed to
    1.48        // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above
    1.49 -   stackLocs = malloc( 0x100000 ); //1 meg stack -- default Win thread's size
    1.50 -   stackPtr = ( (char *)stackLocs + 0x100000 - 0x10 );
    1.51 +   stackLocs = malloc( VIRT_PROCR_STACK_SIZE );
    1.52 +   newPr->startOfStack = stackLocs;
    1.53 +   stackPtr = ( (char *)stackLocs + VIRT_PROCR_STACK_SIZE - 0x10 );
    1.54        //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp
    1.55 -   *( (int *)stackPtr + 2 ) = (int) newPr;  //rightmost param -- 32bit pointer
    1.56 +   *( (int *)stackPtr + 2 ) = (int) newPr; //rightmost param -- 32bit pointer
    1.57     *( (int *)stackPtr + 1 ) = (int) initialData;  //next  param to left
    1.58     newPr->stackPtr = stackPtr; //core loop will switch to this, then
    1.59     newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr
    1.60 @@ -162,19 +168,6 @@
    1.61   }
    1.62  
    1.63  
    1.64 -/*This inserts the semantic-layer's data into the standard VMS carrier
    1.65 - */
    1.66 -inline void
    1.67 -VMS__send_sem_request( void *semReqData, VirtProcr *callingPr )
    1.68 - { SlaveReqst *req;
    1.69 -
    1.70 -   req = malloc( sizeof(SlaveReqst) );
    1.71 -   req->slaveFrom      = callingPr;
    1.72 -   req->semReqData     = semReqData;
    1.73 -   req->nextRequest    = callingPr->requests;
    1.74 -   callingPr->requests = req;
    1.75 - }
    1.76 -
    1.77   /*there is a label inside this function -- save the addr of this label in
    1.78   * the callingPr struc, as the pick-up point from which to start the next
    1.79   * work-unit for that procr.  If turns out have to save registers, then
    1.80 @@ -185,10 +178,9 @@
    1.81   * next work-unit for that procr.
    1.82   */
    1.83  void
    1.84 -VMS__suspend_processor( VirtProcr *callingPr )
    1.85 +VMS__suspend_procr( VirtProcr *callingPr )
    1.86   { void *jmpPt, *stackPtrAddr, *framePtrAddr, *coreLoopStackPtr;
    1.87     void *coreLoopFramePtr;
    1.88 -   int coreIdx;
    1.89  
    1.90        //The request to master will cause this suspended virt procr to get
    1.91        // scheduled again at some future point -- to resume, core loop jumps
    1.92 @@ -209,6 +201,8 @@
    1.93  
    1.94        //Save the virt procr's stack and frame ptrs, restore coreloop's frame
    1.95        // ptr, then jump back to "start" of core loop
    1.96 +      //Note, GCC compiles to assembly that saves esp and ebp in the stack
    1.97 +      // frame -- so have to explicitly do assembly that saves to memory
    1.98     asm volatile("movl %0,     %%eax;  \
    1.99                   movl %%esp, (%%eax); \
   1.100                   movl %1,     %%eax;  \
   1.101 @@ -228,37 +222,208 @@
   1.102     return;
   1.103   }
   1.104  
   1.105 +
   1.106 +
   1.107 +/*This is equivalent to "jump back to core loop" -- it's mainly only used
   1.108 + * just after adding dissipate request to a processor -- so the semantic
   1.109 + * layer is the only place it will be seen and/or used.
   1.110 + *
   1.111 + *It does almost the same thing as suspend, except don't need to save the
   1.112 + * stack nor set the nextInstrPt
   1.113 + *
   1.114 + *As of June 30, 2010  just implementing as a call to suspend -- just sugar
   1.115 + */
   1.116  void
   1.117 -VMS__dissipate_animating_processor( VirtProcr *animatingPr )
   1.118 +VMS__return_from_fn( VirtProcr *animatingPr )
   1.119   {
   1.120 -
   1.121 +   VMS__suspend_procr( animatingPr );
   1.122   }
   1.123  
   1.124 -/*This runs in main thread -- so can only signal to the core loop to shut
   1.125 - * itself down --
   1.126 +
   1.127 +/*Not sure yet the form going to put "dissipate" in, so this is the third
   1.128 + * possibility -- the semantic layer can just make a macro that looks like
   1.129 + * a call to its name, then expands to a call to this.
   1.130   *
   1.131 - *Want the master to decide when to shut down -- when semantic layer tells it
   1.132 - * to -- say, when all the application-virtual processors have dissipated.
   1.133 + *As of June 30, 2010  this looks like the top choice..
   1.134   *
   1.135 - *Maybe return a special code from scheduling plug-in..  master checks and
   1.136 - * when sees, it shuts down the core loops -- does this by scheduling a
   1.137 - * special virt processor whose next instr pt is the core-end label.
   1.138 + *This adds a request to dissipate, then suspends the processor so that the
   1.139 + * request handler will receive the request.  The request handler is what
   1.140 + * does the work of freeing memory and removing the processor from the
   1.141 + * semantic environment's data structures.
    1.142 + *The request handler also is what figures out when to shut down the VMS
   1.143 + * system -- which causes all the core loop threads to die, and returns from
   1.144 + * the call that started up VMS to perform the work.
   1.145 + *
   1.146 + *This form is a bit misleading to understand if one is trying to figure out
   1.147 + * how VMS works -- it looks like a normal function call, but inside it
   1.148 + * sends a request to the request handler and suspends the processor, which
   1.149 + * jumps out of the VMS__dissipate_procr function, and out of all nestings
   1.150 + * above it, transferring the work of dissipating to the request handler,
   1.151 + * which then does the actual work -- causing the processor that animated
   1.152 + * the call of this function to disappear and the "hanging" state of this
   1.153 + * function to just poof into thin air -- the virtual processor's trace
   1.154 + * never returns from this call, but instead the virtual processor's trace
   1.155 + * gets suspended in this call and all the virt processor's state disap-
   1.156 + * pears -- making that suspend the last thing in the virt procr's trace.
   1.157   */
   1.158  void
   1.159 -VMS__shutdown()
   1.160 +VMS__dissipate_procr( VirtProcr *procrToDissipate )
   1.161 + { VMSReqst *req;
   1.162 +
   1.163 +   req = malloc( sizeof(VMSReqst) );
   1.164 +//   req->virtProcrFrom      = callingPr;
   1.165 +   req->reqType               = dissipate;
   1.166 +   req->nextReqst             = procrToDissipate->requests;
   1.167 +   procrToDissipate->requests = req;
   1.168 +   
   1.169 +   VMS__suspend_procr( procrToDissipate );
   1.170 +}
   1.171 +
   1.172 +
   1.173 +/*This inserts the semantic-layer's request data into standard VMS carrier
   1.174 + */
   1.175 +inline void
   1.176 +VMS__send_sem_request( void *semReqData, VirtProcr *callingPr )
   1.177 + { VMSReqst *req;
   1.178 +
   1.179 +   req = malloc( sizeof(VMSReqst) );
   1.180 +//   req->virtProcrFrom      = callingPr;
   1.181 +   req->reqType        = semantic;
   1.182 +   req->semReqData     = semReqData;
   1.183 +   req->nextReqst      = callingPr->requests;
   1.184 +   callingPr->requests = req;
   1.185 + }
   1.186 +
   1.187 +
   1.188 +/*This creates a request of type "dissipate" -- which will cause the virt
   1.189 + * processor's state and owned locations to be freed
   1.190 + */
   1.191 +inline void
   1.192 +VMS__send_dissipate_request( VirtProcr *procrToDissipate )
   1.193 + { VMSReqst *req;
   1.194 +
   1.195 +   req = malloc( sizeof(VMSReqst) );
   1.196 +//   req->virtProcrFrom      = callingPr;
   1.197 +   req->reqType               = dissipate;
   1.198 +   req->nextReqst             = procrToDissipate->requests;
   1.199 +   procrToDissipate->requests = req;
   1.200 + }
   1.201 +
   1.202 +
   1.203 +//TODO: add a semantic-layer supplied "freer" for the semantic-data portion
   1.204 +// of a request -- IE call with both a virt procr and a fn-ptr to request
   1.205 +// freer (or maybe put request freer as a field in virt procr?)
   1.206 +void
   1.207 +VMS__remove_and_free_top_request( VirtProcr *procrWithReq )
   1.208 + { VMSReqst *req;
   1.209 +
   1.210 +   req = procrWithReq->requests;
   1.211 +   procrWithReq->requests = procrWithReq->requests->nextReqst;
   1.212 +   free( req );
   1.213 + }
   1.214 +
   1.215 +/*This must be called by the request handler plugin -- it cannot be called
   1.216 + * from the semantic library "dissipate processor" function -- instead, the
   1.217 + * semantic layer has to generate a request for the plug-in to call this
   1.218 + * function.
   1.219 + *The reason is that this frees the virtual processor's stack -- which is
   1.220 + * still in use inside semantic library calls!
   1.221 + *
   1.222 + *This frees or recycles all the state owned by and comprising the animating
   1.223 + * virtual procr.  It frees any state that was malloc'd by the VMS system
   1.224 + * itself, and asks the VMS system to dis-own any VMS__malloc'd locations.
   1.225 + *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
   1.226 + * state, then that state gets freed (or sent to recycling) as a side-effect
   1.227 + * of dis-owning it.
   1.228 + */
   1.229 +void
   1.230 +VMS__free_procr_locs( VirtProcr *animatingPr )
   1.231 + {
   1.232 +      //dis-own all locations owned by this processor, causing to be freed
   1.233 +      // any locations that it is (was) sole owner of
   1.234 +   //TODO: implement VMS__malloc system, including "give up ownership"
   1.235 +
   1.236 +   VMS__remove_and_free_top_request( animatingPr );
   1.237 +   free( animatingPr->startOfStack );
   1.238 +   
   1.239 +      //NOTE: animatingPr->semanticData should either have been allocated
   1.240 +      // with VMS__malloc, or else freed in the request handler plug-in.
   1.241 +      //NOTE: initialData was given to the processor, so should either have
   1.242 +      // been alloc'd with VMS__malloc, or freed by the level above animPr.
   1.243 +      //So, all that's left to free here is the VirtProcr struc itself
   1.244 +   free( animatingPr );
   1.245 + }
   1.246 +
   1.247 +
   1.248 +/*The semantic layer figures out when the work is done ( perhaps by a call
   1.249 + * in the application to "work all done", or perhaps all the virtual
   1.250 + * processors have dissipated.. a.s.o. )
   1.251 + *
   1.252 + *The semantic layer is responsible for making sure all work has fully
   1.253 + * completed before using this to shutdown the VMS system.
   1.254 + *
   1.255 + *After the semantic layer has determined it wants to shut down, the
   1.256 + * next time the Master Loop calls the scheduler plug-in, the scheduler
   1.257 + * then calls this function and returns the virtual processor it gets back.
   1.258 + *
   1.259 + *When the shut-down processor runs, it first frees all locations malloc'd to
    1.260 + * the VMS system (that weren't
   1.261 + * specified as return-locations).  Then it creates one core-loop shut-down
   1.262 + * processor for each core loop and puts them all into the workQ.  When a
   1.263 + * core loop animates a core loop shut-down processor, it causes exit-thread
   1.264 + * to run, and when all core loop threads have exited, then the "wait for
   1.265 + * work to finish" in the main thread is woken, and the function-call that
   1.266 + * started all the work returns.
   1.267 + *
   1.268 + *The function animated by this processor performs the shut-down work.
   1.269 + */
   1.270 +VirtProcr *
   1.271 +VMS__create_the_shutdown_procr()
   1.272 + {
   1.273 +   return VMS__create_procr( &shutdownFn, NULL );
   1.274 + }
   1.275 +
   1.276 +
   1.277 +/*This is the function run by the special "shut-down" processor
   1.278 + * 
   1.279 + *The _VMSMasterEnv is needed by this shut down function, so the "wait"
   1.280 + * function run in the main loop has to free it, and the thread-related
   1.281 + * locations (coreLoopThdParams a.s.o.).
   1.282 + *However, the semantic environment and all data malloc'd to VMS can be
   1.283 + * freed here.
   1.284 + *
   1.285 + *NOTE: the semantic plug-in is expected to use VMS__malloc to get all the
   1.286 + * locations it needs -- they will be automatically freed by the standard
   1.287 + * "free all owned locations"
   1.288 + *
   1.289 + *Free any locations malloc'd to the VMS system (that weren't
   1.290 + * specified as return-locations).
    1.291 + *Then create one core-loop shut-down processor for each core loop and put
   1.292 + * them all into the workQ.
   1.293 + */
   1.294 +void
   1.295 +shutdownFn( void *dummy, VirtProcr *animatingPr )
   1.296   { int coreIdx;
   1.297     VirtProcr *shutDownPr;
   1.298 - 
   1.299 -   //TODO: restore the "orig" stack pointer and frame ptr saved in VMS__start
   1.300 -   //create a "special" virtual processor, one for each core loop that has
   1.301 -   // the "loop end" point as its "next instr" point -- when the core loop
   1.302 -   // jumps to animate the virt procr, the jump lands it at its own
   1.303 -   // shut-down code.
   1.304 +   CASQueueStruc *workQ = _VMSWorkQ;
   1.305 +
   1.306 +      //free all the locations owned within the VMS system
   1.307 +   //TODO: write VMS__malloc and free.. -- take the DKU malloc as starting pt
   1.308 +
   1.309 +      //make the core loop shut-down processors and put them into the workQ
   1.310     for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
   1.311      {
   1.312        shutDownPr = VMS__create_procr( NULL, NULL );
   1.313        shutDownPr->nextInstrPt = _VMSMasterEnv->coreLoopShutDownPt;
   1.314 +      writeCASQ( shutDownPr, workQ );
   1.315      }
   1.316 +
   1.317 +      //This is an issue: the animating processor of this function may not
   1.318 +      // get its request handled before all the cores have shutdown.
   1.319 +      //TODO: after all the threads stop, clean out the MasterEnv, the
   1.320 +      // SemanticEnv, and the workQ before returning.
   1.321 +   VMS__send_dissipate_request( animatingPr );
   1.322 +   VMS__suspend_procr( animatingPr );  //will never come back from this
   1.323   }
   1.324  
   1.325