view VMS.c @ 38:17d20e5cf924
measures coreloop and masterVP times
| author | Me |
|---|---|
| date | Tue, 07 Sep 2010 18:40:57 -0700 |
| parents | e69579a0e797 |
| children | 1df8d7f2c9b1 |
/*
 * Copyright 2010 OpenSourceStewardshipFoundation
 *
 * Licensed under BSD
 */
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <pthread.h>

#include "VMS.h"
#include "Queue_impl/BlockingQueue.h"
#include "Histogram/Histogram.h"

#define thdAttrs NULL
//===========================================================================
void
shutdownFn( void *dummy, VirtProcr *dummy2 );

SchedSlot **
create_sched_slots();

void
create_masterEnv();

void
create_the_coreLoop_OS_threads();

pthread_mutex_t suspendLock  = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t  suspend_cond = PTHREAD_COND_INITIALIZER;

//===========================================================================
/*Setup has two phases:
 * 1) The semantic layer first calls VMS__init, which creates masterEnv and
 *    puts the master virt procr into the work-queue, ready for its first
 *    "call"
 * 2) The semantic layer then does its own init, which creates the seed virt
 *    procr inside the semantic layer, ready to be scheduled when
 *    asked by the first run of the masterLoop.
 *
 *This part is a bit weird because VMS really wants to be "always there" and
 * have applications attach and detach.  For now, this VMS is part of
 * the app, so the VMS system starts up as part of running the app.
 *
 *The semantic layer is isolated from the VMS internals by making the
 * semantic layer do its setup only to the point that it is ready with its
 * initial virt procrs, ready to schedule them to slots when the masterLoop
 * asks.  Without this pattern, the semantic layer's setup would
 * have to modify slots directly to assign the initial virt-procrs, and put
 * them into the readyToAnimateQ itself, breaking the isolation completely.
 *
 *
 *The semantic layer creates the initial virt procr(s), adds its
 * own environment to masterEnv, and fills in the pointers to
 * the requestHandler and slaveScheduler plug-in functions.
 */
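/*Illustrative sketch only (not part of VMS): how a semantic layer might
 * drive the two setup phases from its own entry point.  The "SEM__" names
 * are hypothetical placeholders for whatever the semantic layer provides;
 * the VMS__ calls are the ones defined in this file.
 */
/*
void
SEM__start_app( void *appArgs )
 {
   VMS__init();                       //phase 1: masterEnv + core-loop threads
   SEM__create_seed_procr( appArgs ); //phase 2: semantic layer's own init
   VMS__start_the_work_then_wait_until_done();
   SEM__cleanup_own_env();            //semantic layer frees its state first
   VMS__cleanup_after_shutdown();     //then VMS frees masterEnv and the rest
 }
*/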
/*This allocates VMS data structures, populates the master VMSProc
 * and master environment, and returns the master environment to the semantic
 * layer.
 */
void
VMS__init()
 {
   create_masterEnv();
   create_the_coreLoop_OS_threads();
 }
/*To initialize the sequential version, just don't create the threads
 */
void
VMS__init_Seq()
 {
   create_masterEnv();
 }
void
create_masterEnv()
 { MasterEnv       *masterEnv;
   SRSWQueueStruc **readyToAnimateQs;
   int              coreIdx;
   VirtProcr      **masterVPs;
   SchedSlot     ***allSchedSlots; //ptr to array of ptrs

      //Make the master env, which holds everything else
   _VMSMasterEnv = malloc( sizeof(MasterEnv) );
   masterEnv     = _VMSMasterEnv;

      //Need to set start pt here 'cause it's used by the seed procr, which
      // is created before the first core loop starts up. -- not sure how yet..
//   masterEnv->coreLoopStartPt = ;
//   masterEnv->coreLoopEndPt   = ;

      //Make a readyToAnimateQ for each core loop
   readyToAnimateQs = malloc( NUM_CORES * sizeof(SRSWQueueStruc *) );
   masterVPs        = malloc( NUM_CORES * sizeof(VirtProcr *) );

      //One array of slots for each core, NUM_SCHED_SLOTS (3) in each array;
      // the core's masterVP schedules all of them
   allSchedSlots    = malloc( NUM_CORES * sizeof(SchedSlot **) );

   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      readyToAnimateQs[ coreIdx ] = makeSRSWQ();

         //Q: should give masterVP core-specific info as its init data?
      masterVPs[ coreIdx ] = VMS__create_procr( &masterLoop, masterEnv );
      masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;

      allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core
    }
   _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs;
   _VMSMasterEnv->masterVPs        = masterVPs;
   _VMSMasterEnv->allSchedSlots    = allSchedSlots;

      //Aug 19, 2010: no longer need to place initial masterVP into queue
      // because coreLoop now controls -- animates its masterVP when no work

   //==================== malloc substitute ========================
   //
   //Testing whether malloc is using thread-local storage and therefore
   // causing unreliable behavior.
   //Just allocate a massive chunk of memory and roll own malloc/free and
   // make app use VMS__malloc_to, which will suspend and perform malloc
   // in the master, taking from this massive chunk.

//   initFreeList();

   //============================= MEASUREMENT STUFF ========================
   #ifdef MEAS__TIME_STAMP_SUSP
   _VMSMasterEnv->measSuspHist   = makeHistogram( 25, 110, 135 );
   #endif

   #ifdef MEAS__TIME_MASTER
   _VMSMasterEnv->measMasterHist = makeHistogram( 25, 500, 800 );
   #endif
   //========================================================================
 }
/*
void
initMasterMalloc()
 {
   _VMSMasterEnv->mallocChunk = malloc( MASSIVE_MALLOC_SIZE );

      //The free-list element is the first several locations of an
      // allocated chunk -- the address given to the application is pre-
      // pended with both the ownership structure and the free-list struc.
      //So, write the values of these into the first locations of
      // mallocChunk -- which marks it as free & puts in its size.
   listElem       = (FreeListElem *)_VMSMasterEnv->mallocChunk;
   listElem->size = MASSIVE_MALLOC_SIZE - NUM_PREPEND_BYTES;
   listElem->next = NULL;
 }

void
dissipateMasterMalloc()
 {
      //Just foo code -- to get going -- doing as if free list were link-list
   currElem = _VMSMasterEnv->freeList;
   while( currElem != NULL )
    {
      nextElem = currElem->next;
      masterFree( currElem );
      currElem = nextElem;
    }
   free( _VMSMasterEnv->freeList );
 }
*/
SchedSlot **
create_sched_slots()
 { SchedSlot **schedSlots;
   int         i;

   schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );

   for( i = 0; i < NUM_SCHED_SLOTS; i++ )
    {
      schedSlots[i] = malloc( sizeof(SchedSlot) );

         //Set state to mean "handling requests done, slot needs filling"
      schedSlots[i]->workIsDone         = FALSE;
      schedSlots[i]->needsProcrAssigned = TRUE;
    }
   return schedSlots;
 }


void
freeSchedSlots( SchedSlot **schedSlots )
 { int i;
   for( i = 0; i < NUM_SCHED_SLOTS; i++ )
    {
      free( schedSlots[i] );
    }
   free( schedSlots );
 }
void
create_the_coreLoop_OS_threads()
 {
   //========================================================================
   // Create the Threads
   int coreIdx, retCode;

      //Need the threads to be created suspended, and wait for a signal
      // before proceeding -- gives time after creating to initialize other
      // stuff before the coreLoops set off.
   _VMSMasterEnv->setupComplete = 0;

      //Make the threads that animate the core loops
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    { coreLoopThdParams[coreIdx] = malloc( sizeof(ThdParams) );
      coreLoopThdParams[coreIdx]->coreNum = coreIdx;

      retCode =
         pthread_create( &(coreLoopThdHandles[coreIdx]),
                         thdAttrs,
                         &coreLoop,
                         (void *)(coreLoopThdParams[coreIdx]) );
      if( retCode ) { printf("ERROR creating thread: %d\n", retCode); exit(1); }
    }
 }
/*Semantic layer calls this when it wants the system to start running.
 *
 *This starts the core loops running, then waits for them to exit.
 */
void
VMS__start_the_work_then_wait_until_done()
 { int coreIdx;
   //Start the core loops running
   //===========================================================================
   TSCount startCount, endCount;
   unsigned long long count = 0, freq = 0;
   double runTime;

   startCount = getTSCount();

      //tell the core loop threads that setup is complete
      //get lock, to lock out any threads still starting up -- they'll see
      // that setupComplete is true before entering while loop, and so never
      // wait on the condition
   pthread_mutex_lock(   &suspendLock );
   _VMSMasterEnv->setupComplete = 1;
   pthread_mutex_unlock( &suspendLock );
   pthread_cond_broadcast( &suspend_cond );

      //wait for all to complete
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      pthread_join( coreLoopThdHandles[coreIdx], NULL );
    }

      //NOTE: do not clean up VMS env here -- semantic layer has to have
      // a chance to clean up its environment first, then do a call to free
      // the Master env and rest of VMS locations

   endCount = getTSCount();
   count    = endCount - startCount;

   runTime = (double)count / (double)TSCOUNT_FREQ;

   printf("\n Time startup to shutdown: %f\n", runTime); fflush( stdout );
 }
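/*For reference, a sketch (assumed, since coreLoop lives in another file) of
 * the matching wait that the setupComplete flag and suspend_cond broadcast
 * above are aimed at:
 *
 *   pthread_mutex_lock( &suspendLock );
 *   while( !_VMSMasterEnv->setupComplete )
 *      pthread_cond_wait( &suspend_cond, &suspendLock );
 *   pthread_mutex_unlock( &suspendLock );
 *
 *Checking the flag under the lock is what makes the broadcast safe: a thread
 * either sees setupComplete == 1 and skips the wait, or is already waiting
 * on the condition when the broadcast arrives.
 */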
/*The only difference between the version with an OS thread pinned to each
 * core and the sequential version of VMS is VMS__init_Seq, this, and
 * coreLoop_Seq.
 */
void
VMS__start_the_work_then_wait_until_done_Seq()
 {
      //Instead of un-suspending threads, just call the one and only
      // core loop (sequential version), in the main thread.
   coreLoop_Seq( NULL );
 }
/*Create the stack, then create a __cdecl structure on it and put initialData
 * and a pointer to the new structure instance into the parameter positions
 * on the stack.
 *Then put the function pointer into nextInstrPt -- the stack is set up in
 * std call structure, so jumping to the function ptr is the same as a GCC
 * generated function call.
 *No need to save registers on the old stack frame, because there's no old
 * animator state to return to --
 */
VirtProcr *
VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
 { VirtProcr *newPr;
   char      *stackLocs, *stackPtr;

   newPr = malloc( sizeof(VirtProcr) );
   newPr->procrID     = numProcrsCreated++;
   newPr->nextInstrPt = fnPtr;
   newPr->initialData = initialData;
   newPr->requests    = NULL;
   newPr->schedSlot   = NULL;
// newPr->coreLoopStartPt = _VMSMasterEnv->coreLoopStartPt;

      //fnPtr takes two params -- void *initData & void *animProcr
      //alloc stack locations, make stackPtr be the highest addr minus room
      // for 2 params + return addr.  Return addr (NULL) is in loc pointed to
      // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above
   stackLocs = malloc( VIRT_PROCR_STACK_SIZE );
   if( stackLocs == 0 )
    { perror("malloc stack"); exit(1); }
   newPr->startOfStack = stackLocs;
   stackPtr = ( (char *)stackLocs + VIRT_PROCR_STACK_SIZE - 0x10 );
      //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp
   *( (int *)stackPtr + 2 ) = (int) newPr;       //rightmost param -- 32bit pointer
   *( (int *)stackPtr + 1 ) = (int) initialData; //next param to left
   newPr->stackPtr = stackPtr; //core loop will switch to this, then
   newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr

   return newPr;
 }
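/*Illustrative only -- a minimal semantic-layer processor function with the
 * two-parameter shape that VMS__create_procr expects (endOSThreadFn below
 * has the same shape).  The body is a hypothetical sketch, not part of VMS;
 * someSemReqData is a placeholder name.
 */
/*
void
exampleProcrFn( void *initData, VirtProcr *animatingPr )
 {
      //do a chunk of work, then hand a semantic request to the master and
      // suspend; execution resumes on the line after the suspend call when
      // the masterLoop re-schedules this procr into a slot
   VMS__add_sem_request( someSemReqData, animatingPr );
   VMS__suspend_procr( animatingPr );

      //when the procr's work is finished, dissipate -- this call never
      // returns; the request handler frees the procr's stack and struct
   VMS__dissipate_procr( animatingPr );
 }
*/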
/*There is a label inside this function -- save the addr of this label in
 * the callingPr struc, as the pick-up point from which to start the next
 * work-unit for that procr.  If it turns out registers have to be saved,
 * save them in the procr struc too.  Then do an assembly jump to the
 * CoreLoop's "done with work-unit" label.  The procr struc is in the request
 * in the slave that animated the just-ended work-unit, so all the state is
 * saved there, and will get passed along, inside the request handler, to the
 * next work-unit for that procr.
 */
void
VMS__suspend_procr( VirtProcr *animatingPr )
 { void *jmpPt, *stackPtrAddr, *framePtrAddr, *coreLoopStackPtr;
   void *coreLoopFramePtr;

      //The request to master will cause this suspended virt procr to get
      // scheduled again at some future point -- to resume, core loop jumps
      // to the resume point (below), which causes restore of saved regs and
      // "return" from this call.
   animatingPr->nextInstrPt = &&ResumePt;

      //return ownership of the virt procr and sched slot to Master virt pr
   animatingPr->schedSlot->workIsDone = TRUE;
// coreIdx = callingPr->coreAnimatedBy;

   stackPtrAddr = &(animatingPr->stackPtr);
   framePtrAddr = &(animatingPr->framePtr);

   jmpPt            = _VMSMasterEnv->coreLoopStartPt;
   coreLoopFramePtr = animatingPr->coreLoopFramePtr; //need this only
   coreLoopStackPtr = animatingPr->coreLoopStackPtr; //safety

      //Save the virt procr's stack and frame ptrs
   asm volatile("movl %0, %%eax; \
                 movl %%esp, (%%eax); \
                 movl %1, %%eax; \
                 movl %%ebp, (%%eax) "\
       /* outputs */ : \
       /* inputs  */ : "g" (stackPtrAddr), "g" (framePtrAddr) \
       /* clobber */ : "memory", "%eax" \
       );

   #ifdef MEAS__TIME_STAMP_SUSP
      //record time stamp into animating procr: compared to time-stamp
      // recorded below, at the resume pt.
      //NOTE: doing minimal work here 'cause only a few instrs executed in
      // core loop, so only using bottom half of time-stamp -- have to
      // externally do sanity check & throw out absurd values due to rollover
   saveLowTimeStampCountInto( animatingPr->preSuspTSCLow );
   #endif

      //restore coreloop's frame ptr, then jump back to "start" of core loop
      //Note, GCC compiles to assembly that saves esp and ebp in the stack
      // frame -- so have to explicitly do assembly that saves to memory
   asm volatile("movl %0, %%eax; \
                 movl %1, %%esp; \
                 movl %2, %%ebp; \
                 jmp  %%eax " \
       /* outputs */ : \
       /* inputs  */ : "m" (jmpPt), "m" (coreLoopStackPtr), "m" (coreLoopFramePtr)\
       /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi" \
       ); //list everything as clobbered to force GCC to save all
          // live vars that are in regs on stack before this
          // assembly, so that stack pointer is correct, before jmp

 ResumePt:
   #ifdef MEAS__TIME_STAMP_SUSP
   saveLowTimeStampCountInto( animatingPr->postSuspTSCLow );
      //Take difference between the pre-suspend and post-suspend times
      // and do sanity check to see if rollover happened between
   int diff = animatingPr->postSuspTSCLow - animatingPr->preSuspTSCLow;
   if( diff > 1000000 ) diff = 0;
   addToHist( diff, _VMSMasterEnv->measSuspHist );
   #endif

   return;
 }
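/*For orientation, the resume side (in coreLoop, in another file) is assumed
 * to mirror the jump above: the core loop loads the procr's saved stackPtr
 * and framePtr, then jumps to nextInstrPt, which for a suspended procr is the
 * ResumePt label saved at the top of VMS__suspend_procr -- roughly:
 *
 *   asm volatile("movl %0, %%eax; \
 *                 movl %1, %%esp; \
 *                 movl %2, %%ebp; \
 *                 jmp  %%eax "
 *       : : "m" (procr->nextInstrPt), "m" (procr->stackPtr), "m" (procr->framePtr)
 *       : "memory", "%eax" );
 *
 * so the procr picks up at ResumePt with its own stack restored (or, for a
 * freshly created procr, at fnPtr with the __cdecl frame built in
 * VMS__create_procr).
 */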
/*
 *This adds a dissipate request, then suspends the processor so that the
 * request handler will receive the request.  The request handler is what
 * does the work of freeing memory and removing the processor from the
 * semantic environment's data structures.
 *The request handler is also what figures out when to shut down the VMS
 * system -- which causes all the core loop threads to die, and returns from
 * the call that started up VMS to perform the work.
 *
 *This form is a bit misleading if one is trying to figure out how VMS
 * works -- it looks like a normal function call, but inside, it sends a
 * request to the request handler and suspends the processor.  That suspend
 * jumps out of the VMS__dissipate_procr function, and out of all nestings
 * above it, transferring the work of dissipating to the request handler,
 * which then does the actual work.  The processor that animated the call of
 * this function disappears, and the "hanging" state of this function just
 * poofs into thin air -- the virtual processor's trace never returns from
 * this call; instead the trace gets suspended in this call and all the virt
 * processor's state disappears, making that suspend the last thing in the
 * virt procr's trace.
 */
void
VMS__dissipate_procr( VirtProcr *procrToDissipate )
 { VMSReqst *req;

   req = malloc( sizeof(VMSReqst) );
// req->virtProcrFrom = callingPr;
   req->reqType   = dissipate;
   req->nextReqst = procrToDissipate->requests;
   procrToDissipate->requests = req;

   VMS__suspend_procr( procrToDissipate );
 }
/*This inserts the semantic-layer's request data into standard VMS carrier
 */
inline void
VMS__add_sem_request( void *semReqData, VirtProcr *callingPr )
 { VMSReqst *req;

   req = malloc( sizeof(VMSReqst) );
// req->virtProcrFrom = callingPr;
   req->reqType    = semantic;
   req->semReqData = semReqData;
   req->nextReqst  = callingPr->requests;
   callingPr->requests = req;
 }


/*Use this to get first request before starting request handler's loop
 */
VMSReqst *
VMS__take_top_request_from( VirtProcr *procrWithReq )
 { VMSReqst *req;

   req = procrWithReq->requests;
   if( req == NULL ) return req;

   procrWithReq->requests = procrWithReq->requests->nextReqst;
   return req;
 }
/*A subtle bug due to freeing, then accessing "next" after the free, caused
 * this form of call to be put in -- so call this at the end of the request
 * handler loop that iterates through the requests.
 */
VMSReqst *
VMS__free_top_and_give_next_request_from( VirtProcr *procrWithReq )
 { VMSReqst *req;

   req = procrWithReq->requests;
   if( req == NULL ) return NULL;

   procrWithReq->requests = procrWithReq->requests->nextReqst;
   VMS__free_request( req );
   return procrWithReq->requests;
 }
//TODO: add a semantic-layer supplied "freer" for the semantic-data portion
// of a request -- i.e., call with both a virt procr and a fn-ptr to the
// request freer (also maybe put the sem request freer as a field in virt procr?)
//MeasVMS relies right now on this only freeing the VMS layer of a request --
// the semantic portion of a request is alloc'd and freed by the request handler
void
VMS__free_request( VMSReqst *req )
 {
   free( req );
 }
inline int
VMS__isSemanticReqst( VMSReqst *req )
 {
   return ( req->reqType == semantic );
 }

inline void *
VMS__take_sem_reqst_from( VMSReqst *req )
 {
   return req->semReqData;
 }

inline int
VMS__isDissipateReqst( VMSReqst *req )
 {
   return ( req->reqType == dissipate );
 }

inline int
VMS__isCreateReqst( VMSReqst *req )
 {
   return ( req->reqType == regCreated );
 }
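/*Illustrative only -- one possible shape for a semantic-layer request
 * handler loop built from the calls above.  The handler itself is a plug-in
 * that lives outside this file; semPr and handle_sem_reqst are hypothetical
 * names, and a real handler would also dispatch on regCreated requests.
 */
/*
   VMSReqst *req;

   req = VMS__take_top_request_from( semPr );
   while( req != NULL )
    {
      if( VMS__isSemanticReqst( req ) )
         handle_sem_reqst( VMS__take_sem_reqst_from( req ), semPr );
      else if( VMS__isDissipateReqst( req ) )
       { VMS__free_request( req );
         VMS__handle_dissipate_reqst( semPr ); //frees semPr's stack & struct,
         return;                               // so don't touch semPr after
       }
      VMS__free_request( req );                 //free the handled carrier...
      req = VMS__take_top_request_from( semPr );// ...then fetch the next one
    }
*/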
void
VMS__send_req_to_register_new_procr( VirtProcr *newPr, VirtProcr *reqstingPr )
 { VMSReqst *req;

   req = malloc( sizeof(VMSReqst) );
   req->reqType    = regCreated;
   req->semReqData = newPr;
   req->nextReqst  = reqstingPr->requests;
   reqstingPr->requests = req;

   VMS__suspend_procr( reqstingPr );
 }
/*This must be called by the request handler plugin -- it cannot be called
 * from the semantic library "dissipate processor" function -- instead, the
 * semantic layer has to generate a request for the plug-in to call this
 * function.
 *The reason is that this frees the virtual processor's stack -- which is
 * still in use inside semantic library calls!
 *
 *This frees or recycles all the state owned by and comprising the VMS
 * portion of the animating virtual procr.  The request handler must first
 * free any semantic data created for the processor that didn't use the
 * VMS_malloc mechanism.  Then it calls this, which first asks the malloc
 * system to disown any state that did use VMS_malloc, and then frees the
 * stack and the processor-struct itself.
 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
 * state, then that state gets freed (or sent to recycling) as a side-effect
 * of dis-owning it.
 */
void
VMS__handle_dissipate_reqst( VirtProcr *animatingPr )
 {
      //dis-own all locations owned by this processor, causing to be freed
      // any locations that it is (was) sole owner of
      //TODO: implement VMS__malloc system, including "give up ownership"

      //The dissipate request might still be attached, so remove and free it
   VMS__free_top_and_give_next_request_from( animatingPr );

      //NOTE: initialData was given to the processor, so should either have
      // been alloc'd with VMS__malloc, or freed by the level above animPr.
      //So, all that's left to free here is the stack and the VirtProcr struc
      // itself
   free( animatingPr->startOfStack );
   free( animatingPr );
 }
//TODO: re-architect so that there's a clean separation between request handler
// and master loop, for dissipate, create, shutdown, and other non-semantic
// requests.  The issue is the chain: one piece removes requests from the AppVP,
// one dispatches on the type of request, and one handles each type -- but some
// types require action from both the request handler and the master loop --
// maybe just give the request handler calls like: VMS__handle_X_request_type

void
endOSThreadFn( void *initData, VirtProcr *animatingPr );
/*This is called by the semantic layer's request handler when it decides it's
 * time to shut down the VMS system.  Calling this causes the core loop OS
 * threads to exit, which unblocks the entry-point function that started up
 * VMS, and allows it to grab the result and return to the original single-
 * threaded application.
 *
 *The _VMSMasterEnv is needed by this shut-down function, so the create-seed-
 * and-wait function has to free a bunch of stuff after it detects that the
 * threads have all died: the masterEnv, the thread-related locations,
 * masterVP, any AppVPs that might still be allocated and sitting in the
 * semantic environment, or have been orphaned in the _VMSWorkQ.
 *
 *NOTE: the semantic plug-in is expected to use VMS__malloc_to to get all the
 * locations it needs, and give ownership to masterVP.  Then, they will be
 * automatically freed when the masterVP is dissipated.  (This happens after
 * the core loop threads have all exited)
 *
 *In here, create one core-loop shut-down processor for each core loop and put
 * them all directly into the readyToAnimateQ.
 *Note, this function can ONLY be called after the semantic environment no
 * longer cares if AppVPs get animated after the point this is called.  In
 * other words, this can be used as an abort, or else it should only be
 * called when all AppVPs have finished dissipate requests -- only at that
 * point is it sure that all results have completed.
 */
void
VMS__handle_shutdown_reqst( void *dummy, VirtProcr *animatingPr )
 { int        coreIdx;
   VirtProcr *shutDownPr;

      //create the shutdown processors, one for each core loop -- put them
      // directly into the Q -- each core will die when it gets one
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      shutDownPr = VMS__create_procr( &endOSThreadFn, NULL );
      writeSRSWQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] );
    }
 }
/*Am trying to be cute, avoiding an IF statement in coreLoop that checks for
 * a special shutdown procr.  Ended up with an extra-complex shutdown sequence.
 *This function has the sole purpose of setting the stack and frame ptr
 * to the coreLoop's stack and frame ptr.  It does that, then jumps to the
 * core loop's shutdown point -- might be able to just call pthread_exit
 * from here, but am going back to the pthread's stack and setting everything
 * up just as if it never jumped out, before calling pthread_exit.
 *The end-point of the core loop will free the stack and so forth of the
 * processor that animates this function (this fn is transferring the
 * animator of the AppVP that is in turn animating this function over
 * to the core loop function -- note that this slices out a level of virtual
 * processors).
 */
void
endOSThreadFn( void *initData, VirtProcr *animatingPr )
 { void *jmpPt, *coreLoopStackPtr, *coreLoopFramePtr;

   jmpPt            = _VMSMasterEnv->coreLoopEndPt;
   coreLoopStackPtr = animatingPr->coreLoopStackPtr;
   coreLoopFramePtr = animatingPr->coreLoopFramePtr;

   asm volatile("movl %0, %%eax; \
                 movl %1, %%esp; \
                 movl %2, %%ebp; \
                 jmp  %%eax " \
       /* outputs */ : \
       /* inputs  */ : "m" (jmpPt), "m" (coreLoopStackPtr), "m" (coreLoopFramePtr)\
       /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi" \
       );
 }
/*This is called after the threads have shut down and control has returned
 * to the semantic layer, in the entry point function in the main thread.
 * It has to free anything allocated during VMS__init, and any other alloc'd
 * locations that might be left over.
 */
void
VMS__cleanup_after_shutdown()
 {
   SRSWQueueStruc **readyToAnimateQs;
   int              coreIdx;
   VirtProcr      **masterVPs;
   SchedSlot     ***allSchedSlots; //ptr to array of ptrs

   readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs;
   masterVPs        = _VMSMasterEnv->masterVPs;
   allSchedSlots    = _VMSMasterEnv->allSchedSlots;

   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      freeSRSWQ( readyToAnimateQs[ coreIdx ] );

      VMS__handle_dissipate_reqst( masterVPs[ coreIdx ] );

      freeSchedSlots( allSchedSlots[ coreIdx ] );
    }
   free( _VMSMasterEnv->readyToAnimateQs );
   free( _VMSMasterEnv->masterVPs );
   free( _VMSMasterEnv->allSchedSlots );

   free( _VMSMasterEnv );
 }
//===========================================================================

inline TSCount getTSCount()
 { unsigned int low, high;
   TSCount      out;

   saveTimeStampCountInto( low, high );
   out = high;
   out = (out << 32) + low;
   return out;
 }
