view VMS.c @ 40:1df8d7f2c9b1

Added measurement of suspend time and master time. Weird suspend-time histogram -- will try moving hist update out of coreloop and into app
author Me
date Sat, 11 Sep 2010 03:26:07 -0700
parents 17d20e5cf924
children cf3e9238aeb0
line source
/*
 * Copyright 2010 OpenSourceStewardshipFoundation
 *
 * Licensed under BSD
 */

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <pthread.h>   //pthread_mutex_t, pthread_create, pthread_join

#include "VMS.h"
#include "Queue_impl/BlockingQueue.h"
#include "Histogram/Histogram.h"

#define thdAttrs NULL
//===========================================================================
void
shutdownFn( void *dummy, VirtProcr *dummy2 );

SchedSlot **
create_sched_slots();

void
create_masterEnv();

void
create_the_coreLoop_OS_threads();

pthread_mutex_t suspendLock  = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t  suspend_cond = PTHREAD_COND_INITIALIZER;
//===========================================================================

/*Setup has two phases:
 * 1) Semantic layer first calls VMS__init, which creates masterEnv, and puts
 *    the master virt procr into the work-queue, ready for first "call"
 * 2) Semantic layer then does its own init, which creates the seed virt
 *    procr inside the semantic layer, ready to schedule it when
 *    asked by the first run of the masterLoop.
 *
 *This part is a bit weird because VMS really wants to be "always there", and
 * have applications attach and detach. For now, this VMS is part of
 * the app, so the VMS system starts up as part of running the app.
 *
 *The semantic layer is isolated from the VMS internals by making the
 * semantic layer do setup to a state where it's ready with its
 * initial virt procrs, ready to schedule them to slots when the masterLoop
 * asks. Without this pattern, the semantic layer's setup would
 * have to modify slots directly to assign the initial virt-procrs, and put
 * them into the readyToAnimateQ itself, breaking the isolation completely.
 *
 *
 *The semantic layer creates the initial virt procr(s), adds its
 * own environment to masterEnv, and fills in the pointers to
 * the requestHandler and slaveScheduler plug-in functions.
 */

/*This allocates VMS data structures, populates the master VMSProc,
 * and master environment, and returns the master environment to the semantic
 * layer.
 */
void
VMS__init()
 {
   create_masterEnv();
   create_the_coreLoop_OS_threads();
 }

/*To initialize the sequential version, just don't create the threads
 */
void
VMS__init_Seq()
 {
   create_masterEnv();
 }
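
/*Illustrative sketch (not part of this file) of how a semantic layer is
 * expected to drive the lifecycle described above. Only the VMS__ calls are
 * real -- they are defined further down in this file; everything named
 * "SemLayer_" is made up here purely for illustration.
 *
 *   int main()
 *    {
 *      VMS__init();                    //phase 1: VMS makes masterEnv + core-loop threads
 *      SemLayer_init();                //phase 2: sem layer makes its seed procr and
 *                                      // fills in requestHandler/slaveScheduler ptrs
 *      VMS__start_the_work_then_wait_until_done(); //runs until a shutdown reqst
 *      SemLayer_cleanup();             //sem layer frees its own environment first
 *      VMS__cleanup_after_shutdown();  //then VMS frees masterEnv and the rest
 *      return 0;
 *    }
 */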
void
create_masterEnv()
 { MasterEnv       *masterEnv;
   SRSWQueueStruc **readyToAnimateQs;
   int              coreIdx;
   VirtProcr      **masterVPs;
   SchedSlot     ***allSchedSlots; //ptr to array of ptrs

      //Make the master env, which holds everything else
   _VMSMasterEnv = malloc( sizeof(MasterEnv) );
   masterEnv     = _VMSMasterEnv;

      //Need to set start pt here 'cause used by seed procr, which is created
      // before the first core loop starts up. -- not sure how yet..
// masterEnv->coreLoopStartPt = ;
// masterEnv->coreLoopEndPt   = ;

      //Make a readyToAnimateQ for each core loop
   readyToAnimateQs = malloc( NUM_CORES * sizeof(SRSWQueueStruc *) );
   masterVPs        = malloc( NUM_CORES * sizeof(VirtProcr *) );

      //One array for each core, 3 in array, core's masterVP scheds all
   allSchedSlots    = malloc( NUM_CORES * sizeof(SchedSlot **) );

   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      readyToAnimateQs[ coreIdx ] = makeSRSWQ();

         //Q: should give masterVP core-specific info as its init data?
      masterVPs[ coreIdx ] = VMS__create_procr( &masterLoop, masterEnv );
      masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;

      allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core
    }
   _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs;
   _VMSMasterEnv->masterVPs        = masterVPs;
   _VMSMasterEnv->allSchedSlots    = allSchedSlots;

      //Aug 19, 2010: no longer need to place initial masterVP into queue
      // because coreLoop now controls -- animates its masterVP when no work

   //==================== malloc substitute ========================
   //
   //Testing whether malloc is using thread-local storage and therefore
   // causing unreliable behavior.
   //Just allocate a massive chunk of memory and roll own malloc/free and
   // make app use VMS__malloc_to, which will suspend and perform malloc
   // in the master, taking from this massive chunk.

// initFreeList();

   //============================= MEASUREMENT STUFF ========================
   #ifdef MEAS__TIME_STAMP_SUSP
      //RDTSC may run out of order, and so measure a time-span different
      // from the desired time-span -- got some weird changes in suspend
      // hist when added Master hist
   _VMSMasterEnv->measSuspHist = makeHistogram( 25, 110, 1300 );
   #endif

   #ifdef MEAS__TIME_MASTER
   _VMSMasterEnv->measMasterHist = makeHistogram( 25, 500, 800 );
   #endif
   //========================================================================

 }
/*
void
initMasterMalloc()
 {
   _VMSMasterEnv->mallocChunk = malloc( MASSIVE_MALLOC_SIZE );

      //The free-list element is the first several locations of an
      // allocated chunk -- the address given to the application is pre-
      // pended with both the ownership structure and the free-list struc.
      //So, write the values of these into the first locations of
      // mallocChunk -- which marks it as free & puts in its size.
   listElem = (FreeListElem *)_VMSMasterEnv->mallocChunk;
   listElem->size = MASSIVE_MALLOC_SIZE - NUM_PREPEND_BYTES;
   listElem->next = NULL;
 }

void
dissipateMasterMalloc()
 {
      //Just foo code -- to get going -- doing as if free list were link-list
   currElem = _VMSMasterEnv->freeList;
   while( currElem != NULL )
    {
      nextElem = currElem->next;
      masterFree( currElem );
      currElem = nextElem;
    }
   free( _VMSMasterEnv->freeList );
 }
*/
SchedSlot **
create_sched_slots()
 { SchedSlot **schedSlots;
   int         i;

   schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );

   for( i = 0; i < NUM_SCHED_SLOTS; i++ )
    {
      schedSlots[i] = malloc( sizeof(SchedSlot) );

         //Set state to mean "handling requests done, slot needs filling"
      schedSlots[i]->workIsDone          = FALSE;
      schedSlots[i]->needsProcrAssigned  = TRUE;
    }
   return schedSlots;
 }

void
freeSchedSlots( SchedSlot **schedSlots )
 { int i;
   for( i = 0; i < NUM_SCHED_SLOTS; i++ )
    {
      free( schedSlots[i] );
    }
   free( schedSlots );
 }
void
create_the_coreLoop_OS_threads()
 {
   //========================================================================
   // Create the Threads
   int coreIdx, retCode;

      //Need the threads to be created suspended, and wait for a signal
      // before proceeding -- gives time after creating to initialize other
      // stuff before the coreLoops set off.
   _VMSMasterEnv->setupComplete = 0;

      //Make the threads that animate the core loops
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    { coreLoopThdParams[coreIdx] = malloc( sizeof(ThdParams) );
      coreLoopThdParams[coreIdx]->coreNum = coreIdx;

      retCode =
         pthread_create( &(coreLoopThdHandles[coreIdx]),
                         thdAttrs,
                         &coreLoop,
                         (void *)(coreLoopThdParams[coreIdx]) );
      if( retCode ) { printf( "ERROR creating thread: %d\n", retCode ); exit(1); }
    }
 }
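
/*Sketch of the startup gate the comment above assumes coreLoop implements
 * (coreLoop itself lives in another file, so this only illustrates the
 * standard pthread condition-wait pattern being relied on):
 *
 *   pthread_mutex_lock( &suspendLock );
 *   while( !_VMSMasterEnv->setupComplete )
 *      pthread_cond_wait( &suspend_cond, &suspendLock );
 *   pthread_mutex_unlock( &suspendLock );
 *
 *Checking setupComplete inside the loop, while holding suspendLock, is what
 * makes the broadcast in VMS__start_the_work_then_wait_until_done safe even
 * for threads that only reach this point after the broadcast has happened.
 */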
/*Semantic layer calls this when it wants the system to start running.
 *
 *This starts the core loops running, then waits for them to exit.
 */
void
VMS__start_the_work_then_wait_until_done()
 { int coreIdx;
      //Start the core loops running
   //===========================================================================
   TSCount            startCount, endCount;
   unsigned long long count = 0, freq = 0;
   double             runTime;

   startCount = getTSCount();

      //tell the core loop threads that setup is complete
      //get lock, to lock out any threads still starting up -- they'll see
      // that setupComplete is true before entering the while loop, and so never
      // wait on the condition
   pthread_mutex_lock( &suspendLock );
   _VMSMasterEnv->setupComplete = 1;
   pthread_mutex_unlock( &suspendLock );
   pthread_cond_broadcast( &suspend_cond );

      //wait for all to complete
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      pthread_join( coreLoopThdHandles[coreIdx], NULL );
    }

      //NOTE: do not clean up VMS env here -- semantic layer has to have
      // a chance to clean up its environment first, then do a call to free
      // the Master env and rest of VMS locations

   endCount = getTSCount();
   count    = endCount - startCount;

   runTime = (double)count / (double)TSCOUNT_FREQ;

   printf( "\n Time startup to shutdown: %f\n", runTime ); fflush( stdout );
 }
/*The only difference between the version with an OS thread pinned to each core
 * and the sequential version of VMS is VMS__init_Seq, this, and coreLoop_Seq.
 */
void
VMS__start_the_work_then_wait_until_done_Seq()
 {
      //Instead of un-suspending threads, just call the one and only
      // core loop (sequential version), in the main thread.
   coreLoop_Seq( NULL );
 }
/*Create the stack, then create a __cdecl call structure on it and put
 * initialData and a pointer to the new structure instance into the parameter
 * positions on the stack.
 *Then put the function pointer into nextInstrPt -- the stack is set up in std
 * call structure, so jumping to the function ptr is the same as a GCC generated
 * function call.
 *No need to save registers on the old stack frame, because there's no old
 * animator state to return to --
 */
VirtProcr *
VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
 { VirtProcr *newPr;
   char      *stackLocs, *stackPtr;

   newPr = malloc( sizeof(VirtProcr) );
   newPr->procrID     = numProcrsCreated++;
   newPr->nextInstrPt = fnPtr;
   newPr->initialData = initialData;
   newPr->requests    = NULL;
   newPr->schedSlot   = NULL;
// newPr->coreLoopStartPt = _VMSMasterEnv->coreLoopStartPt;

      //fnPtr takes two params -- void *initData & void *animProcr
      //alloc stack locations, make stackPtr be the highest addr minus room
      // for 2 params + return addr. Return addr (NULL) is in loc pointed to
      // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above
   stackLocs = malloc( VIRT_PROCR_STACK_SIZE );
   if( stackLocs == 0 )
    { perror( "malloc stack" ); exit(1); }
   newPr->startOfStack = stackLocs;
   stackPtr = ( (char *)stackLocs + VIRT_PROCR_STACK_SIZE - 0x10 );

      //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp
   *( (int *)stackPtr + 2 ) = (int) newPr;       //rightmost param -- 32bit pointer
   *( (int *)stackPtr + 1 ) = (int) initialData; //next param to left
   newPr->stackPtr = stackPtr; //core loop will switch to this, then
   newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr

   return newPr;
 }
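
/*For illustration, the stack as laid out above at the moment the core loop
 * switches to stackPtr and jumps to fnPtr (32-bit __cdecl; offsets are bytes
 * above stackPtr):
 *
 *   stackPtr + 8 : newPr        -- second (rightmost) parameter
 *   stackPtr + 4 : initialData  -- first parameter
 *   stackPtr + 0 : return addr slot -- per the comment above, never used
 *
 *So from fnPtr's point of view it was "called" normally, as
 * fnPtr( initialData, newPr ), except that it must never return -- a created
 * procr has to end by suspending or dissipating instead.
 */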
/*There is a label inside this function -- save the addr of this label in
 * the callingPr struc, as the pick-up point from which to start the next
 * work-unit for that procr. If it turns out registers have to be saved, then
 * save them in the procr struc too. Then do an assembly jump to the CoreLoop's
 * "done with work-unit" label. The procr struc is in the request in the
 * slave that animated the just-ended work-unit, so all the state is saved
 * there, and will get passed along, inside the request handler, to the
 * next work-unit for that procr.
 */
void
VMS__suspend_procr( VirtProcr *animatingPr )
 { void *jmpPt, *stackPtrAddr, *framePtrAddr, *coreLoopStackPtr;
   void *coreLoopFramePtr;

      //The request to master will cause this suspended virt procr to get
      // scheduled again at some future point -- to resume, core loop jumps
      // to the resume point (below), which causes restore of saved regs and
      // "return" from this call.
   animatingPr->nextInstrPt = &&ResumePt;

      //return ownership of the virt procr and sched slot to Master virt pr
   animatingPr->schedSlot->workIsDone = TRUE;
// coreIdx = callingPr->coreAnimatedBy;

   stackPtrAddr = &(animatingPr->stackPtr);
   framePtrAddr = &(animatingPr->framePtr);

   jmpPt            = _VMSMasterEnv->coreLoopStartPt;
   coreLoopFramePtr = animatingPr->coreLoopFramePtr; //need this only
   coreLoopStackPtr = animatingPr->coreLoopStackPtr; //safety

      //Save the virt procr's stack and frame ptrs
   asm volatile("movl %0, %%eax; \
                 movl %%esp, (%%eax); \
                 movl %1, %%eax; \
                 movl %%ebp, (%%eax) "\
   /* outputs */ : \
   /* inputs  */ : "m" (stackPtrAddr), "m" (framePtrAddr) \
   /* clobber */ : "memory", "%eax" \
               );

   #ifdef MEAS__TIME_STAMP_SUSP
      //record time stamp: compare to time-stamp recorded below, at resume
   saveLowTimeStampCountInto( animatingPr->preSuspTSCLow );
   #endif

      //restore coreloop's frame ptr, then jump back to "start" of core loop
      //Note, GCC compiles to assembly that saves esp and ebp in the stack
      // frame -- so have to explicitly do assembly that saves to memory
   asm volatile("movl %0, %%eax; \
                 movl %1, %%esp; \
                 movl %2, %%ebp; \
                 jmp %%eax " \
   /* outputs */ : \
   /* inputs  */ : "m" (jmpPt), "m" (coreLoopStackPtr), "m" (coreLoopFramePtr)\
   /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi" \
               ); //list everything as clobbered to force GCC to save all
                  // live vars that are in regs on stack before this
                  // assembly, so that stack pointer is correct, before jmp

 ResumePt:
   #ifdef MEAS__TIME_STAMP_SUSP
   saveLowTimeStampCountInto( animatingPr->postSuspTSCLow );
      //Take difference between the pre-suspend and post-suspend times
      // and do sanity check to see if rollover happened between
   int diff = animatingPr->postSuspTSCLow - animatingPr->preSuspTSCLow;
   if( diff > 1000000 ) diff = 0;
   addToHist( diff, _VMSMasterEnv->measSuspHist );
   #endif

   return;
 }
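
/*Orientation only -- the resume side lives in coreLoop (a different file), so
 * this is just a sketch of its presumed shape, mirroring the jump-out above:
 * before animating a procr, the core loop saves its own %esp/%ebp into
 * procr->coreLoopStackPtr/FramePtr, then does the inverse switch-and-jump:
 *
 *   movl procr->stackPtr, %esp
 *   movl procr->framePtr, %ebp
 *   jmp  procr->nextInstrPt        //lands on ResumePt for a suspended procr
 *
 * so execution "returns" from VMS__suspend_procr on the procr's own stack.
 */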
/*
 *This adds a request to dissipate, then suspends the processor so that the
 * request handler will receive the request. The request handler is what
 * does the work of freeing memory and removing the processor from the
 * semantic environment's data structures.
 *The request handler also is what figures out when to shut down the VMS
 * system -- which causes all the core loop threads to die, and returns from
 * the call that started up VMS to perform the work.
 *
 *This form is a bit misleading if one is trying to figure out how VMS
 * works -- it looks like a normal function call, but inside, it sends a
 * request to the request handler and suspends the processor, which jumps
 * out of the VMS__dissipate_procr function, and out of all nestings above
 * it, transferring the work of dissipating to the request handler, which
 * then does the actual work. That causes the processor that animated the
 * call of this function to disappear, and the "hanging" state of this
 * function to just poof into thin air -- the virtual processor's trace
 * never returns from this call; instead the virtual processor's trace
 * gets suspended in this call and all the virt processor's state disap-
 * pears -- making that suspend the last thing in the virt procr's trace.
 */
void
VMS__dissipate_procr( VirtProcr *procrToDissipate )
 { VMSReqst *req;

   req = malloc( sizeof(VMSReqst) );
// req->virtProcrFrom = callingPr;
   req->reqType   = dissipate;
   req->nextReqst = procrToDissipate->requests;
   procrToDissipate->requests = req;

   VMS__suspend_procr( procrToDissipate );
 }
/*This inserts the semantic-layer's request data into a standard VMS carrier
 */
inline void
VMS__add_sem_request( void *semReqData, VirtProcr *callingPr )
 { VMSReqst *req;

   req = malloc( sizeof(VMSReqst) );
// req->virtProcrFrom = callingPr;
   req->reqType    = semantic;
   req->semReqData = semReqData;
   req->nextReqst  = callingPr->requests;
   callingPr->requests = req;
 }
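
/*Illustrative sketch of the pattern VMS__add_sem_request is meant for,
 * assuming a hypothetical semantic-layer call "SemLayer_send" and request
 * struct "SemReq" (both made up here -- only the two VMS__ calls are real):
 *
 *   void SemLayer_send( void *msg, VirtProcr *sendingPr )
 *    { SemReq *semReq;
 *      semReq = malloc( sizeof(SemReq) );
 *      semReq->kind = SEND;
 *      semReq->msg  = msg;
 *      VMS__add_sem_request( semReq, sendingPr ); //wrap in the VMS carrier
 *      VMS__suspend_procr( sendingPr );           //request handler sees it next
 *    }
 */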
/*Use this to get the first request before starting the request handler's loop
 */
VMSReqst *
VMS__take_top_request_from( VirtProcr *procrWithReq )
 { VMSReqst *req;

   req = procrWithReq->requests;
   if( req == NULL ) return req;

   procrWithReq->requests = procrWithReq->requests->nextReqst;
   return req;
 }

/*A subtle bug, caused by freeing a request and then accessing its "next"
 * field after the free, led to this form of call being put in -- so call this
 * at the end of a request handler loop that iterates through the requests.
 */
VMSReqst *
VMS__free_top_and_give_next_request_from( VirtProcr *procrWithReq )
 { VMSReqst *req;

   req = procrWithReq->requests;
   if( req == NULL ) return NULL;

   procrWithReq->requests = procrWithReq->requests->nextReqst;
   VMS__free_request( req );
   return procrWithReq->requests;
 }
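
/*Sketch of the loop shape the two calls above are designed for (the real
 * request handler is a semantic-layer plug-in, and handleSemReq below is a
 * made-up stand-in for it):
 *
 *   VMSReqst *req = VMS__take_top_request_from( procr );
 *   while( req != NULL )
 *    {
 *      if( VMS__isSemanticReqst( req ) )
 *         handleSemReq( VMS__take_sem_reqst_from( req ), procr );
 *      req = VMS__free_top_and_give_next_request_from( procr ); //frees the
 *    }                          // just-handled req before touching its next
 *
 *Dissipate requests are the exception -- they end at
 * VMS__handle_dissipate_reqst, which frees the procr itself, so no further
 * requests can be walked after one is seen.
 */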
//TODO: add a semantic-layer supplied "freer" for the semantic-data portion
// of a request -- IE call with both a virt procr and a fn-ptr to request
// freer (also maybe put sem request freer as a field in virt procr?)
//MeasVMS relies right now on this only freeing VMS layer of request -- the
// semantic portion of request is alloc'd and freed by request handler
void
VMS__free_request( VMSReqst *req )
 {
   free( req );
 }

inline int
VMS__isSemanticReqst( VMSReqst *req )
 {
   return ( req->reqType == semantic );
 }

inline void *
VMS__take_sem_reqst_from( VMSReqst *req )
 {
   return req->semReqData;
 }

inline int
VMS__isDissipateReqst( VMSReqst *req )
 {
   return ( req->reqType == dissipate );
 }

inline int
VMS__isCreateReqst( VMSReqst *req )
 {
   return ( req->reqType == regCreated );
 }

void
VMS__send_req_to_register_new_procr( VirtProcr *newPr, VirtProcr *reqstingPr )
 { VMSReqst *req;

   req = malloc( sizeof(VMSReqst) );
   req->reqType    = regCreated;
   req->semReqData = newPr;
   req->nextReqst  = reqstingPr->requests;
   reqstingPr->requests = req;

   VMS__suspend_procr( reqstingPr );
 }
/*This must be called by the request handler plugin -- it cannot be called
 * from the semantic library "dissipate processor" function -- instead, the
 * semantic layer has to generate a request for the plug-in to call this
 * function.
 *The reason is that this frees the virtual processor's stack -- which is
 * still in use inside semantic library calls!
 *
 *This frees or recycles all the state owned by and comprising the VMS
 * portion of the animating virtual procr. The request handler must first
 * free any semantic data created for the processor that didn't use the
 * VMS_malloc mechanism. Then it calls this, which first asks the malloc
 * system to disown any state that did use VMS_malloc, and then frees the
 * stack and the processor-struct itself.
 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
 * state, then that state gets freed (or sent to recycling) as a side-effect
 * of dis-owning it.
 */
void
VMS__handle_dissipate_reqst( VirtProcr *animatingPr )
 {
      //dis-own all locations owned by this processor, causing to be freed
      // any locations that it is (was) sole owner of
      //TODO: implement VMS__malloc system, including "give up ownership"

      //The dissipate request might still be attached, so remove and free it
   VMS__free_top_and_give_next_request_from( animatingPr );

      //NOTE: initialData was given to the processor, so should either have
      // been alloc'd with VMS__malloc, or freed by the level above animPr.
      //So, all that's left to free here is the stack and the VirtProcr struc
      // itself
   free( animatingPr->startOfStack );
   free( animatingPr );
 }
//TODO: re-architect so that there's a clean separation between the request handler
// and the master loop, for dissipate, create, shutdown, and other non-semantic
// requests. Issue is the chain: one removes requests from the AppVP, one dispatches
// on the type of request, and one handles each type.. but some types require
// action from both the request handler and the master loop -- maybe just give the
// request handler calls like: VMS__handle_X_request_type

void
endOSThreadFn( void *initData, VirtProcr *animatingPr );

/*This is called by the semantic layer's request handler when it decides it's
 * time to shut down the VMS system. Calling this causes the core loop OS
 * threads to exit, which unblocks the entry-point function that started up
 * VMS, and allows it to grab the result and return to the original single-
 * threaded application.
 *
 *The _VMSMasterEnv is needed by this shut down function, so the create-seed-
 * and-wait function has to free a bunch of stuff after it detects that the
 * threads have all died: the masterEnv, the thread-related locations,
 * masterVP, and any AppVPs that might still be allocated and sitting in the
 * semantic environment, or have been orphaned in the _VMSWorkQ.
 *
 *NOTE: the semantic plug-in is expected to use VMS__malloc_to to get all the
 * locations it needs, and give ownership to masterVP. Then, they will be
 * automatically freed when the masterVP is dissipated. (This happens after
 * the core loop threads have all exited)
 *
 *In here, create one core-loop shut-down processor for each core loop and put
 * them all directly into the readyToAnimateQ.
 *Note, this function can ONLY be called after the semantic environment no
 * longer cares if AppVPs get animated after the point this is called. In
 * other words, this can be used as an abort, or else it should only be
 * called when all AppVPs have finished dissipate requests -- only at that
 * point is it sure that all results have completed.
 */
void
VMS__handle_shutdown_reqst( void *dummy, VirtProcr *animatingPr )
 { int        coreIdx;
   VirtProcr *shutDownPr;

      //create the shutdown processors, one for each core loop -- put them
      // directly into the Q -- each core will die when it gets one
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      shutDownPr = VMS__create_procr( &endOSThreadFn, NULL );
      writeSRSWQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] );
    }

 }
/*Am trying to be cute, avoiding an IF statement in coreLoop that checks for
 * a special shutdown procr. Ended up with an extra-complex shutdown sequence.
 *This function has the sole purpose of setting the stack and framePtr
 * to the coreLoop's stack and framePtr.. it does that, then jumps to the
 * core loop's shutdown point -- might be able to just call pthread_exit
 * from here, but am going back to the pthread's stack and setting everything
 * up just as if it never jumped out, before calling pthread_exit.
 *The end-point of the core loop will free the stack and so forth of the
 * processor that animates this function (this fn is transferring the
 * animator of the AppVP that is in turn animating this function over
 * to the core loop function -- note that this slices out a level of virtual
 * processors).
 */
void
endOSThreadFn( void *initData, VirtProcr *animatingPr )
 { void *jmpPt, *coreLoopStackPtr, *coreLoopFramePtr;

   jmpPt            = _VMSMasterEnv->coreLoopEndPt;
   coreLoopStackPtr = animatingPr->coreLoopStackPtr;
   coreLoopFramePtr = animatingPr->coreLoopFramePtr;

   asm volatile("movl %0, %%eax; \
                 movl %1, %%esp; \
                 movl %2, %%ebp; \
                 jmp %%eax " \
   /* outputs */ : \
   /* inputs  */ : "m" (jmpPt), "m" (coreLoopStackPtr), "m" (coreLoopFramePtr)\
   /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi" \
               );
 }
/*This is called after the threads have shut down and control has returned
 * to the semantic layer, in the entry point function in the main thread.
 * It has to free anything allocated during VMS__init, and any other alloc'd
 * locations that might be left over.
 */
void
VMS__cleanup_after_shutdown()
 {
   SRSWQueueStruc **readyToAnimateQs;
   int              coreIdx;
   VirtProcr      **masterVPs;
   SchedSlot     ***allSchedSlots; //ptr to array of ptrs

   readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs;
   masterVPs        = _VMSMasterEnv->masterVPs;
   allSchedSlots    = _VMSMasterEnv->allSchedSlots;

   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      freeSRSWQ( readyToAnimateQs[ coreIdx ] );

      VMS__handle_dissipate_reqst( masterVPs[ coreIdx ] );

      freeSchedSlots( allSchedSlots[ coreIdx ] );
    }

   free( _VMSMasterEnv->readyToAnimateQs );
   free( _VMSMasterEnv->masterVPs );
   free( _VMSMasterEnv->allSchedSlots );

   free( _VMSMasterEnv );
 }
//===========================================================================

inline TSCount getTSCount()
 { unsigned int low, high;
   TSCount      out;

   saveTimeStampCountInto( low, high );
   out = high;
   out = (out << 32) + low;
   return out;
 }
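
/*saveTimeStampCountInto is defined elsewhere (presumably VMS.h); the
 * shift-and-add above assumes it is the usual RDTSC wrapper, something along
 * the lines of:
 *
 *   #define saveTimeStampCountInto( low, high ) \
 *      asm volatile( "rdtsc" : "=a" (low), "=d" (high) )
 *
 * i.e. low gets the lower 32 bits of the time-stamp counter and high the
 * upper 32 bits, so (high << 32) + low reconstructs the full 64-bit count.
 */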