view VMS.c @ 134:a9b72021f053

Distributed memory management w/o free requests working
author Merten Sach <msach@mailbox.tu-berlin.de>
date Fri, 16 Sep 2011 16:19:24 +0200
parents dbfc8382d546
children 0b49fd35afc1
line source
1 /*
2 * Copyright 2010 OpenSourceStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <malloc.h>
11 #include <inttypes.h>
12 #include <sys/time.h>
14 #include "VMS.h"
15 #include "ProcrContext.h"
16 #include "scheduling.h"
17 #include "Queue_impl/BlockingQueue.h"
18 #include "Histogram/Histogram.h"
21 #define thdAttrs NULL
23 //===========================================================================
24 void
25 shutdownFn( void *dummy, VirtProcr *dummy2 );
27 SchedSlot **
28 create_sched_slots();
30 void
31 create_masterEnv();
33 void
34 create_the_coreLoop_OS_threads();
36 MallocProlog *
37 create_free_list();
39 void
40 endOSThreadFn( void *initData, VirtProcr *animatingPr );
42 pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER;
43 pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER;
45 //===========================================================================
47 /*Setup has two phases:
48 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
49 * the master virt procr into the work-queue, ready for first "call"
50 * 2) Semantic layer then does its own init, which creates the seed virt
51 * procr inside the semantic layer, ready to schedule it when
52 * asked by the first run of the masterLoop.
53 *
54 *This part is a bit weird because VMS really wants to be "always there", and
55 * have applications attach and detach.. for now, this VMS is part of
56 * the app, so the VMS system starts up as part of running the app.
57 *
58 *The semantic layer is isolated from the VMS internals by making the
59 * semantic layer do setup to a state that it's ready with its
60 * initial virt procrs, ready to schedule them to slots when the masterLoop
61 * asks. Without this pattern, the semantic layer's setup would
62 * have to modify slots directly to assign the initial virt-procrs, and put
63 * them into the readyToAnimateQ itself, breaking the isolation completely.
64 *
65 *
66 *The semantic layer creates the initial virt procr(s), and adds its
67 * own environment to masterEnv, and fills in the pointers to
68 * the requestHandler and slaveScheduler plug-in functions
69 */
/*Entry point for the threaded build.
 *Builds the master VMS environment (master VPs, sched slots, queues,
 * free lists), then spins up one OS thread per core to run the core loops.
 *The semantic layer calls this before doing its own setup.
 */
void
VMS__init()
 {
    //Order matters: the core-loop threads read _VMSMasterEnv at startup.
   create_masterEnv();
   create_the_coreLoop_OS_threads();
 }
#ifdef SEQUENTIAL
/*Sequential build: identical to VMS__init except no OS threads are made --
 * the single core loop is driven later, directly from the main thread.
 */
void
VMS__init_Seq()
 {
   create_masterEnv();
 }
#endif
94 void
95 create_masterEnv()
96 { MasterEnv *masterEnv;
97 VMSQueueStruc **readyToAnimateQs;
98 int coreIdx;
99 VirtProcr **masterVPs;
100 SchedSlot ***allSchedSlots; //ptr to array of ptrs
103 //Make the master env, which holds everything else
104 _VMSMasterEnv = malloc( sizeof(MasterEnv) );
106 //Very first thing put into the master env is the free-list, seeded
107 // with a massive initial chunk of memory.
108 //After this, all other mallocs are VMS__malloc.
109 int i;
110 for(i=0; i<NUM_CORES; i++)
111 {
112 _VMSMasterEnv->freeListHead[i] = VMS_ext__create_free_list();
113 _VMSMasterEnv->interMasterRequestsFor[i] = NULL;
114 }
115 _VMSMasterEnv->currentMasterProcrID = 0;
118 //============================= MEASUREMENT STUFF ========================
119 #ifdef MEAS__TIME_MALLOC
120 _VMSMasterEnv->mallocTimeHist = makeFixedBinHistExt( 100, 0, 100,
121 "malloc_time_hist");
122 _VMSMasterEnv->freeTimeHist = makeFixedBinHistExt( 80, 0, 100,
123 "free_time_hist");
124 #endif
125 #ifdef MEAS__TIME_PLUGIN
126 _VMSMasterEnv->reqHdlrLowTimeHist = makeFixedBinHistExt( 1000, 0, 100,
127 "plugin_low_time_hist");
128 _VMSMasterEnv->reqHdlrHighTimeHist = makeFixedBinHistExt( 1000, 0, 100,
129 "plugin_high_time_hist");
130 #endif
131 //========================================================================
133 //===================== Only VMS__malloc after this ====================
134 masterEnv = (MasterEnv*)_VMSMasterEnv;
136 //Make a readyToAnimateQ for each core loop
137 readyToAnimateQs = VMS__malloc( NUM_CORES * sizeof(VMSQueueStruc *) );
138 masterVPs = VMS__malloc( NUM_CORES * sizeof(VirtProcr *) );
140 //One array for each core, 3 in array, core's masterVP scheds all
141 allSchedSlots = VMS__malloc( NUM_CORES * sizeof(SchedSlot *) );
143 _VMSMasterEnv->numProcrsCreated = 0; //used by create procr
144 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
145 {
146 readyToAnimateQs[ coreIdx ] = makeVMSQ();
148 //Q: should give masterVP core-specific info as its init data?
149 masterVPs[ coreIdx ] = VMS__create_procr( (VirtProcrFnPtr)&masterLoop, (void*)masterEnv );
150 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;
151 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core
152 _VMSMasterEnv->numMasterInARow[ coreIdx ] = 0;
153 _VMSMasterEnv->workStealingGates[ coreIdx ] = NULL;
154 }
155 _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs;
156 _VMSMasterEnv->masterVPs = masterVPs;
157 _VMSMasterEnv->masterLock = UNLOCKED;
158 _VMSMasterEnv->allSchedSlots = allSchedSlots;
159 _VMSMasterEnv->workStealingLock = UNLOCKED;
162 //Aug 19, 2010: no longer need to place initial masterVP into queue
163 // because coreLoop now controls -- animates its masterVP when no work
166 //============================= MEASUREMENT STUFF ========================
167 #ifdef STATS__TURN_ON_PROBES
168 _VMSMasterEnv->dynIntervalProbesInfo =
169 makePrivDynArrayOfSize( (void***)&(_VMSMasterEnv->intervalProbes), 200);
171 _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS__free );
173 //put creation time directly into master env, for fast retrieval
174 struct timeval timeStamp;
175 gettimeofday( &(timeStamp), NULL);
176 _VMSMasterEnv->createPtInSecs =
177 timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0);
178 #endif
179 #ifdef MEAS__TIME_MASTER_LOCK
180 _VMSMasterEnv->masterLockLowTimeHist = makeFixedBinHist( 50, 0, 2,
181 "master lock low time hist");
182 _VMSMasterEnv->masterLockHighTimeHist = makeFixedBinHist( 50, 0, 100,
183 "master lock high time hist");
184 #endif
186 MakeTheMeasHists();
187 //========================================================================
189 }
191 SchedSlot **
192 create_sched_slots()
193 { SchedSlot **schedSlots;
194 int i;
196 schedSlots = VMS__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
198 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
199 {
200 schedSlots[i] = VMS__malloc( sizeof(SchedSlot) );
202 //Set state to mean "handling requests done, slot needs filling"
203 schedSlots[i]->workIsDone = FALSE;
204 schedSlots[i]->needsProcrAssigned = TRUE;
205 }
206 return schedSlots;
207 }
210 void
211 freeSchedSlots( SchedSlot **schedSlots )
212 { int i;
213 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
214 {
215 VMS__free( schedSlots[i] );
216 }
217 VMS__free( schedSlots );
218 }
221 void
222 create_the_coreLoop_OS_threads()
223 {
224 //========================================================================
225 // Create the Threads
226 int coreIdx, retCode;
228 //Need the threads to be created suspended, and wait for a signal
229 // before proceeding -- gives time after creating to initialize other
230 // stuff before the coreLoops set off.
231 _VMSMasterEnv->setupComplete = 0;
233 //Make the threads that animate the core loops
234 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
235 { coreLoopThdParams[coreIdx] = VMS__malloc( sizeof(ThdParams) );
236 coreLoopThdParams[coreIdx]->coreNum = coreIdx;
238 retCode =
239 pthread_create( &(coreLoopThdHandles[coreIdx]),
240 thdAttrs,
241 &coreLoop,
242 (void *)(coreLoopThdParams[coreIdx]) );
243 if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(1);}
244 }
245 }
247 /*Semantic layer calls this when it want the system to start running..
248 *
249 *This starts the core loops running then waits for them to exit.
250 */
251 void
252 VMS__start_the_work_then_wait_until_done()
253 { int coreIdx;
254 //Start the core loops running
256 //tell the core loop threads that setup is complete
257 //get lock, to lock out any threads still starting up -- they'll see
258 // that setupComplete is true before entering while loop, and so never
259 // wait on the condition
260 pthread_mutex_lock( &suspendLock );
261 _VMSMasterEnv->setupComplete = 1;
262 pthread_mutex_unlock( &suspendLock );
263 pthread_cond_broadcast( &suspend_cond );
266 //wait for all to complete
267 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
268 {
269 pthread_join( coreLoopThdHandles[coreIdx], NULL );
270 }
272 //NOTE: do not clean up VMS env here -- semantic layer has to have
273 // a chance to clean up its environment first, then do a call to free
274 // the Master env and rest of VMS locations
275 }
#ifdef SEQUENTIAL
/*Sequential counterpart: the only differences from the threaded build are
 * VMS__init_Seq, this function, and coreLoop_Seq.
 */
void
VMS__start_the_work_then_wait_until_done_Seq()
 {
    //No threads to un-suspend -- just run the one and only core loop
    // (sequential version) right here in the main thread.
   coreLoop_Seq( NULL );
   flushRegisters();   //force registers to memory across the asm switches
 }
#endif
292 inline VirtProcr *
293 VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
294 { VirtProcr *newPr;
295 void *stackLocs;
297 newPr = VMS__malloc( sizeof(VirtProcr) );
298 stackLocs = VMS__malloc( VIRT_PROCR_STACK_SIZE );
299 if( stackLocs == 0 )
300 { perror("VMS__malloc stack"); exit(1); }
302 return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
303 }
305 /* "ext" designates that it's for use outside the VMS system -- should only
306 * be called from main thread or other thread -- never from code animated by
307 * a VMS virtual processor.
308 */
309 inline VirtProcr *
310 VMS_ext__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
311 { VirtProcr *newPr;
312 char *stackLocs;
314 newPr = malloc( sizeof(VirtProcr) );
315 stackLocs = malloc( VIRT_PROCR_STACK_SIZE );
316 if( stackLocs == 0 )
317 { perror("malloc stack"); exit(1); }
319 return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
320 }
323 /*Anticipating multi-tasking
324 */
325 void *
326 VMS__give_sem_env_for( VirtProcr *animPr )
327 {
328 return _VMSMasterEnv->semanticEnv;
329 }
330 //===========================================================================
331 /*there is a label inside this function -- save the addr of this label in
332 * the callingPr struc, as the pick-up point from which to start the next
333 * work-unit for that procr. If turns out have to save registers, then
334 * save them in the procr struc too. Then do assembly jump to the CoreLoop's
335 * "done with work-unit" label. The procr struc is in the request in the
336 * slave that animated the just-ended work-unit, so all the state is saved
337 * there, and will get passed along, inside the request handler, to the
338 * next work-unit for that procr.
339 */
340 void
341 VMS__suspend_procr( VirtProcr *animatingPr )
342 {
344 //The request to master will cause this suspended virt procr to get
345 // scheduled again at some future point -- to resume, core loop jumps
346 // to the resume point (below), which causes restore of saved regs and
347 // "return" from this call.
348 //animatingPr->nextInstrPt = &&ResumePt;
350 //return ownership of the virt procr and sched slot to Master virt pr
351 animatingPr->schedSlot->workIsDone = TRUE;
353 //=========================== Measurement stuff ========================
354 #ifdef MEAS__TIME_STAMP_SUSP
355 //record time stamp: compare to time-stamp recorded below
356 saveLowTimeStampCountInto( animatingPr->preSuspTSCLow );
357 #endif
358 //=======================================================================
360 switchToCoreLoop(animatingPr);
361 flushRegisters();
363 //=======================================================================
365 #ifdef MEAS__TIME_STAMP_SUSP
366 //NOTE: only take low part of count -- do sanity check when take diff
367 saveLowTimeStampCountInto( animatingPr->postSuspTSCLow );
368 #endif
370 return;
371 }
375 /*For this implementation of VMS, it may not make much sense to have the
376 * system of requests for creating a new processor done this way.. but over
377 * the scope of single-master, multi-master, mult-tasking, OS-implementing,
378 * distributed-memory, and so on, this gives VMS implementation a chance to
379 * do stuff before suspend, in the AppVP, and in the Master before the plugin
380 * is called, as well as in the lang-lib before this is called, and in the
381 * plugin. So, this gives both VMS and language implementations a chance to
382 * intercept at various points and do order-dependent stuff.
383 *Having a standard VMSNewPrReqData struc allows the language to create and
384 * free the struc, while VMS knows how to get the newPr if it wants it, and
385 * it lets the lang have lang-specific data related to creation transported
386 * to the plugin.
387 */
388 void
389 VMS__send_create_procr_req( void *semReqData, VirtProcr *reqstingPr )
390 { VMSReqst req;
392 req.reqType = createReq;
393 req.semReqData = semReqData;
394 req.nextReqst = reqstingPr->requests;
395 reqstingPr->requests = &req;
397 VMS__suspend_procr( reqstingPr );
398 }
401 /*
402 *This adds a request to dissipate, then suspends the processor so that the
403 * request handler will receive the request. The request handler is what
404 * does the work of freeing memory and removing the processor from the
405 * semantic environment's data structures.
406 *The request handler also is what figures out when to shutdown the VMS
407 * system -- which causes all the core loop threads to die, and returns from
408 * the call that started up VMS to perform the work.
409 *
410 *This form is a bit misleading to understand if one is trying to figure out
411 * how VMS works -- it looks like a normal function call, but inside it
412 * sends a request to the request handler and suspends the processor, which
413 * jumps out of the VMS__dissipate_procr function, and out of all nestings
414 * above it, transferring the work of dissipating to the request handler,
415 * which then does the actual work -- causing the processor that animated
416 * the call of this function to disappear and the "hanging" state of this
417 * function to just poof into thin air -- the virtual processor's trace
418 * never returns from this call, but instead the virtual processor's trace
419 * gets suspended in this call and all the virt processor's state disap-
420 * pears -- making that suspend the last thing in the virt procr's trace.
421 */
422 void
423 VMS__send_dissipate_req( VirtProcr *procrToDissipate )
424 { VMSReqst req;
426 req.reqType = dissipate;
427 req.nextReqst = procrToDissipate->requests;
428 procrToDissipate->requests = &req;
430 VMS__suspend_procr( procrToDissipate );
431 }
434 /* "ext" designates that it's for use outside the VMS system -- should only
435 * be called from main thread or other thread -- never from code animated by
436 * a VMS virtual processor.
437 *
438 *Use this version to dissipate VPs created outside the VMS system.
439 */
440 void
441 VMS_ext__dissipate_procr( VirtProcr *procrToDissipate )
442 {
443 //NOTE: initialData was given to the processor, so should either have
444 // been alloc'd with VMS__malloc, or freed by the level above animPr.
445 //So, all that's left to free here is the stack and the VirtProcr struc
446 // itself
447 //Note, should not stack-allocate initial data -- no guarantee, in
448 // general that creating processor will outlive ones it creates.
449 free( procrToDissipate->startOfStack );
450 free( procrToDissipate );
451 }
455 /*This call's name indicates that request is malloc'd -- so req handler
456 * has to free any extra requests tacked on before a send, using this.
457 *
458 * This inserts the semantic-layer's request data into standard VMS carrier
459 * request data-struct that is mallocd. The sem request doesn't need to
460 * be malloc'd if this is called inside the same call chain before the
461 * send of the last request is called.
462 *
463 *The request handler has to call VMS__free_VMSReq for any of these
464 */
465 inline void
466 VMS__add_sem_request_in_mallocd_VMSReqst( void *semReqData,
467 VirtProcr *callingPr )
468 { VMSReqst *req;
470 req = VMS__malloc( sizeof(VMSReqst) );
471 req->reqType = semantic;
472 req->semReqData = semReqData;
473 req->nextReqst = callingPr->requests;
474 callingPr->requests = req;
475 }
477 /*This inserts the semantic-layer's request data into standard VMS carrier
478 * request data-struct is allocated on stack of this call & ptr to it sent
479 * to plugin
480 *Then it does suspend, to cause request to be sent.
481 */
482 inline void
483 VMS__send_sem_request( void *semReqData, VirtProcr *callingPr )
484 { VMSReqst req;
486 req.reqType = semantic;
487 req.semReqData = semReqData;
488 req.nextReqst = callingPr->requests;
489 callingPr->requests = &req;
491 VMS__suspend_procr( callingPr );
492 }
495 inline void
496 VMS__send_VMSSem_request( void *semReqData, VirtProcr *callingPr )
497 { VMSReqst req;
499 req.reqType = VMSSemantic;
500 req.semReqData = semReqData;
501 req.nextReqst = callingPr->requests; //gab any other preceeding
502 callingPr->requests = &req;
504 VMS__suspend_procr( callingPr );
505 }
507 void inline
508 VMS__send_inter_plugin_req( void *reqData, int32 targetMaster,
509 VirtProcr *requestingMaster )
510 { _VMSMasterEnv->interMasterRequestsFor[targetMaster] =
511 (InterMasterReqst *) reqData;
512 }
514 void inline
515 VMS__send_inter_VMSCore_req( InterVMSCoreReqst *reqData,
516 int32 targetMaster, VirtProcr *requestingMaster )
517 { _VMSMasterEnv->interMasterRequestsFor[targetMaster] =
518 (InterMasterReqst *) reqData;
519 }
521 /*
522 */
523 VMSReqst *
524 VMS__take_next_request_out_of( VirtProcr *procrWithReq )
525 { VMSReqst *req;
527 req = procrWithReq->requests;
528 if( req == NULL ) return NULL;
530 procrWithReq->requests = procrWithReq->requests->nextReqst;
531 return req;
532 }
535 inline void *
536 VMS__take_sem_reqst_from( VMSReqst *req )
537 {
538 return req->semReqData;
539 }
543 /* This is for OS requests and VMS infrastructure requests, such as to create
544 * a probe -- a probe is inside the heart of VMS-core, it's not part of any
545 * language -- but it's also a semantic thing that's triggered from and used
546 * in the application.. so it crosses abstractions.. so, need some special
547 * pattern here for handling such requests.
548 * Doing this just like it were a second language sharing VMS-core.
549 *
550 * This is called from the language's request handler when it sees a request
551 * of type VMSSemReq
552 *
553 * TODO: Later change this, to give probes their own separate plugin & have
554 * VMS-core steer the request to appropriate plugin
555 * Do the same for OS calls -- look later at it..
556 */
557 void inline
558 VMS__handle_VMSSemReq( VMSReqst *req, VirtProcr *requestingPr, void *semEnv,
559 ResumePrFnPtr resumePrFnPtr )
560 { VMSSemReq *semReq;
561 IntervalProbe *newProbe;
563 semReq = req->semReqData;
565 newProbe = VMS__malloc( sizeof(IntervalProbe) );
566 newProbe->nameStr = VMS__strDup( semReq->nameStr );
567 newProbe->hist = NULL;
568 newProbe->schedChoiceWasRecorded = FALSE;
570 //This runs in masterVP, so no race-condition worries
571 newProbe->probeID =
572 addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo );
574 requestingPr->dataRetFromReq = newProbe;
576 (*resumePrFnPtr)( requestingPr, semEnv );
577 }
581 /*This must be called by the request handler plugin -- it cannot be called
582 * from the semantic library "dissipate processor" function -- instead, the
583 * semantic layer has to generate a request, and the plug-in calls this
584 * function.
585 *The reason is that this frees the virtual processor's stack -- which is
586 * still in use inside semantic library calls!
587 *
588 *This frees or recycles all the state owned by and comprising the VMS
589 * portion of the animating virtual procr. The request handler must first
590 * free any semantic data created for the processor that didn't use the
591 * VMS_malloc mechanism. Then it calls this, which first asks the malloc
592 * system to disown any state that did use VMS_malloc, and then frees the
593 * statck and the processor-struct itself.
594 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
595 * state, then that state gets freed (or sent to recycling) as a side-effect
596 * of dis-owning it.
597 */
598 void
599 VMS__dissipate_procr( VirtProcr *animatingPr )
600 {
601 //dis-own all locations owned by this processor, causing to be freed
602 // any locations that it is (was) sole owner of
603 //TODO: implement VMS__malloc system, including "give up ownership"
606 //NOTE: initialData was given to the processor, so should either have
607 // been alloc'd with VMS__malloc, or freed by the level above animPr.
608 //So, all that's left to free here is the stack and the VirtProcr struc
609 // itself
610 //Note, should not stack-allocate initial data -- no guarantee, in
611 // general that creating processor will outlive ones it creates.
612 VMS__free( animatingPr->startOfStack );
613 VMS__free( animatingPr );
614 }
617 //TODO: look at architecting cleanest separation between request handler
618 // and master loop, for dissipate, create, shutdown, and other non-semantic
619 // requests. Issue is chain: one removes requests from AppVP, one dispatches
620 // on type of request, and one handles each type.. but some types require
621 // action from both request handler and master loop -- maybe just give the
622 // request handler calls like: VMS__handle_X_request_type
625 /*This is called by the semantic layer's request handler when it decides its
626 * time to shut down the VMS system. Calling this causes the core loop OS
627 * threads to exit, which unblocks the entry-point function that started up
628 * VMS, and allows it to grab the result and return to the original single-
629 * threaded application.
630 *
631 *The _VMSMasterEnv is needed by this shut down function, so the create-seed-
632 * and-wait function has to free a bunch of stuff after it detects the
633 * threads have all died: the masterEnv, the thread-related locations,
634 * masterVP any AppVPs that might still be allocated and sitting in the
635 * semantic environment, or have been orphaned in the _VMSWorkQ.
636 *
637 *NOTE: the semantic plug-in is expected to use VMS__malloc to get all the
638 * locations it needs, and give ownership to masterVP. Then, they will be
639 * automatically freed.
640 *
641 *In here,create one core-loop shut-down processor for each core loop and put
642 * them all directly into the readyToAnimateQ.
643 *Note, this function can ONLY be called after the semantic environment no
644 * longer cares if AppVPs get animated after the point this is called. In
645 * other words, this can be used as an abort, or else it should only be
646 * called when all AppVPs have finished dissipate requests -- only at that
647 * point is it sure that all results have completed.
648 */
649 void
650 VMS__shutdown()
651 { int coreIdx;
652 VirtProcr *shutDownPr;
654 //create the shutdown processors, one for each core loop -- put them
655 // directly into the Q -- each core will die when gets one
656 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
657 { //Note, this is running in the master
658 shutDownPr = VMS__create_procr( &endOSThreadFn, NULL );
659 writeVMSQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] );
660 }
662 }
665 /*Am trying to be cute, avoiding IF statement in coreLoop that checks for
666 * a special shutdown procr. Ended up with extra-complex shutdown sequence.
667 *This function has the sole purpose of setting the stack and framePtr
668 * to the coreLoop's stack and framePtr.. it does that then jumps to the
669 * core loop's shutdown point -- might be able to just call Pthread_exit
670 * from here, but am going back to the pthread's stack and setting everything
671 * up just as if it never jumped out, before calling pthread_exit.
672 *The end-point of core loop will free the stack and so forth of the
673 * processor that animates this function, (this fn is transfering the
674 * animator of the AppVP that is in turn animating this function over
675 * to core loop function -- note that this slices out a level of virtual
676 * processors).
677 */
678 void
679 endOSThreadFn( void *initData, VirtProcr *animatingPr )
680 {
681 #ifdef SEQUENTIAL
682 asmTerminateCoreLoopSeq(animatingPr);
683 #else
684 asmTerminateCoreLoop(animatingPr);
685 #endif
686 }
689 /*This is called from the startup & shutdown
690 */
691 void
692 VMS__cleanup_at_end_of_shutdown()
693 {
694 //unused
695 //VMSQueueStruc **readyToAnimateQs;
696 //int coreIdx;
697 //VirtProcr **masterVPs;
698 //SchedSlot ***allSchedSlots; //ptr to array of ptrs
700 //Before getting rid of everything, print out any measurements made
701 forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, (DynArrayFnPtr)&printHist );
702 forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, (DynArrayFnPtr)&saveHistToFile);
703 //forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, &freeHistExt );
704 #ifdef MEAS__TIME_PLUGIN
705 printHist( _VMSMasterEnv->reqHdlrLowTimeHist );
706 saveHistToFile( _VMSMasterEnv->reqHdlrLowTimeHist );
707 printHist( _VMSMasterEnv->reqHdlrHighTimeHist );
708 saveHistToFile( _VMSMasterEnv->reqHdlrHighTimeHist );
709 freeHistExt( _VMSMasterEnv->reqHdlrLowTimeHist );
710 freeHistExt( _VMSMasterEnv->reqHdlrHighTimeHist );
711 #endif
712 #ifdef MEAS__TIME_MALLOC
713 printHist( _VMSMasterEnv->mallocTimeHist );
714 saveHistToFile( _VMSMasterEnv->mallocTimeHist );
715 printHist( _VMSMasterEnv->freeTimeHist );
716 saveHistToFile( _VMSMasterEnv->freeTimeHist );
717 freeHistExt( _VMSMasterEnv->mallocTimeHist );
718 freeHistExt( _VMSMasterEnv->freeTimeHist );
719 #endif
720 #ifdef MEAS__TIME_MASTER_LOCK
721 printHist( _VMSMasterEnv->masterLockLowTimeHist );
722 printHist( _VMSMasterEnv->masterLockHighTimeHist );
723 #endif
724 #ifdef MEAS__TIME_MASTER
725 printHist( _VMSMasterEnv->pluginTimeHist );
726 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
727 {
728 freeVMSQ( readyToAnimateQs[ coreIdx ] );
729 //master VPs were created external to VMS, so use external free
730 VMS__dissipate_procr( masterVPs[ coreIdx ] );
732 freeSchedSlots( allSchedSlots[ coreIdx ] );
733 }
734 #endif
735 #ifdef MEAS__TIME_STAMP_SUSP
736 printHist( _VMSMasterEnv->pluginTimeHist );
737 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
738 {
739 freeVMSQ( readyToAnimateQs[ coreIdx ] );
740 //master VPs were created external to VMS, so use external free
741 VMS__dissipate_procr( masterVPs[ coreIdx ] );
743 freeSchedSlots( allSchedSlots[ coreIdx ] );
744 }
745 #endif
747 //All the environment data has been allocated with VMS__malloc, so just
748 // free its internal big-chunk and all inside it disappear.
749 /*
750 readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs;
751 masterVPs = _VMSMasterEnv->masterVPs;
752 allSchedSlots = _VMSMasterEnv->allSchedSlots;
754 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
755 {
756 freeVMSQ( readyToAnimateQs[ coreIdx ] );
757 //master VPs were created external to VMS, so use external free
758 VMS__dissipate_procr( masterVPs[ coreIdx ] );
760 freeSchedSlots( allSchedSlots[ coreIdx ] );
761 }
763 VMS__free( _VMSMasterEnv->readyToAnimateQs );
764 VMS__free( _VMSMasterEnv->masterVPs );
765 VMS__free( _VMSMasterEnv->allSchedSlots );
767 //============================= MEASUREMENT STUFF ========================
768 #ifdef STATS__TURN_ON_PROBES
769 freeDynArrayDeep( _VMSMasterEnv->dynIntervalProbesInfo, &VMS__free_probe);
770 #endif
771 //========================================================================
772 */
773 //These are the only two that use system free
774 VMS_ext__free_free_list( _VMSMasterEnv->freeListHead);
775 free( (void *)_VMSMasterEnv );
776 }
779 //================================
782 /*Later, improve this -- for now, just exits the application after printing
783 * the error message.
784 */
785 void
786 VMS__throw_exception( char *msgStr, VirtProcr *reqstPr, VMSExcp *excpData )
787 {
788 printf("%s",msgStr);
789 fflush(stdin);
790 exit(1);
791 }