Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VMS_impls > VMS__MC_shared_impl
view VMS.c @ 135:0b49fd35afc1
distributed free working
-app sends a VMSSemReqst to its Master, which sends a request to a different Master
-a Master sends the request directly
-The request structure is freed by the sender once the request has been handled
There are still problems on shutdown. The shutdownVPs are all allocated by one Master, which is likely to be terminated
| author | Merten Sach <msach@mailbox.tu-berlin.de> |
|---|---|
| date | Fri, 16 Sep 2011 20:08:28 +0200 |
| parents | a9b72021f053 |
| children | f1374d5dbb98 |
line source
1 /*
2 * Copyright 2010 OpenSourceStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <malloc.h>
11 #include <inttypes.h>
12 #include <sys/time.h>
14 #include "VMS.h"
15 #include "ProcrContext.h"
16 #include "scheduling.h"
17 #include "Queue_impl/BlockingQueue.h"
18 #include "Histogram/Histogram.h"
#define thdAttrs NULL   //default pthread attributes for the core-loop threads

//===========================================================================
//Forward declarations of file-local setup/teardown helpers

//body of a shutdown VP (not defined in this file -- see endOSThreadFn,
// which is the variant actually used here)
void
shutdownFn( void *dummy, VirtProcr *dummy2 );

//makes the NUM_SCHED_SLOTS slot array for one core
SchedSlot **
create_sched_slots();

//builds _VMSMasterEnv and everything hanging off it
void
create_masterEnv();

//spawns one OS thread per core, each running coreLoop
void
create_the_coreLoop_OS_threads();

//seeds a VMS__malloc free list
//NOTE(review): only VMS_ext__create_free_list is called in this file --
// confirm this declaration is still needed
MallocProlog *
create_free_list();

//body of the per-core shutdown VPs created by VMS__shutdown
void
endOSThreadFn( void *initData, VirtProcr *animatingPr );

//Startup gate: core-loop threads wait on suspend_cond until
// VMS__start_the_work_then_wait_until_done sets setupComplete under
// suspendLock and broadcasts.
pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER;
45 //===========================================================================
47 /*Setup has two phases:
48 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
49 * the master virt procr into the work-queue, ready for first "call"
50 * 2) Semantic layer then does its own init, which creates the seed virt
51 * procr inside the semantic layer, ready to schedule it when
52 * asked by the first run of the masterLoop.
53 *
54 *This part is bit weird because VMS really wants to be "always there", and
55 * have applications attach and detach.. for now, this VMS is part of
56 * the app, so the VMS system starts up as part of running the app.
57 *
58 *The semantic layer is isolated from the VMS internals by making the
59 * semantic layer do setup to a state that it's ready with its
60 * initial virt procrs, ready to schedule them to slots when the masterLoop
61 * asks. Without this pattern, the semantic layer's setup would
62 * have to modify slots directly to assign the initial virt-procrs, and put
63 * them into the readyToAnimateQ itself, breaking the isolation completely.
64 *
65 *
66 *The semantic layer creates the initial virt procr(s), and adds its
67 * own environment to masterEnv, and fills in the pointers to
68 * the requestHandler and slaveScheduler plug-in functions
69 */
/*Entry point for multi-threaded VMS startup.
 *Builds the master environment (master VPs, queues, sched slots, free
 * lists), then spawns one core-loop OS thread per core.  The threads
 * block on suspend_cond until
 * VMS__start_the_work_then_wait_until_done() releases them.
 */
void
VMS__init()
{
   create_masterEnv();              //must come first: threads read the env
   create_the_coreLoop_OS_threads();
}
#ifdef SEQUENTIAL

/*Sequential-build startup: identical to VMS__init() except no core-loop
 * OS threads are spawned -- coreLoop_Seq later runs in the caller's
 * own thread.
 */
void
VMS__init_Seq()
{
   create_masterEnv();
}

#endif
94 void
95 create_masterEnv()
96 { MasterEnv *masterEnv;
97 VMSQueueStruc **readyToAnimateQs;
98 int coreIdx;
99 VirtProcr **masterVPs;
100 SchedSlot ***allSchedSlots; //ptr to array of ptrs
103 //Make the master env, which holds everything else
104 _VMSMasterEnv = malloc( sizeof(MasterEnv) );
106 //Very first thing put into the master env is the free-list, seeded
107 // with a massive initial chunk of memory.
108 //After this, all other mallocs are VMS__malloc.
109 int i;
110 for(i=0; i<NUM_CORES; i++)
111 {
112 _VMSMasterEnv->freeListHead[i] = VMS_ext__create_free_list();
113 _VMSMasterEnv->interMasterRequestsFor[i] = NULL;
114 _VMSMasterEnv->interMasterRequestsSentBy[i] = NULL;
115 }
116 _VMSMasterEnv->currentMasterProcrID = 0;
119 //============================= MEASUREMENT STUFF ========================
120 #ifdef MEAS__TIME_MALLOC
121 _VMSMasterEnv->mallocTimeHist = makeFixedBinHistExt( 100, 0, 100,
122 "malloc_time_hist");
123 _VMSMasterEnv->freeTimeHist = makeFixedBinHistExt( 80, 0, 100,
124 "free_time_hist");
125 #endif
126 #ifdef MEAS__TIME_PLUGIN
127 _VMSMasterEnv->reqHdlrLowTimeHist = makeFixedBinHistExt( 1000, 0, 100,
128 "plugin_low_time_hist");
129 _VMSMasterEnv->reqHdlrHighTimeHist = makeFixedBinHistExt( 1000, 0, 100,
130 "plugin_high_time_hist");
131 #endif
132 //========================================================================
134 //===================== Only VMS__malloc after this ====================
135 masterEnv = (MasterEnv*)_VMSMasterEnv;
137 //Make a readyToAnimateQ for each core loop
138 readyToAnimateQs = VMS__malloc( NUM_CORES * sizeof(VMSQueueStruc *) );
139 masterVPs = VMS__malloc( NUM_CORES * sizeof(VirtProcr *) );
141 //One array for each core, 3 in array, core's masterVP scheds all
142 allSchedSlots = VMS__malloc( NUM_CORES * sizeof(SchedSlot *) );
144 _VMSMasterEnv->numProcrsCreated = 0; //used by create procr
145 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
146 {
147 readyToAnimateQs[ coreIdx ] = makeVMSQ();
149 //Q: should give masterVP core-specific info as its init data?
150 masterVPs[ coreIdx ] = VMS__create_procr( (VirtProcrFnPtr)&masterLoop, (void*)masterEnv );
151 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;
152 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core
153 _VMSMasterEnv->numMasterInARow[ coreIdx ] = 0;
154 _VMSMasterEnv->workStealingGates[ coreIdx ] = NULL;
155 }
156 _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs;
157 _VMSMasterEnv->masterVPs = masterVPs;
158 _VMSMasterEnv->masterLock = UNLOCKED;
159 _VMSMasterEnv->allSchedSlots = allSchedSlots;
160 _VMSMasterEnv->workStealingLock = UNLOCKED;
163 //Aug 19, 2010: no longer need to place initial masterVP into queue
164 // because coreLoop now controls -- animates its masterVP when no work
167 //============================= MEASUREMENT STUFF ========================
168 #ifdef STATS__TURN_ON_PROBES
169 _VMSMasterEnv->dynIntervalProbesInfo =
170 makePrivDynArrayOfSize( (void***)&(_VMSMasterEnv->intervalProbes), 200);
172 _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS__free );
174 //put creation time directly into master env, for fast retrieval
175 struct timeval timeStamp;
176 gettimeofday( &(timeStamp), NULL);
177 _VMSMasterEnv->createPtInSecs =
178 timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0);
179 #endif
180 #ifdef MEAS__TIME_MASTER_LOCK
181 _VMSMasterEnv->masterLockLowTimeHist = makeFixedBinHist( 50, 0, 2,
182 "master lock low time hist");
183 _VMSMasterEnv->masterLockHighTimeHist = makeFixedBinHist( 50, 0, 100,
184 "master lock high time hist");
185 #endif
187 MakeTheMeasHists();
188 //========================================================================
190 }
192 SchedSlot **
193 create_sched_slots()
194 { SchedSlot **schedSlots;
195 int i;
197 schedSlots = VMS__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
199 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
200 {
201 schedSlots[i] = VMS__malloc( sizeof(SchedSlot) );
203 //Set state to mean "handling requests done, slot needs filling"
204 schedSlots[i]->workIsDone = FALSE;
205 schedSlots[i]->needsProcrAssigned = TRUE;
206 }
207 return schedSlots;
208 }
211 void
212 freeSchedSlots( SchedSlot **schedSlots )
213 { int i;
214 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
215 {
216 VMS__free( schedSlots[i] );
217 }
218 VMS__free( schedSlots );
219 }
222 void
223 create_the_coreLoop_OS_threads()
224 {
225 //========================================================================
226 // Create the Threads
227 int coreIdx, retCode;
229 //Need the threads to be created suspended, and wait for a signal
230 // before proceeding -- gives time after creating to initialize other
231 // stuff before the coreLoops set off.
232 _VMSMasterEnv->setupComplete = 0;
234 //Make the threads that animate the core loops
235 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
236 { coreLoopThdParams[coreIdx] = VMS__malloc( sizeof(ThdParams) );
237 coreLoopThdParams[coreIdx]->coreNum = coreIdx;
239 retCode =
240 pthread_create( &(coreLoopThdHandles[coreIdx]),
241 thdAttrs,
242 &coreLoop,
243 (void *)(coreLoopThdParams[coreIdx]) );
244 if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(1);}
245 }
246 }
248 /*Semantic layer calls this when it want the system to start running..
249 *
250 *This starts the core loops running then waits for them to exit.
251 */
252 void
253 VMS__start_the_work_then_wait_until_done()
254 { int coreIdx;
255 //Start the core loops running
257 //tell the core loop threads that setup is complete
258 //get lock, to lock out any threads still starting up -- they'll see
259 // that setupComplete is true before entering while loop, and so never
260 // wait on the condition
261 pthread_mutex_lock( &suspendLock );
262 _VMSMasterEnv->setupComplete = 1;
263 pthread_mutex_unlock( &suspendLock );
264 pthread_cond_broadcast( &suspend_cond );
267 //wait for all to complete
268 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
269 {
270 pthread_join( coreLoopThdHandles[coreIdx], NULL );
271 }
273 //NOTE: do not clean up VMS env here -- semantic layer has to have
274 // a chance to clean up its environment first, then do a call to free
275 // the Master env and rest of VMS locations
276 }
#ifdef SEQUENTIAL
/*Sequential counterpart of VMS__start_the_work_then_wait_until_done():
 * instead of releasing suspended threads, runs the one and only core
 * loop directly in the calling thread.
 *(The only pieces that differ from the threaded build are VMS__init_Seq,
 * this function, and coreLoop_Seq.)
 */
void
VMS__start_the_work_then_wait_until_done_Seq()
{
   coreLoop_Seq( NULL );
   flushRegisters();
}
#endif
293 inline VirtProcr *
294 VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
295 { VirtProcr *newPr;
296 void *stackLocs;
298 newPr = VMS__malloc( sizeof(VirtProcr) );
299 stackLocs = VMS__malloc( VIRT_PROCR_STACK_SIZE );
300 if( stackLocs == 0 )
301 { perror("VMS__malloc stack"); exit(1); }
303 return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
304 }
306 /* "ext" designates that it's for use outside the VMS system -- should only
307 * be called from main thread or other thread -- never from code animated by
308 * a VMS virtual processor.
309 */
310 inline VirtProcr *
311 VMS_ext__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
312 { VirtProcr *newPr;
313 char *stackLocs;
315 newPr = malloc( sizeof(VirtProcr) );
316 stackLocs = malloc( VIRT_PROCR_STACK_SIZE );
317 if( stackLocs == 0 )
318 { perror("malloc stack"); exit(1); }
320 return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
321 }
324 /*Anticipating multi-tasking
325 */
326 void *
327 VMS__give_sem_env_for( VirtProcr *animPr )
328 {
329 return _VMSMasterEnv->semanticEnv;
330 }
331 //===========================================================================
/*there is a label inside this function -- save the addr of this label in
 * the callingPr struc, as the pick-up point from which to start the next
 * work-unit for that procr. If turns out have to save registers, then
 * save them in the procr struc too. Then do assembly jump to the CoreLoop's
 * "done with work-unit" label. The procr struc is in the request in the
 * slave that animated the just-ended work-unit, so all the state is saved
 * there, and will get passed along, inside the request handler, to the
 * next work-unit for that procr.
 *
 *Suspends the animating virtual processor: marks its sched slot done and
 * switches the OS thread back to the core loop.  Control "returns" from
 * this call only when the master later resumes this VP.
 */
void
VMS__suspend_procr( VirtProcr *animatingPr )
{
      //The request to master will cause this suspended virt procr to get
      // scheduled again at some future point -- to resume, core loop jumps
      // to the resume point (below), which causes restore of saved regs and
      // "return" from this call.
      //animatingPr->nextInstrPt = &&ResumePt;

      //return ownership of the virt procr and sched slot to Master virt pr
   animatingPr->schedSlot->workIsDone = TRUE;

   //=========================== Measurement stuff ========================
   #ifdef MEAS__TIME_STAMP_SUSP
      //record time stamp: compare to time-stamp recorded below
   saveLowTimeStampCountInto( animatingPr->preSuspTSCLow );
   #endif
   //=======================================================================

      //asm switch to the core loop's stack; does not return until the
      // VP is resumed.  flushRegisters() forces register state to memory
      // around the switch -- do not reorder these two lines.
   switchToCoreLoop(animatingPr);
   flushRegisters();

   //=======================================================================
   #ifdef MEAS__TIME_STAMP_SUSP
      //NOTE: only take low part of count -- do sanity check when take diff
   saveLowTimeStampCountInto( animatingPr->postSuspTSCLow );
   #endif

   return;
}
376 /*For this implementation of VMS, it may not make much sense to have the
377 * system of requests for creating a new processor done this way.. but over
378 * the scope of single-master, multi-master, mult-tasking, OS-implementing,
379 * distributed-memory, and so on, this gives VMS implementation a chance to
380 * do stuff before suspend, in the AppVP, and in the Master before the plugin
381 * is called, as well as in the lang-lib before this is called, and in the
382 * plugin. So, this gives both VMS and language implementations a chance to
383 * intercept at various points and do order-dependent stuff.
384 *Having a standard VMSNewPrReqData struc allows the language to create and
385 * free the struc, while VMS knows how to get the newPr if it wants it, and
386 * it lets the lang have lang-specific data related to creation transported
387 * to the plugin.
388 */
389 void
390 VMS__send_create_procr_req( void *semReqData, VirtProcr *reqstingPr )
391 { VMSReqst req;
393 req.reqType = createReq;
394 req.semReqData = semReqData;
395 req.nextReqst = reqstingPr->requests;
396 reqstingPr->requests = &req;
398 VMS__suspend_procr( reqstingPr );
399 }
402 /*
403 *This adds a request to dissipate, then suspends the processor so that the
404 * request handler will receive the request. The request handler is what
405 * does the work of freeing memory and removing the processor from the
406 * semantic environment's data structures.
407 *The request handler also is what figures out when to shutdown the VMS
408 * system -- which causes all the core loop threads to die, and returns from
409 * the call that started up VMS to perform the work.
410 *
411 *This form is a bit misleading to understand if one is trying to figure out
412 * how VMS works -- it looks like a normal function call, but inside it
413 * sends a request to the request handler and suspends the processor, which
414 * jumps out of the VMS__dissipate_procr function, and out of all nestings
415 * above it, transferring the work of dissipating to the request handler,
416 * which then does the actual work -- causing the processor that animated
417 * the call of this function to disappear and the "hanging" state of this
418 * function to just poof into thin air -- the virtual processor's trace
419 * never returns from this call, but instead the virtual processor's trace
420 * gets suspended in this call and all the virt processor's state disap-
421 * pears -- making that suspend the last thing in the virt procr's trace.
422 */
423 void
424 VMS__send_dissipate_req( VirtProcr *procrToDissipate )
425 { VMSReqst req;
427 req.reqType = dissipate;
428 req.nextReqst = procrToDissipate->requests;
429 procrToDissipate->requests = &req;
431 VMS__suspend_procr( procrToDissipate );
432 }
435 /* "ext" designates that it's for use outside the VMS system -- should only
436 * be called from main thread or other thread -- never from code animated by
437 * a VMS virtual processor.
438 *
439 *Use this version to dissipate VPs created outside the VMS system.
440 */
441 void
442 VMS_ext__dissipate_procr( VirtProcr *procrToDissipate )
443 {
444 //NOTE: initialData was given to the processor, so should either have
445 // been alloc'd with VMS__malloc, or freed by the level above animPr.
446 //So, all that's left to free here is the stack and the VirtProcr struc
447 // itself
448 //Note, should not stack-allocate initial data -- no guarantee, in
449 // general that creating processor will outlive ones it creates.
450 free( procrToDissipate->startOfStack );
451 free( procrToDissipate );
452 }
456 /*This call's name indicates that request is malloc'd -- so req handler
457 * has to free any extra requests tacked on before a send, using this.
458 *
459 * This inserts the semantic-layer's request data into standard VMS carrier
460 * request data-struct that is mallocd. The sem request doesn't need to
461 * be malloc'd if this is called inside the same call chain before the
462 * send of the last request is called.
463 *
464 *The request handler has to call VMS__free_VMSReq for any of these
465 */
466 inline void
467 VMS__add_sem_request_in_mallocd_VMSReqst( void *semReqData,
468 VirtProcr *callingPr )
469 { VMSReqst *req;
471 req = VMS__malloc( sizeof(VMSReqst) );
472 req->reqType = semantic;
473 req->semReqData = semReqData;
474 req->nextReqst = callingPr->requests;
475 callingPr->requests = req;
476 }
478 /*This inserts the semantic-layer's request data into standard VMS carrier
479 * request data-struct is allocated on stack of this call & ptr to it sent
480 * to plugin
481 *Then it does suspend, to cause request to be sent.
482 */
483 inline void
484 VMS__send_sem_request( void *semReqData, VirtProcr *callingPr )
485 { VMSReqst req;
487 req.reqType = semantic;
488 req.semReqData = semReqData;
489 req.nextReqst = callingPr->requests;
490 callingPr->requests = &req;
492 VMS__suspend_procr( callingPr );
493 }
496 inline void
497 VMS__send_VMSSem_request( void *semReqData, VirtProcr *callingPr )
498 { VMSReqst req;
500 req.reqType = VMSSemantic;
501 req.semReqData = semReqData;
502 req.nextReqst = callingPr->requests; //gab any other preceeding
503 callingPr->requests = &req;
505 VMS__suspend_procr( callingPr );
506 }
/*Hands an inter-plugin request to another master by storing it into the
 * target master's mailbox slot in the master env.
 *NOTE(review): this is a plain (non-atomic) store that overwrites any
 * request already in the slot -- presumably the protocol guarantees the
 * slot is empty (consumer NULLs it) before a sender writes; confirm.
 * requestingMaster is currently unused.
 */
void inline
VMS__send_inter_plugin_req( void *reqData, int32 targetMaster,
                            VirtProcr *requestingMaster )
{  _VMSMasterEnv->interMasterRequestsFor[targetMaster] =
      (InterMasterReqst *) reqData;
}
/*Same mailbox mechanism as VMS__send_inter_plugin_req, but for requests
 * between VMS cores rather than between language plugins -- both kinds
 * share the interMasterRequestsFor slot array.
 *NOTE(review): same lost-update caveat -- a pending request in the slot
 * is overwritten by this plain store; confirm the slot is always drained
 * first.  requestingMaster is currently unused.
 */
void inline
VMS__send_inter_VMSCore_req( InterVMSCoreReqst *reqData,
                             int32 targetMaster, VirtProcr *requestingMaster )
{  _VMSMasterEnv->interMasterRequestsFor[targetMaster] =
      (InterMasterReqst *) reqData;
}
522 /*
523 */
524 VMSReqst *
525 VMS__take_next_request_out_of( VirtProcr *procrWithReq )
526 { VMSReqst *req;
528 req = procrWithReq->requests;
529 if( req == NULL ) return NULL;
531 procrWithReq->requests = procrWithReq->requests->nextReqst;
532 return req;
533 }
536 inline void *
537 VMS__take_sem_reqst_from( VMSReqst *req )
538 {
539 return req->semReqData;
540 }
/* This is for OS requests and VMS infrastructure requests, such as to create
 * a probe -- a probe is inside the heart of VMS-core, it's not part of any
 * language -- but it's also a semantic thing that's triggered from and used
 * in the application.. so it crosses abstractions.. so, need some special
 * pattern here for handling such requests.
 * Doing this just like it were a second language sharing VMS-core.
 *
 * This is called from the language's request handler when it sees a request
 * of type VMSSemReq
 *
 * TODO: Later change this, to give probes their own separate plugin & have
 * VMS-core steer the request to appropriate plugin
 * Do the same for OS calls -- look later at it..
 */
void inline
VMS__handle_VMSSemReq( VMSReqst *req, VirtProcr *requestingPr, void *semEnv,
                       ResumePrFnPtr resumePrFnPtr )
{  VMSSemReq *semReq;
   IntervalProbe *newProbe;

   semReq = req->semReqData;

   switch(semReq->reqType){
      case createProbe:
            //build the probe; its name is a private copy of the request's
            // data string
         newProbe = VMS__malloc( sizeof(IntervalProbe) );
         newProbe->nameStr = VMS__strDup( (char*)semReq->data );
         newProbe->hist = NULL;
         newProbe->schedChoiceWasRecorded = FALSE;

            //This runs in masterVP, so no race-condition worries
            //NOTE(review): dynIntervalProbesInfo is only initialized under
            // STATS__TURN_ON_PROBES -- confirm createProbe requests cannot
            // arrive without that flag
         newProbe->probeID =
            addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo );
         requestingPr->dataRetFromReq = newProbe;  //handed back to requester
         break;
      case interMasterReqst:
            //forward the carried request to the target master
         sendInterMasterReqst(semReq->receiverID,
                              (InterMasterReqst*)semReq->data);
         break;
      default:
            //unrecognized VMS-semantic request types are silently ignored
         break;
   }
      //hand the requesting VP back to the scheduler
   resumePrFnPtr( requestingPr, semEnv );
}
/*This must be called by the request handler plugin -- it cannot be called
 * from the semantic library "dissipate processor" function -- instead, the
 * semantic layer has to generate a request, and the plug-in calls this
 * function.
 *The reason is that this frees the virtual processor's stack -- which is
 * still in use inside semantic library calls!
 *
 *This frees or recycles all the state owned by and comprising the VMS
 * portion of the animating virtual procr. The request handler must first
 * free any semantic data created for the processor that didn't use the
 * VMS_malloc mechanism. Then it calls this, which first asks the malloc
 * system to disown any state that did use VMS_malloc, and then frees the
 * stack and the processor-struct itself.
 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
 * state, then that state gets freed (or sent to recycling) as a side-effect
 * of dis-owning it.
 *
 *NOTE(review): as of this revision the body is entirely commented out --
 * nothing is actually freed here.  The frees below are disabled pending a
 * core-specific free, because the master that created the VP may already
 * be dead (see the distributed-free changes in this commit).
 */
void
VMS__dissipate_procr( VirtProcr *animatingPr )
{
      //dis-own all locations owned by this processor, causing to be freed
      // any locations that it is (was) sole owner of
      //TODO: implement VMS__malloc system, including "give up ownership"

      //NOTE: initialData was given to the processor, so should either have
      // been alloc'd with VMS__malloc, or freed by the level above animPr.
      //So, all that's left to free here is the stack and the VirtProcr struc
      // itself
      //Note, should not stack-allocate initial data -- no guarantee, in
      // general that creating processor will outlive ones it creates.

      /*
       * call the core specific version, because the creating master can
       * already be dead
       */
      //VMS__free_in_lib( animatingPr->startOfStack, animatingPr );
      //VMS__free_in_lib( animatingPr, animatingPr);
}
632 //TODO: look at architecting cleanest separation between request handler
633 // and master loop, for dissipate, create, shutdown, and other non-semantic
634 // requests. Issue is chain: one removes requests from AppVP, one dispatches
635 // on type of request, and one handles each type.. but some types require
636 // action from both request handler and master loop -- maybe just give the
637 // request handler calls like: VMS__handle_X_request_type
640 /*This is called by the semantic layer's request handler when it decides its
641 * time to shut down the VMS system. Calling this causes the core loop OS
642 * threads to exit, which unblocks the entry-point function that started up
643 * VMS, and allows it to grab the result and return to the original single-
644 * threaded application.
645 *
646 *The _VMSMasterEnv is needed by this shut down function, so the create-seed-
647 * and-wait function has to free a bunch of stuff after it detects the
648 * threads have all died: the masterEnv, the thread-related locations,
649 * masterVP any AppVPs that might still be allocated and sitting in the
650 * semantic environment, or have been orphaned in the _VMSWorkQ.
651 *
652 *NOTE: the semantic plug-in is expected to use VMS__malloc to get all the
653 * locations it needs, and give ownership to masterVP. Then, they will be
654 * automatically freed.
655 *
656 *In here,create one core-loop shut-down processor for each core loop and put
657 * them all directly into the readyToAnimateQ.
658 *Note, this function can ONLY be called after the semantic environment no
659 * longer cares if AppVPs get animated after the point this is called. In
660 * other words, this can be used as an abort, or else it should only be
661 * called when all AppVPs have finished dissipate requests -- only at that
662 * point is it sure that all results have completed.
663 */
664 void
665 VMS__shutdown()
666 { int coreIdx;
667 VirtProcr *shutDownPr;
669 //create the shutdown processors, one for each core loop -- put them
670 // directly into the Q -- each core will die when gets one
671 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
672 { //Note, this is running in the master
673 shutDownPr = VMS__create_procr( &endOSThreadFn, NULL );
674 writeVMSQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] );
675 }
677 }
680 /*Am trying to be cute, avoiding IF statement in coreLoop that checks for
681 * a special shutdown procr. Ended up with extra-complex shutdown sequence.
682 *This function has the sole purpose of setting the stack and framePtr
683 * to the coreLoop's stack and framePtr.. it does that then jumps to the
684 * core loop's shutdown point -- might be able to just call Pthread_exit
685 * from here, but am going back to the pthread's stack and setting everything
686 * up just as if it never jumped out, before calling pthread_exit.
687 *The end-point of core loop will free the stack and so forth of the
688 * processor that animates this function, (this fn is transfering the
689 * animator of the AppVP that is in turn animating this function over
690 * to core loop function -- note that this slices out a level of virtual
691 * processors).
692 */
/*Body of a shutdown VP.  Never returns: switches the animating OS thread
 * back onto the core loop's own stack/frame and jumps to its shutdown
 * point, where pthread_exit is reached (see the block comment above).
 * initData is unused.
 */
void
endOSThreadFn( void *initData, VirtProcr *animatingPr )
{
   #ifdef SEQUENTIAL
   asmTerminateCoreLoopSeq(animatingPr);
   #else
   asmTerminateCoreLoop(animatingPr);
   #endif
}
704 /*This is called from the startup & shutdown
705 */
706 void
707 VMS__cleanup_at_end_of_shutdown()
708 {
709 //unused
710 //VMSQueueStruc **readyToAnimateQs;
711 //int coreIdx;
712 //VirtProcr **masterVPs;
713 //SchedSlot ***allSchedSlots; //ptr to array of ptrs
715 //Before getting rid of everything, print out any measurements made
716 forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, (DynArrayFnPtr)&printHist );
717 forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, (DynArrayFnPtr)&saveHistToFile);
718 //forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, &freeHistExt );
719 #ifdef MEAS__TIME_PLUGIN
720 printHist( _VMSMasterEnv->reqHdlrLowTimeHist );
721 saveHistToFile( _VMSMasterEnv->reqHdlrLowTimeHist );
722 printHist( _VMSMasterEnv->reqHdlrHighTimeHist );
723 saveHistToFile( _VMSMasterEnv->reqHdlrHighTimeHist );
724 freeHistExt( _VMSMasterEnv->reqHdlrLowTimeHist );
725 freeHistExt( _VMSMasterEnv->reqHdlrHighTimeHist );
726 #endif
727 #ifdef MEAS__TIME_MALLOC
728 printHist( _VMSMasterEnv->mallocTimeHist );
729 saveHistToFile( _VMSMasterEnv->mallocTimeHist );
730 printHist( _VMSMasterEnv->freeTimeHist );
731 saveHistToFile( _VMSMasterEnv->freeTimeHist );
732 freeHistExt( _VMSMasterEnv->mallocTimeHist );
733 freeHistExt( _VMSMasterEnv->freeTimeHist );
734 #endif
735 #ifdef MEAS__TIME_MASTER_LOCK
736 printHist( _VMSMasterEnv->masterLockLowTimeHist );
737 printHist( _VMSMasterEnv->masterLockHighTimeHist );
738 #endif
739 #ifdef MEAS__TIME_MASTER
740 printHist( _VMSMasterEnv->pluginTimeHist );
741 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
742 {
743 freeVMSQ( readyToAnimateQs[ coreIdx ] );
744 //master VPs were created external to VMS, so use external free
745 VMS__dissipate_procr( masterVPs[ coreIdx ] );
747 freeSchedSlots( allSchedSlots[ coreIdx ] );
748 }
749 #endif
750 #ifdef MEAS__TIME_STAMP_SUSP
751 printHist( _VMSMasterEnv->pluginTimeHist );
752 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
753 {
754 freeVMSQ( readyToAnimateQs[ coreIdx ] );
755 //master VPs were created external to VMS, so use external free
756 VMS__dissipate_procr( masterVPs[ coreIdx ] );
758 freeSchedSlots( allSchedSlots[ coreIdx ] );
759 }
760 #endif
762 //All the environment data has been allocated with VMS__malloc, so just
763 // free its internal big-chunk and all inside it disappear.
764 /*
765 readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs;
766 masterVPs = _VMSMasterEnv->masterVPs;
767 allSchedSlots = _VMSMasterEnv->allSchedSlots;
769 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
770 {
771 freeVMSQ( readyToAnimateQs[ coreIdx ] );
772 //master VPs were created external to VMS, so use external free
773 VMS__dissipate_procr( masterVPs[ coreIdx ] );
775 freeSchedSlots( allSchedSlots[ coreIdx ] );
776 }
778 VMS__free( _VMSMasterEnv->readyToAnimateQs );
779 VMS__free( _VMSMasterEnv->masterVPs );
780 VMS__free( _VMSMasterEnv->allSchedSlots );
782 //============================= MEASUREMENT STUFF ========================
783 #ifdef STATS__TURN_ON_PROBES
784 freeDynArrayDeep( _VMSMasterEnv->dynIntervalProbesInfo, &VMS__free_probe);
785 #endif
786 //========================================================================
787 */
788 //These are the only two that use system free
789 int i;
790 for(i=0; i<NUM_CORES; i++)
791 VMS_ext__free_free_list( _VMSMasterEnv->freeListHead[i]);
792 free( (void *)_VMSMasterEnv );
793 }
796 //================================
799 /*Later, improve this -- for now, just exits the application after printing
800 * the error message.
801 */
802 void
803 VMS__throw_exception( char *msgStr, VirtProcr *reqstPr, VMSExcp *excpData )
804 {
805 printf("%s",msgStr);
806 fflush(stdin);
807 exit(1);
808 }
