VMS.c @ 109:659299627e70

changeset:  counters done
author:     Nina Engelhardt
date:       Tue, 02 Aug 2011 17:16:12 +0200
parents:    3bc3b89630c7
children:   724c7a0b687f
/*
 * Copyright 2010 OpenSourceStewardshipFoundation
 *
 * Licensed under BSD
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <inttypes.h>
#include <sys/time.h>
#include <pthread.h>       //pthread_mutex_t, pthread_create, etc. used below

#include "VMS.h"
#include "ProcrContext.h"
#include "Queue_impl/BlockingQueue.h"
#include "Histogram/Histogram.h"

#include <unistd.h>
#include <fcntl.h>
#include <linux/types.h>
#include <linux/perf_event.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sys/prctl.h>     //declares prctl()
#include <linux/prctl.h>   //PR_TASK_PERF_EVENTS_ENABLE/DISABLE constants
#define thdAttrs NULL

//===========================================================================
void
shutdownFn( void *dummy, VirtProcr *dummy2 );

SchedSlot **
create_sched_slots();

void
create_masterEnv();

void
create_the_coreLoop_OS_threads();

MallocProlog *
create_free_list();

void
endOSThreadFn( void *initData, VirtProcr *animatingPr );

pthread_mutex_t suspendLock  = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t  suspend_cond = PTHREAD_COND_INITIALIZER;
//===========================================================================

/*Setup has two phases:
 * 1) Semantic layer first calls VMS__init, which creates masterEnv, and puts
 *     the master virt procr into the work-queue, ready for first "call"
 * 2) Semantic layer then does its own init, which creates the seed virt
 *     procr inside the semantic layer, ready to schedule it when
 *     asked by the first run of the masterLoop.
 *
 *This part is a bit weird because VMS really wants to be "always there", and
 * have applications attach and detach.. for now, this VMS is part of
 * the app, so the VMS system starts up as part of running the app.
 *
 *The semantic layer is isolated from the VMS internals by making the
 * semantic layer do setup to a state where it's ready with its
 * initial virt procrs, ready to schedule them to slots when the masterLoop
 * asks. Without this pattern, the semantic layer's setup would
 * have to modify slots directly to assign the initial virt-procrs, and put
 * them into the readyToAnimateQ itself, breaking the isolation completely.
 *
 *
 *The semantic layer creates the initial virt procr(s), adds its
 * own environment to masterEnv, and fills in the pointers to
 * the requestHandler and slaveScheduler plug-in functions
 */
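/*Illustrative sketch (compiled out): what the two setup phases above might
 * look like from the application's main(). "Sem__init_seed_procr" stands in
 * for whatever the semantic layer's own init function is called -- a
 * hypothetical name, as is the idea of calling it directly from main().
 */
#if 0
int
main( int argc, char **argv )
 {
   VMS__init();            //phase 1: masterEnv + core-loop OS threads (held suspended)
   Sem__init_seed_procr(); //phase 2: semantic layer makes its env + seed virt procr,
                           // and fills in the requestHandler & slaveScheduler ptrs
   VMS__start_the_work_then_wait_until_done(); //returns once VMS__shutdown has run
   //semantic layer cleans up its own environment first, then:
   VMS__cleanup_at_end_of_shutdown();
   return 0;
 }
#endif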
/*This allocates VMS data structures, populates the master VMSProc,
 * and master environment, and returns the master environment to the semantic
 * layer.
 */
void
VMS__init()
{
   create_masterEnv();
   create_the_coreLoop_OS_threads();
}

#ifdef SEQUENTIAL

/*To initialize the sequential version, just don't create the threads
 */
void
VMS__init_Seq()
{
   create_masterEnv();
}

#endif
void
create_masterEnv()
{  MasterEnv      *masterEnv;
   VMSQueueStruc **readyToAnimateQs;
   int             coreIdx;
   VirtProcr     **masterVPs;
   SchedSlot    ***allSchedSlots;   //ptr to array of ptrs

   //Make the master env, which holds everything else
   _VMSMasterEnv = malloc( sizeof(MasterEnv) );

   //Very first thing put into the master env is the free-list, seeded
   // with a massive initial chunk of memory.
   //After this, all other mallocs are VMS__malloc.
   _VMSMasterEnv->freeListHead = VMS_ext__create_free_list();

   //============================= MEASUREMENT STUFF ========================
   #ifdef MEAS__TIME_MALLOC
   _VMSMasterEnv->mallocTimeHist = makeFixedBinHistExt( 50, 0, 100,
                                                        "malloc_time_hist");
   _VMSMasterEnv->freeTimeHist   = makeFixedBinHistExt( 50, 0, 100,
                                                        "free_time_hist");
   #endif
   #ifdef MEAS__TIME_PLUGIN
   _VMSMasterEnv->reqHdlrLowTimeHist  = makeFixedBinHistExt( 50, 0, 10,
                                                    "plugin_low_time_hist");
   _VMSMasterEnv->reqHdlrHighTimeHist = makeFixedBinHistExt( 50, 0, 100,
                                                    "plugin_high_time_hist");
   #endif
   //========================================================================

   //===================== Only VMS__malloc after this ====================
   masterEnv = (MasterEnv *)_VMSMasterEnv;

   //Make a readyToAnimateQ for each core loop
   readyToAnimateQs = VMS__malloc( NUM_CORES * sizeof(VMSQueueStruc *) );
   masterVPs        = VMS__malloc( NUM_CORES * sizeof(VirtProcr *) );

   //One array for each core, 3 in array, core's masterVP scheds all
   allSchedSlots    = VMS__malloc( NUM_CORES * sizeof(SchedSlot **) );

   _VMSMasterEnv->numProcrsCreated = 0;   //used by create procr
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      readyToAnimateQs[ coreIdx ] = makeVMSQ();

      //Q: should give masterVP core-specific info as its init data?
      masterVPs[ coreIdx ] = VMS__create_procr( (VirtProcrFnPtr)&masterLoop,
                                                (void *)masterEnv );
      masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;
      allSchedSlots[ coreIdx ] = create_sched_slots();   //makes for one core
      _VMSMasterEnv->numMasterInARow[ coreIdx ]   = 0;
      _VMSMasterEnv->workStealingGates[ coreIdx ] = NULL;
    }
   _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs;
   _VMSMasterEnv->masterVPs        = masterVPs;
   _VMSMasterEnv->masterLock       = UNLOCKED;
   _VMSMasterEnv->allSchedSlots    = allSchedSlots;
   _VMSMasterEnv->workStealingLock = UNLOCKED;
   //Aug 19, 2010: no longer need to place initial masterVP into queue
   // because coreLoop now controls -- animates its masterVP when no work

   //============================= MEASUREMENT STUFF ========================
   #ifdef STATS__TURN_ON_PROBES
   _VMSMasterEnv->dynIntervalProbesInfo =
       makePrivDynArrayOfSize( (void ***)&(_VMSMasterEnv->intervalProbes), 200 );

   _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS__free );

   //put creation time directly into master env, for fast retrieval
   struct timeval timeStamp;
   gettimeofday( &(timeStamp), NULL );
   _VMSMasterEnv->createPtInSecs =
       timeStamp.tv_sec + (timeStamp.tv_usec / 1000000.0);
   #endif
   #ifdef MEAS__TIME_MASTER_LOCK
   _VMSMasterEnv->masterLockLowTimeHist  = makeFixedBinHist( 50, 0, 2,
                                              "master lock low time hist");
   _VMSMasterEnv->masterLockHighTimeHist = makeFixedBinHist( 50, 0, 100,
                                              "master lock high time hist");
   #endif

   MakeTheMeasHists();
   #ifdef MEAS__PERF_COUNTERS
   printf("Creating HW counters...");
   struct perf_event_attr hw_event;
   memset( &hw_event, 0, sizeof(hw_event) );
   hw_event.type           = PERF_TYPE_HARDWARE;
   hw_event.size           = sizeof(hw_event);
   hw_event.disabled       = 1;
   hw_event.freq           = 0;
   hw_event.inherit        = 1; /* children inherit it */
   hw_event.pinned         = 1; /* must always be on PMU */
   hw_event.exclusive      = 0; /* don't need the PMU exclusively */
   hw_event.exclude_user   = 0; /* do count user-space */
   hw_event.exclude_kernel = 1; /* don't count kernel */
   hw_event.exclude_hv     = 1; /* don't count hypervisor */
   hw_event.exclude_idle   = 0; /* do count when idle */
   hw_event.mmap           = 0; /* no mmap tracking records */
   hw_event.comm           = 0; /* no comm (exec) tracking records */

   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      hw_event.config = PERF_COUNT_HW_CPU_CYCLES;     //0x0 -- cycles
      _VMSMasterEnv->cycles_counter_fd[coreIdx] =
          syscall( __NR_perf_event_open, &hw_event,
                   0,   //pid_t pid -- this process
                   -1,  //int cpu -- on any CPU
                   -1,  //int group_fd -- not in a group
                   0    //unsigned long flags
                 );
      if( _VMSMasterEnv->cycles_counter_fd[coreIdx] < 0 )
       { fprintf( stderr, "On core %d: ", coreIdx );
         perror( "Failed to open cycles counter" );
       }
      hw_event.config = PERF_COUNT_HW_INSTRUCTIONS;   //0x1 -- instrs
      _VMSMasterEnv->instrs_counter_fd[coreIdx] =
          syscall( __NR_perf_event_open, &hw_event,
                   0,   //pid_t pid -- this process
                   -1,  //int cpu -- on any CPU
                   -1,  //int group_fd -- not in a group
                   0    //unsigned long flags
                 );
      if( _VMSMasterEnv->instrs_counter_fd[coreIdx] < 0 )
       { fprintf( stderr, "On core %d: ", coreIdx );
         perror( "Failed to open instrs counter" );
       }
    }
   prctl( PR_TASK_PERF_EVENTS_ENABLE );
   uint64 tmpc, tmpi;
   saveCyclesAndInstrs( 0, tmpc, tmpi );
   printf( "Start: cycles = %lu, instrs = %lu\n", tmpc, tmpi );
   #endif

   //========================================================================
}
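/*Illustrative sketch (compiled out): one way the counter fds opened above
 * can be sampled -- a plain read() on a perf_event fd yields the current
 * 64-bit count when no extra read_format bits are set. The real
 * saveCyclesAndInstrs macro may well work differently (e.g. via rdtsc/rdpmc);
 * this is only for orientation.
 */
#if 0
static uint64_t
read_perf_counter( int counter_fd )
 {
   uint64_t count = 0;
   if( read( counter_fd, &count, sizeof(count) ) != sizeof(count) )
      perror( "reading perf counter" );
   return count;
 }
//usage, for whichever core is of interest:
// uint64_t cycles = read_perf_counter( _VMSMasterEnv->cycles_counter_fd[coreIdx] );
// uint64_t instrs = read_perf_counter( _VMSMasterEnv->instrs_counter_fd[coreIdx] );
#endif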
SchedSlot **
create_sched_slots()
{  SchedSlot **schedSlots;
   int         i;

   schedSlots = VMS__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );

   for( i = 0; i < NUM_SCHED_SLOTS; i++ )
    {
      schedSlots[i] = VMS__malloc( sizeof(SchedSlot) );

      //Set state to mean "handling requests done, slot needs filling"
      schedSlots[i]->workIsDone         = FALSE;
      schedSlots[i]->needsProcrAssigned = TRUE;
    }
   return schedSlots;
}
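/*Illustrative sketch (compiled out): how the two slot flags set above are
 * meant to be read. The real masterLoop lives in another file, so this is
 * only a guess at its shape; "slaveScheduler" stands for the plug-in pointer
 * the semantic layer installs in masterEnv, and the call signature used here
 * is assumed, not taken from the real plug-in interface.
 */
#if 0
static void
example_fill_slots_for_core( SchedSlot **slots, void *semEnv, int coreIdx )
 { int        i;
   VirtProcr *readyPr;

   for( i = 0; i < NUM_SCHED_SLOTS; i++ )
    {
      if( slots[i]->needsProcrAssigned )
       { readyPr = slaveScheduler( semEnv, coreIdx );   //assumed plug-in call
         if( readyPr == NULL ) continue;                //nothing ready for this slot
         slots[i]->needsProcrAssigned = FALSE;
         slots[i]->workIsDone         = FALSE;
         //...hand readyPr to the core loop via this slot...
       }
    }
 }
#endif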
void
freeSchedSlots( SchedSlot **schedSlots )
{  int i;
   for( i = 0; i < NUM_SCHED_SLOTS; i++ )
    {
      VMS__free( schedSlots[i] );
    }
   VMS__free( schedSlots );
}
void
create_the_coreLoop_OS_threads()
{
   //========================================================================
   // Create the Threads
   int coreIdx, retCode;

   //Need the threads to be created suspended, and wait for a signal
   // before proceeding -- gives time after creating to initialize other
   // stuff before the coreLoops set off.
   _VMSMasterEnv->setupComplete = 0;

   //Make the threads that animate the core loops
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    { coreLoopThdParams[coreIdx] = VMS__malloc( sizeof(ThdParams) );
      coreLoopThdParams[coreIdx]->coreNum = coreIdx;

      retCode =
        pthread_create( &(coreLoopThdHandles[coreIdx]),
                        thdAttrs,
                        &coreLoop,
                        (void *)(coreLoopThdParams[coreIdx]) );
      if( retCode ) { printf( "ERROR creating thread: %d\n", retCode ); exit(1); }
    }
}
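/*Illustrative sketch (compiled out): the waiting side of the startup
 * handshake. coreLoop itself lives in another file, but given the
 * suspendLock/suspend_cond pair above and the broadcast in
 * VMS__start_the_work_then_wait_until_done below, its entry presumably
 * performs a standard condition-variable wait like this.
 */
#if 0
//at the top of coreLoop, before touching any shared VMS state:
pthread_mutex_lock( &suspendLock );
while( !_VMSMasterEnv->setupComplete )    //loop guards against spurious wakeups
   pthread_cond_wait( &suspend_cond, &suspendLock );
pthread_mutex_unlock( &suspendLock );
#endif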
/*Semantic layer calls this when it wants the system to start running.
 *
 *This starts the core loops running, then waits for them to exit.
 */
void
VMS__start_the_work_then_wait_until_done()
{  int coreIdx;
   //Start the core loops running

   //tell the core loop threads that setup is complete
   //get lock, to lock out any threads still starting up -- they'll see
   // that setupComplete is true before entering the while loop, and so never
   // wait on the condition
   pthread_mutex_lock( &suspendLock );
   _VMSMasterEnv->setupComplete = 1;
   pthread_mutex_unlock( &suspendLock );
   pthread_cond_broadcast( &suspend_cond );

   //wait for all to complete
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      pthread_join( coreLoopThdHandles[coreIdx], NULL );
    }

   //NOTE: do not clean up VMS env here -- semantic layer has to have
   // a chance to clean up its environment first, then do a call to free
   // the Master env and rest of VMS locations
}
#ifdef SEQUENTIAL
/*Only difference between the version with an OS thread pinned to each core
 * and the sequential version of VMS is VMS__init_Seq, this, and coreLoop_Seq.
 */
void
VMS__start_the_work_then_wait_until_done_Seq()
{
   //Instead of un-suspending threads, just call the one and only
   // core loop (sequential version), in the main thread.
   coreLoop_Seq( NULL );
   flushRegisters();
}
#endif
inline VirtProcr *
VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
{  VirtProcr *newPr;
   void      *stackLocs;

   newPr     = VMS__malloc( sizeof(VirtProcr) );
   stackLocs = VMS__malloc( VIRT_PROCR_STACK_SIZE );
   if( stackLocs == 0 )
    { perror( "VMS__malloc stack" ); exit(1); }

   return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
}
/* "ext" designates that it's for use outside the VMS system -- should only
 * be called from main thread or other thread -- never from code animated by
 * a VMS virtual processor.
 */
inline VirtProcr *
VMS_ext__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
{  VirtProcr *newPr;
   char      *stackLocs;

   newPr     = malloc( sizeof(VirtProcr) );
   stackLocs = malloc( VIRT_PROCR_STACK_SIZE );
   if( stackLocs == 0 )
    { perror( "malloc stack" ); exit(1); }

   return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
}
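/*Illustrative sketch (compiled out): the shape of a function that a virtual
 * processor animates. The signature matches endOSThreadFn near the end of
 * this file -- init data first, then the animating VP -- and the body, names
 * included, is invented for illustration. A VP's trace normally ends with a
 * dissipate request, which never returns.
 */
#if 0
void
example_VP_fn( void *initData, VirtProcr *animatingPr )
 {
   //...do work, suspending via VMS__send_sem_request(...) whenever a
   // semantic-layer service is needed...

   VMS__send_dissipate_req( animatingPr );   //never returns
 }

//created from the main thread before VMS starts running:
// VirtProcr *seedPr = VMS_ext__create_procr( &example_VP_fn, someInitData );
#endif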
/*Anticipating multi-tasking
 */
void *
VMS__give_sem_env_for( VirtProcr *animPr )
{
   return _VMSMasterEnv->semanticEnv;
}
//===========================================================================
/*There is a label inside this function -- save the addr of this label in
 * the callingPr struc, as the pick-up point from which to start the next
 * work-unit for that procr. If it turns out registers have to be saved, then
 * save them in the procr struc too. Then do an assembly jump to the CoreLoop's
 * "done with work-unit" label. The procr struc is in the request in the
 * slave that animated the just-ended work-unit, so all the state is saved
 * there, and will get passed along, inside the request handler, to the
 * next work-unit for that procr.
 */
void
VMS__suspend_procr( VirtProcr *animatingPr )
{
   //The request to master will cause this suspended virt procr to get
   // scheduled again at some future point -- to resume, core loop jumps
   // to the resume point (below), which causes restore of saved regs and
   // "return" from this call.
   //animatingPr->nextInstrPt = &&ResumePt;

   //return ownership of the virt procr and sched slot to Master virt pr
   animatingPr->schedSlot->workIsDone = TRUE;

   //=========================== Measurement stuff ========================
   #ifdef MEAS__TIME_STAMP_SUSP
   //record time stamp: compare to time-stamp recorded below
   saveLowTimeStampCountInto( animatingPr->preSuspTSCLow );
   #endif
   //=======================================================================

   switchToCoreLoop( animatingPr );
   flushRegisters();

   //=======================================================================
   #ifdef MEAS__TIME_STAMP_SUSP
   //NOTE: only take low part of count -- do sanity check when take diff
   saveLowTimeStampCountInto( animatingPr->postSuspTSCLow );
   #endif

   return;
}
/*For this implementation of VMS, it may not make much sense to have the
 * system of requests for creating a new processor done this way.. but over
 * the scope of single-master, multi-master, multi-tasking, OS-implementing,
 * distributed-memory, and so on, this gives the VMS implementation a chance to
 * do stuff before suspend, in the AppVP, and in the Master before the plugin
 * is called, as well as in the lang-lib before this is called, and in the
 * plugin. So, this gives both VMS and language implementations a chance to
 * intercept at various points and do order-dependent stuff.
 *Having a standard VMSNewPrReqData struc allows the language to create and
 * free the struc, while VMS knows how to get the newPr if it wants it, and
 * it lets the lang have lang-specific data related to creation transported
 * to the plugin.
 */
void __attribute__ ((noinline))
VMS__send_create_procr_req( void *semReqData, VirtProcr *reqstingPr )
{  VMSReqst req;

   req.reqType    = createReq;
   req.semReqData = semReqData;
   req.nextReqst  = reqstingPr->requests;
   reqstingPr->requests = &req;

   VMS__suspend_procr( reqstingPr );
}
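/*Illustrative sketch (compiled out): how a language library might use the
 * create-request path. The exact layout of VMSNewPrReqData is not shown in
 * this file, so the field names used here (fnPtr, initData, newPr) are
 * assumptions, not the real struct.
 */
#if 0
VirtProcr *
example_lang_create_VP( VirtProcrFnPtr fnPtr, void *initData, VirtProcr *creatingPr )
 {
   VMSNewPrReqData reqData;       //lang creates & frees the carrier struc;
                                  // it lives on the creating VP's own stack
   reqData.fnPtr    = fnPtr;      //assumed field name
   reqData.initData = initData;   //assumed field name

   VMS__send_create_procr_req( &reqData, creatingPr );   //suspends creatingPr;
                                  // the plugin makes the new VP while handling
   return reqData.newPr;          //assumed: plugin writes the new VP back in
 }
#endif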
/*
 *This adds a request to dissipate, then suspends the processor so that the
 * request handler will receive the request. The request handler is what
 * does the work of freeing memory and removing the processor from the
 * semantic environment's data structures.
 *The request handler also is what figures out when to shut down the VMS
 * system -- which causes all the core loop threads to die, and returns from
 * the call that started up VMS to perform the work.
 *
 *This form is a bit misleading if one is trying to figure out
 * how VMS works -- it looks like a normal function call, but inside it
 * sends a request to the request handler and suspends the processor, which
 * jumps out of the VMS__dissipate_procr function, and out of all nestings
 * above it, transferring the work of dissipating to the request handler,
 * which then does the actual work -- causing the processor that animated
 * the call of this function to disappear and the "hanging" state of this
 * function to just poof into thin air -- the virtual processor's trace
 * never returns from this call, but instead the virtual processor's trace
 * gets suspended in this call and all the virt processor's state disap-
 * pears -- making that suspend the last thing in the virt procr's trace.
 */
void __attribute__ ((noinline))
VMS__send_dissipate_req( VirtProcr *procrToDissipate )
{  VMSReqst req;

   req.reqType   = dissipate;
   req.nextReqst = procrToDissipate->requests;
   procrToDissipate->requests = &req;

   VMS__suspend_procr( procrToDissipate );
}
/* "ext" designates that it's for use outside the VMS system -- should only
 * be called from main thread or other thread -- never from code animated by
 * a VMS virtual processor.
 *
 *Use this version to dissipate VPs created outside the VMS system.
 */
void
VMS_ext__dissipate_procr( VirtProcr *procrToDissipate )
{
   //NOTE: initialData was given to the processor, so should either have
   // been alloc'd with VMS__malloc, or freed by the level above animPr.
   //So, all that's left to free here is the stack and the VirtProcr struc
   // itself
   //Note, should not stack-allocate initial data -- no guarantee, in
   // general that creating processor will outlive ones it creates.
   free( procrToDissipate->startOfStack );
   free( procrToDissipate );
}
/*This call's name indicates that the request is malloc'd -- so the req handler
 * has to free any extra requests tacked on before a send, using this.
 *
 *This inserts the semantic-layer's request data into the standard VMS carrier
 * request data-struct, which is malloc'd. The sem request doesn't need to
 * be malloc'd if this is called inside the same call chain before the
 * send of the last request is called.
 *
 *The request handler has to call VMS__free_VMSReq for any of these.
 */
inline void
VMS__add_sem_request_in_mallocd_VMSReqst( void *semReqData,
                                          VirtProcr *callingPr )
{  VMSReqst *req;

   req = VMS__malloc( sizeof(VMSReqst) );
   req->reqType    = semantic;
   req->semReqData = semReqData;
   req->nextReqst  = callingPr->requests;
   callingPr->requests = req;
}
/*This inserts the semantic-layer's request data into a standard VMS carrier
 * request data-struct that is allocated on the stack of this call, & a ptr
 * to it is sent to the plugin.
 *Then it does suspend, to cause the request to be sent.
 */
/*inline*/ void __attribute__ ((noinline))
VMS__send_sem_request( void *semReqData, VirtProcr *callingPr )
{  VMSReqst req;

   req.reqType    = semantic;
   req.semReqData = semReqData;
   req.nextReqst  = callingPr->requests;
   callingPr->requests = &req;

   VMS__suspend_procr( callingPr );
}
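/*Illustrative sketch (compiled out): the usual shape of a language-library
 * call built on VMS__send_sem_request. ExampleSemReq and its fields are
 * made-up names -- each language defines its own request struct -- but the
 * stack-allocate / send / resume pattern is the one described above.
 */
#if 0
typedef struct { int reqKind; void *arg; } ExampleSemReq;   //hypothetical

void
example_lang_call( void *arg, VirtProcr *callingPr )
 { ExampleSemReq semReq;       //lives on the VP's own stack, so it stays
                               // valid across the suspend
   semReq.reqKind = 1;
   semReq.arg     = arg;

   VMS__send_sem_request( &semReq, callingPr );   //suspends callingPr; the plugin
                                                  // sees &semReq via
                                                  // VMS__take_sem_reqst_from
   //execution resumes here after the plugin schedules callingPr again
 }
#endif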
/*inline*/ void __attribute__ ((noinline))
VMS__send_VMSSem_request( void *semReqData, VirtProcr *callingPr )
{  VMSReqst req;

   req.reqType    = VMSSemantic;
   req.semReqData = semReqData;
   req.nextReqst  = callingPr->requests;   //grab any other preceding requests
   callingPr->requests = &req;

   VMS__suspend_procr( callingPr );
}
/*
 */
VMSReqst *
VMS__take_next_request_out_of( VirtProcr *procrWithReq )
{  VMSReqst *req;

   req = procrWithReq->requests;
   if( req == NULL ) return NULL;

   procrWithReq->requests = procrWithReq->requests->nextReqst;
   return req;
}


inline void *
VMS__take_sem_reqst_from( VMSReqst *req )
{
   return req->semReqData;
}
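/*Illustrative sketch (compiled out): the drain-and-dispatch loop a semantic
 * layer's request-handler plug-in typically runs over a suspended VP, using
 * the two accessors above. handle_semantic, handle_create, and
 * example_resume_fn are hypothetical names; the request types and VMS calls
 * are the ones defined in this file.
 */
#if 0
void
example_request_handler( VirtProcr *requestingPr, void *semEnv )
 { VMSReqst *req;

   while( (req = VMS__take_next_request_out_of( requestingPr )) != NULL )
    {
      switch( req->reqType )
       {
         case semantic:
            handle_semantic( VMS__take_sem_reqst_from( req ), requestingPr, semEnv );
            break;
         case VMSSemantic:
            VMS__handle_VMSSemReq( req, requestingPr, semEnv, &example_resume_fn );
            break;
         case createReq:
            handle_create( VMS__take_sem_reqst_from( req ), requestingPr, semEnv );
            break;
         case dissipate:
            //free any semantic data not owned via VMS__malloc, then:
            VMS__dissipate_procr( requestingPr );
            return;              //the VP and its remaining requests are gone
       }
      //carriers added with VMS__add_sem_request_in_mallocd_VMSReqst were
      // malloc'd, so the handler frees those with VMS__free_VMSReq;
      // stack-carried ones (from VMS__send_sem_request) must not be freed.
    }
 }
#endif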
/* This is for OS requests and VMS infrastructure requests, such as to create
 * a probe -- a probe is inside the heart of VMS-core, it's not part of any
 * language -- but it's also a semantic thing that's triggered from and used
 * in the application.. so it crosses abstractions.. so, need some special
 * pattern here for handling such requests.
 * Doing this just as if it were a second language sharing VMS-core.
 *
 * This is called from the language's request handler when it sees a request
 * of type VMSSemantic.
 *
 * TODO: Later change this, to give probes their own separate plugin & have
 * VMS-core steer the request to the appropriate plugin.
 * Do the same for OS calls -- look later at it..
 */
void inline
VMS__handle_VMSSemReq( VMSReqst *req, VirtProcr *requestingPr, void *semEnv,
                       ResumePrFnPtr resumePrFnPtr )
{  VMSSemReq     *semReq;
   IntervalProbe *newProbe;

   semReq = req->semReqData;

   newProbe = VMS__malloc( sizeof(IntervalProbe) );
   newProbe->nameStr = VMS__strDup( semReq->nameStr );
   newProbe->hist    = NULL;
   newProbe->schedChoiceWasRecorded = FALSE;

   //This runs in masterVP, so no race-condition worries
   newProbe->probeID =
       addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo );

   requestingPr->dataRetFromReq = newProbe;

   (*resumePrFnPtr)( requestingPr, semEnv );
}
/*This must be called by the request handler plugin -- it cannot be called
 * from the semantic library's "dissipate processor" function -- instead, the
 * semantic layer has to generate a request, and the plug-in calls this
 * function.
 *The reason is that this frees the virtual processor's stack -- which is
 * still in use inside semantic library calls!
 *
 *This frees or recycles all the state owned by and comprising the VMS
 * portion of the animating virtual procr. The request handler must first
 * free any semantic data created for the processor that didn't use the
 * VMS__malloc mechanism. Then it calls this, which first asks the malloc
 * system to disown any state that did use VMS__malloc, and then frees the
 * stack and the processor-struct itself.
 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
 * state, then that state gets freed (or sent to recycling) as a side-effect
 * of dis-owning it.
 */
void
VMS__dissipate_procr( VirtProcr *animatingPr )
{
   //dis-own all locations owned by this processor, causing to be freed
   // any locations that it is (was) sole owner of
   //TODO: implement VMS__malloc system, including "give up ownership"

   //NOTE: initialData was given to the processor, so should either have
   // been alloc'd with VMS__malloc, or freed by the level above animPr.
   //So, all that's left to free here is the stack and the VirtProcr struc
   // itself
   //Note, should not stack-allocate initial data -- no guarantee, in
   // general that creating processor will outlive ones it creates.
   VMS__free( animatingPr->startOfStack );
   VMS__free( animatingPr );
}
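/*Illustrative sketch (compiled out): how a request handler typically pairs
 * VMS__dissipate_procr with VMS__shutdown (below). ExampleSemEnv and its
 * liveVPCount field are made-up names -- each language tracks "no more work"
 * in its own way -- but the ordering (free semantic data, dissipate, then
 * decide whether to shut VMS down) follows the comments above.
 */
#if 0
void
example_handle_dissipate( VirtProcr *requestingPr, ExampleSemEnv *semEnv )
 {
   //1) free any per-VP semantic data that was NOT obtained via VMS__malloc
   //free( ...lang-specific state of requestingPr... );

   //2) free the VMS side: stack + VirtProcr struc (plus disown VMS__malloc'd state)
   VMS__dissipate_procr( requestingPr );

   //3) once no application VPs remain, tell VMS to wind down the core loops
   semEnv->liveVPCount -= 1;              //hypothetical bookkeeping
   if( semEnv->liveVPCount == 0 )
      VMS__shutdown();
 }
#endif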
//TODO: look at architecting the cleanest separation between request handler
// and master loop, for dissipate, create, shutdown, and other non-semantic
// requests. Issue is the chain: one removes requests from the AppVP, one
// dispatches on the type of request, and one handles each type.. but some
// types require action from both request handler and master loop -- maybe
// just give the request handler calls like: VMS__handle_X_request_type


/*This is called by the semantic layer's request handler when it decides it's
 * time to shut down the VMS system. Calling this causes the core loop OS
 * threads to exit, which unblocks the entry-point function that started up
 * VMS, and allows it to grab the result and return to the original single-
 * threaded application.
 *
 *The _VMSMasterEnv is needed by this shut-down function, so the create-seed-
 * and-wait function has to free a bunch of stuff after it detects the
 * threads have all died: the masterEnv, the thread-related locations,
 * masterVP, and any AppVPs that might still be allocated and sitting in the
 * semantic environment, or have been orphaned in the _VMSWorkQ.
 *
 *NOTE: the semantic plug-in is expected to use VMS__malloc to get all the
 * locations it needs, and give ownership to masterVP. Then, they will be
 * automatically freed.
 *
 *In here, create one core-loop shut-down processor for each core loop and put
 * them all directly into the readyToAnimateQ.
 *Note, this function can ONLY be called after the semantic environment no
 * longer cares if AppVPs get animated after the point this is called. In
 * other words, this can be used as an abort, or else it should only be
 * called when all AppVPs have finished dissipate requests -- only at that
 * point is it sure that all results have completed.
 */
void
VMS__shutdown()
{  int        coreIdx;
   VirtProcr *shutDownPr;

   //create the shutdown processors, one for each core loop -- put them
   // directly into the Q -- each core will die when it gets one
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    { //Note, this is running in the master
      shutDownPr = VMS__create_procr( &endOSThreadFn, NULL );
      writeVMSQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] );
    }
   #ifdef MEAS__PERF_COUNTERS
   uint64 tmpc, tmpi;
   saveCyclesAndInstrs( 0, tmpc, tmpi );
   printf( "End: cycles = %lu, instrs = %lu\n", tmpc, tmpi );
   prctl( PR_TASK_PERF_EVENTS_DISABLE );
   /*
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ){
      close( _VMSMasterEnv->cycles_counter_fd[coreIdx] );
      close( _VMSMasterEnv->instrs_counter_fd[coreIdx] );
   }
   */
   #endif
}
/*Am trying to be cute, avoiding an IF statement in coreLoop that checks for
 * a special shutdown procr. Ended up with an extra-complex shutdown sequence.
 *This function has the sole purpose of setting the stack and framePtr
 * to the coreLoop's stack and framePtr.. it does that then jumps to the
 * core loop's shutdown point -- might be able to just call pthread_exit
 * from here, but am going back to the pthread's stack and setting everything
 * up just as if it never jumped out, before calling pthread_exit.
 *The end-point of the core loop will free the stack and so forth of the
 * processor that animates this function (this fn is transferring the
 * animator of the AppVP that is in turn animating this function over
 * to the core loop function -- note that this slices out a level of virtual
 * processors).
 */
void
endOSThreadFn( void *initData, VirtProcr *animatingPr )
{
   #ifdef SEQUENTIAL
   asmTerminateCoreLoopSeq( animatingPr );
   #else
   asmTerminateCoreLoop( animatingPr );
   #endif
}
/*This is called from the startup & shutdown
 */
void
VMS__cleanup_at_end_of_shutdown()
{
   //unused
   //VMSQueueStruc **readyToAnimateQs;
   //int coreIdx;
   //VirtProcr **masterVPs;
   //SchedSlot ***allSchedSlots; //ptr to array of ptrs

   //Before getting rid of everything, print out any measurements made
   //forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, (DynArrayFnPtr)&printHist );
   //forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, (DynArrayFnPtr)&saveHistToFile);
   //forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, &freeHistExt );
   #ifdef MEAS__TIME_PLUGIN
   printHist(      _VMSMasterEnv->reqHdlrLowTimeHist );
   saveHistToFile( _VMSMasterEnv->reqHdlrLowTimeHist );
   printHist(      _VMSMasterEnv->reqHdlrHighTimeHist );
   saveHistToFile( _VMSMasterEnv->reqHdlrHighTimeHist );
   freeHistExt(    _VMSMasterEnv->reqHdlrLowTimeHist );
   freeHistExt(    _VMSMasterEnv->reqHdlrHighTimeHist );
   #endif
   #ifdef MEAS__TIME_MALLOC
   printHist(      _VMSMasterEnv->mallocTimeHist );
   saveHistToFile( _VMSMasterEnv->mallocTimeHist );
   printHist(      _VMSMasterEnv->freeTimeHist );
   saveHistToFile( _VMSMasterEnv->freeTimeHist );
   freeHistExt(    _VMSMasterEnv->mallocTimeHist );
   freeHistExt(    _VMSMasterEnv->freeTimeHist );
   #endif
   #ifdef MEAS__TIME_MASTER_LOCK
   printHist( _VMSMasterEnv->masterLockLowTimeHist );
   printHist( _VMSMasterEnv->masterLockHighTimeHist );
   #endif
   #ifdef MEAS__TIME_MASTER
   printHist( _VMSMasterEnv->pluginTimeHist );
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      freeVMSQ( readyToAnimateQs[ coreIdx ] );
      //master VPs were created external to VMS, so use external free
      VMS__dissipate_procr( masterVPs[ coreIdx ] );

      freeSchedSlots( allSchedSlots[ coreIdx ] );
    }
   #endif
   #ifdef MEAS__TIME_STAMP_SUSP
   printHist( _VMSMasterEnv->pluginTimeHist );
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      freeVMSQ( readyToAnimateQs[ coreIdx ] );
      //master VPs were created external to VMS, so use external free
      VMS__dissipate_procr( masterVPs[ coreIdx ] );

      freeSchedSlots( allSchedSlots[ coreIdx ] );
    }
   #endif
   //All the environment data has been allocated with VMS__malloc, so just
   // free its internal big-chunk and all inside it disappears.
   /*
   readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs;
   masterVPs        = _VMSMasterEnv->masterVPs;
   allSchedSlots    = _VMSMasterEnv->allSchedSlots;

   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      freeVMSQ( readyToAnimateQs[ coreIdx ] );
      //master VPs were created external to VMS, so use external free
      VMS__dissipate_procr( masterVPs[ coreIdx ] );

      freeSchedSlots( allSchedSlots[ coreIdx ] );
    }

   VMS__free( _VMSMasterEnv->readyToAnimateQs );
   VMS__free( _VMSMasterEnv->masterVPs );
   VMS__free( _VMSMasterEnv->allSchedSlots );

   //============================= MEASUREMENT STUFF ========================
   #ifdef STATS__TURN_ON_PROBES
   freeDynArrayDeep( _VMSMasterEnv->dynIntervalProbesInfo, &VMS__free_probe );
   #endif
   //========================================================================
   */
   //These are the only two that use system free
   VMS_ext__free_free_list( _VMSMasterEnv->freeListHead );
   free( (void *)_VMSMasterEnv );
}
//================================


/*Later, improve this -- for now, just exits the application after printing
 * the error message.
 */
void
VMS__throw_exception( char *msgStr, VirtProcr *reqstPr, VMSExcp *excpData )
{
   printf( "%s", msgStr );
   fflush( stdout );
   exit(1);
}