/*
 * Copyright 2010 OpenSourceStewardshipFoundation
 *
 * Licensed under BSD
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <inttypes.h>
#include <sys/time.h>
#include <pthread.h>

#include "VMS.h"
#include "ProcrContext.h"
#include "Queue_impl/BlockingQueue.h"
#include "Histogram/Histogram.h"

#include <unistd.h>
#include <fcntl.h>
#include <linux/types.h>
#include <linux/perf_event.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

#define thdAttrs NULL

//===========================================================================
void
shutdownFn( void *dummy, VirtProcr *dummy2 );

SchedSlot **
create_sched_slots();

void
create_masterEnv();

void
create_the_coreLoop_OS_threads();

MallocProlog *
create_free_list();

void
endOSThreadFn( void *initData, VirtProcr *animatingPr );

pthread_mutex_t suspendLock  = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t  suspend_cond = PTHREAD_COND_INITIALIZER;

//===========================================================================
/*Setup has two phases:
 * 1) The semantic layer first calls VMS__init, which creates masterEnv and
 *    puts the master virt procr into the work-queue, ready for first "call"
 * 2) The semantic layer then does its own init, which creates the seed virt
 *    procr inside the semantic layer, ready to schedule it when
 *    asked by the first run of the masterLoop.
 *
 *This part is a bit weird because VMS really wants to be "always there", and
 * have applications attach and detach. For now, this VMS is part of
 * the app, so the VMS system starts up as part of running the app.
 *
 *The semantic layer is isolated from the VMS internals by making the
 * semantic layer do setup to a state where it's ready with its
 * initial virt procrs, ready to schedule them to slots when the masterLoop
 * asks. Without this pattern, the semantic layer's setup would
 * have to modify slots directly to assign the initial virt-procrs, and put
 * them into the readyToAnimateQ itself, breaking the isolation completely.
 *
 *
 *The semantic layer creates the initial virt procr(s), adds its
 * own environment to masterEnv, and fills in the pointers to
 * the requestHandler and slaveScheduler plug-in functions.
 */
/*This allocates the VMS data structures, populates the master VMSProc
 * and master environment, and returns the master environment to the
 * semantic layer.
 */
void
VMS__init()
{
    create_masterEnv();
    create_the_coreLoop_OS_threads();
}
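
/*Illustrative sketch only (the mySemLayer__* names are hypothetical, not
 * part of VMS): a typical startup follows the two phases described above,
 * then starts the work and cleans up in the required order:
 *
 *   VMS__init();                       //phase 1: masterEnv + core threads
 *   mySemLayer__init_and_seed();       //phase 2: seed VP, plug-in fn ptrs
 *   VMS__start_the_work_then_wait_until_done();
 *   mySemLayer__cleanup();             //semantic env must be freed first..
 *   VMS__cleanup_at_end_of_shutdown(); //..then VMS frees its own state
 */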
#ifdef SEQUENTIAL

/*To initialize the sequential version, just don't create the threads.
 */
void
VMS__init_Seq()
{
    create_masterEnv();
}

#endif
void
create_masterEnv()
{   MasterEnv      *masterEnv;
    VMSQueueStruc **readyToAnimateQs;
    int             coreIdx;
    VirtProcr     **masterVPs;
    SchedSlot    ***allSchedSlots;  //ptr to array of ptrs

    //Make the master env, which holds everything else
    _VMSMasterEnv = malloc( sizeof(MasterEnv) );

    //Very first thing put into the master env is the free-list, seeded
    // with a massive initial chunk of memory.
    //After this, all other mallocs are VMS__malloc.
    _VMSMasterEnv->freeListHead = VMS_ext__create_free_list();

    //============================= MEASUREMENT STUFF ========================
#ifdef MEAS__TIME_MALLOC
    _VMSMasterEnv->mallocTimeHist = makeFixedBinHistExt( 100, 0, 100,
                                                         "malloc_time_hist");
    _VMSMasterEnv->freeTimeHist   = makeFixedBinHistExt( 80, 0, 100,
                                                         "free_time_hist");
#endif
#ifdef MEAS__TIME_PLUGIN
    _VMSMasterEnv->reqHdlrLowTimeHist  = makeFixedBinHistExt( 1000, 0, 100,
                                                     "plugin_low_time_hist");
    _VMSMasterEnv->reqHdlrHighTimeHist = makeFixedBinHistExt( 1000, 0, 100,
                                                     "plugin_high_time_hist");
#endif
    //========================================================================

    //===================== Only VMS__malloc after this ====================
    masterEnv = (MasterEnv *)_VMSMasterEnv;

    //Make a readyToAnimateQ for each core loop
    readyToAnimateQs = VMS__malloc( NUM_CORES * sizeof(VMSQueueStruc *) );
    masterVPs        = VMS__malloc( NUM_CORES * sizeof(VirtProcr *) );
    //One array for each core, NUM_SCHED_SLOTS in each; the core's masterVP
    // scheds all of them
    allSchedSlots = VMS__malloc( NUM_CORES * sizeof(SchedSlot **) );
    _VMSMasterEnv->numProcrsCreated = 0;  //used by create procr
    for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
     {
       readyToAnimateQs[ coreIdx ] = makeVMSQ();

       //Q: should give masterVP core-specific info as its init data?
       masterVPs[ coreIdx ] = VMS__create_procr( (VirtProcrFnPtr)&masterLoop,
                                                 (void *)masterEnv );
       masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;
       allSchedSlots[ coreIdx ] = create_sched_slots();  //makes for one core
       _VMSMasterEnv->numMasterInARow[ coreIdx ]   = 0;
       _VMSMasterEnv->workStealingGates[ coreIdx ] = NULL;
     }
    _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs;
    _VMSMasterEnv->masterVPs        = masterVPs;
    _VMSMasterEnv->masterLock       = UNLOCKED;
    _VMSMasterEnv->allSchedSlots    = allSchedSlots;
    _VMSMasterEnv->workStealingLock = UNLOCKED;

    //Aug 19, 2010: no longer need to place the initial masterVP into the
    // queue, because the coreLoop is now in control -- it animates its
    // masterVP when there is no work
    //============================= MEASUREMENT STUFF ========================
#ifdef STATS__TURN_ON_PROBES
    _VMSMasterEnv->dynIntervalProbesInfo =
       makePrivDynArrayOfSize( (void ***)&(_VMSMasterEnv->intervalProbes),
                               200 );
    _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS__free );

    //put creation time directly into master env, for fast retrieval
    struct timeval timeStamp;
    gettimeofday( &timeStamp, NULL );
    _VMSMasterEnv->createPtInSecs =
       timeStamp.tv_sec + (timeStamp.tv_usec / 1000000.0);
#endif
#ifdef MEAS__TIME_MASTER_LOCK
    _VMSMasterEnv->masterLockLowTimeHist  = makeFixedBinHist( 50, 0, 2,
                                              "master lock low time hist");
    _VMSMasterEnv->masterLockHighTimeHist = makeFixedBinHist( 50, 0, 100,
                                              "master lock high time hist");
#endif

    MakeTheMeasHists();

#ifdef DETECT_DEPENDENCIES
    _VMSMasterEnv->dependencies     = VMS__malloc( 10 * sizeof(void *) );
    _VMSMasterEnv->dependenciesInfo =
       makePrivDynArrayInfoFrom( (void ***)&(_VMSMasterEnv->dependencies),
                                 10 );
#endif

#ifdef MEAS__PERF_COUNTERS
    _VMSMasterEnv->counter_history = VMS__malloc( 10 * sizeof(void *) );
    _VMSMasterEnv->counter_history_array_info =
       makePrivDynArrayInfoFrom( (void ***)&(_VMSMasterEnv->counter_history),
                                 10 );
    //printf("Creating HW counters...");
    FILE *output;
    int   n;
    char  filename[255];
    for( n = 0; n < 255; n++ )
     {
       sprintf( filename, "./counters/Counters.%d.csv", n );
       output = fopen( filename, "r" );
       if( output )
        { fclose( output );
        }
       else
        { break;
        }
     }
    printf( "Saving Counter measurements to File: %s ...\n", filename );
    output = fopen( filename, "w+" );
    _VMSMasterEnv->counteroutput = output;
    struct perf_event_attr hw_event;
    memset( &hw_event, 0, sizeof(hw_event) );
    hw_event.type           = PERF_TYPE_HARDWARE;
    hw_event.size           = sizeof(hw_event);
    hw_event.disabled       = 1;  /* start disabled; enabled by prctl below */
    hw_event.freq           = 0;
    hw_event.inherit        = 1;  /* children inherit it */
    hw_event.pinned         = 1;  /* must always be on PMU */
    hw_event.exclusive      = 0;  /* doesn't need the PMU exclusively */
    hw_event.exclude_user   = 0;  /* do count user */
    hw_event.exclude_kernel = 1;  /* don't count kernel */
    hw_event.exclude_hv     = 1;  /* don't count hypervisor */
    hw_event.exclude_idle   = 0;  /* do count when idle */
    hw_event.mmap           = 0;  /* don't include mmap data */
    hw_event.comm           = 0;  /* don't include comm data */
    for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
     {
       hw_event.config = PERF_COUNT_HW_CPU_CYCLES;
       _VMSMasterEnv->cycles_counter_fd[coreIdx] =
          syscall( __NR_perf_event_open, &hw_event,
                    0,   //pid_t pid: this task
                   -1,   //int cpu: on any CPU
                   -1,   //int group_fd: no group
                    0 ); //unsigned long flags
       if( _VMSMasterEnv->cycles_counter_fd[coreIdx] < 0 )
        { fprintf( stderr, "On core %d: ", coreIdx );
          perror( "Failed to open cycles counter" );
        }
       hw_event.config = PERF_COUNT_HW_INSTRUCTIONS;
       _VMSMasterEnv->instrs_counter_fd[coreIdx] =
          syscall( __NR_perf_event_open, &hw_event,
                    0,   //pid_t pid: this task
                   -1,   //int cpu: on any CPU
                   -1,   //int group_fd: no group
                    0 ); //unsigned long flags
       if( _VMSMasterEnv->instrs_counter_fd[coreIdx] < 0 )
        { fprintf( stderr, "On core %d: ", coreIdx );
          perror( "Failed to open instrs counter" );
        }
     }
    prctl( PR_TASK_PERF_EVENTS_ENABLE );
    uint64 tmpc, tmpi;
    saveCyclesAndInstrs( 0, tmpc, tmpi );
    printf( "Start: cycles = %llu, instrs = %llu\n", tmpc, tmpi );
#endif
    //========================================================================
}
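
/*Sketch, for orientation only, of how the counter fds opened above can be
 * sampled directly -- a perf_event fd with the default read_format yields
 * a single 64-bit value per read(2); saveCyclesAndInstrs presumably wraps
 * something like this:
 *
 *   uint64_t count;
 *   if( read( _VMSMasterEnv->cycles_counter_fd[coreIdx], &count,
 *             sizeof(count) ) == sizeof(count) )
 *      printf( "core %d cycles: %" PRIu64 "\n", coreIdx, count );
 */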
SchedSlot **
create_sched_slots()
{   SchedSlot **schedSlots;
    int         i;

    schedSlots = VMS__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );

    for( i = 0; i < NUM_SCHED_SLOTS; i++ )
     {
       schedSlots[i] = VMS__malloc( sizeof(SchedSlot) );

       //Set state to mean "handling requests done, slot needs filling"
       schedSlots[i]->workIsDone         = FALSE;
       schedSlots[i]->needsProcrAssigned = TRUE;
     }
    return schedSlots;
}
void
freeSchedSlots( SchedSlot **schedSlots )
{   int i;
    for( i = 0; i < NUM_SCHED_SLOTS; i++ )
     {
       VMS__free( schedSlots[i] );
     }
    VMS__free( schedSlots );
}
void
create_the_coreLoop_OS_threads()
{
    //========================================================================
    // Create the Threads
    int coreIdx, retCode;

    //The threads need to be created suspended, waiting for a signal before
    // proceeding -- this leaves time, after creating them, to initialize
    // everything else before the coreLoops set off.
    _VMSMasterEnv->setupComplete = 0;

    //Make the threads that animate the core loops
    for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
     { coreLoopThdParams[coreIdx] = VMS__malloc( sizeof(ThdParams) );
       coreLoopThdParams[coreIdx]->coreNum = coreIdx;

       retCode =
          pthread_create( &(coreLoopThdHandles[coreIdx]),
                          thdAttrs,
                          &coreLoop,
                          (void *)(coreLoopThdParams[coreIdx]) );
       if( retCode )
        { printf( "ERROR creating thread: %d\n", retCode ); exit(1); }
     }
}
/*The semantic layer calls this when it wants the system to start running.
 *
 *This starts the core loops running, then waits for them to exit.
 */
void
VMS__start_the_work_then_wait_until_done()
{   int coreIdx;
    //Start the core loops running

    //Tell the core loop threads that setup is complete.
    //Take the lock, to lock out any threads still starting up -- they'll
    // see that setupComplete is true before entering the while loop, and
    // so never wait on the condition.
    pthread_mutex_lock( &suspendLock );
    _VMSMasterEnv->setupComplete = 1;
    pthread_mutex_unlock( &suspendLock );
    pthread_cond_broadcast( &suspend_cond );

    //wait for all to complete
    for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
     {
       pthread_join( coreLoopThdHandles[coreIdx], NULL );
     }

    //NOTE: do not clean up the VMS env here -- the semantic layer has to
    // have a chance to clean up its environment first, then make a call to
    // free the Master env and the rest of the VMS locations.
}
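
/*Sketch of the matching gate on the coreLoop side -- the real check lives
 * in coreLoop (another file); this only illustrates the protocol the
 * comment above relies on:
 *
 *   pthread_mutex_lock( &suspendLock );
 *   while( !_VMSMasterEnv->setupComplete )
 *      pthread_cond_wait( &suspend_cond, &suspendLock );
 *   pthread_mutex_unlock( &suspendLock );
 */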
#ifdef SEQUENTIAL

/*The only differences between the version with an OS thread pinned to each
 * core and the sequential version of VMS are VMS__init_Seq, this, and
 * coreLoop_Seq.
 */
void
VMS__start_the_work_then_wait_until_done_Seq()
{
    //Instead of un-suspending threads, just call the one and only
    // core loop (sequential version), in the main thread.
    coreLoop_Seq( NULL );
    flushRegisters();
}

#endif
inline VirtProcr *
VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
{   VirtProcr *newPr;
    void      *stackLocs;

    newPr     = VMS__malloc( sizeof(VirtProcr) );
    stackLocs = VMS__malloc( VIRT_PROCR_STACK_SIZE );
    if( stackLocs == NULL )
     { perror( "VMS__malloc stack" ); exit(1); }

    return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
}
382 /* "ext" designates that it's for use outside the VMS system -- should only
383 * be called from main thread or other thread -- never from code animated by
384 * a VMS virtual processor.
385 */
386 inline VirtProcr *
387 VMS_ext__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
388 { VirtProcr *newPr;
389 char *stackLocs;
391 newPr = malloc( sizeof(VirtProcr) );
392 stackLocs = malloc( VIRT_PROCR_STACK_SIZE );
393 if( stackLocs == 0 )
394 { perror("malloc stack"); exit(1); }
396 return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
397 }
/*Anticipating multi-tasking
 */
void *
VMS__give_sem_env_for( VirtProcr *animPr )
{
    return _VMSMasterEnv->semanticEnv;
}
//===========================================================================
/*There is a label inside this function -- save the addr of this label in
 * the callingPr struc, as the pick-up point from which to start the next
 * work-unit for that procr. If it turns out registers have to be saved,
 * save them in the procr struc too. Then do an assembly jump to the
 * CoreLoop's "done with work-unit" label. The procr struc is in the request
 * in the slave that animated the just-ended work-unit, so all the state is
 * saved there, and will get passed along, inside the request handler, to
 * the next work-unit for that procr.
 */
void
VMS__suspend_procr( VirtProcr *animatingPr )
{
    //The request to master will cause this suspended virt procr to get
    // scheduled again at some future point -- to resume, the core loop
    // jumps to the resume point (below), which causes restore of saved regs
    // and "return" from this call.
    //animatingPr->nextInstrPt = &&ResumePt;

    //return ownership of the virt procr and sched slot to the Master virt pr
    animatingPr->schedSlot->workIsDone = TRUE;

    //=========================== Measurement stuff ========================
#ifdef MEAS__TIME_STAMP_SUSP
    //record time stamp: compare to the time-stamp recorded below
    saveLowTimeStampCountInto( animatingPr->preSuspTSCLow );
#endif
    //=======================================================================

    switchToCoreLoop( animatingPr );
    flushRegisters();

    //=======================================================================
#ifdef MEAS__TIME_STAMP_SUSP
    //NOTE: only take the low part of the count -- do a sanity check when
    // taking the diff
    saveLowTimeStampCountInto( animatingPr->postSuspTSCLow );
#endif

    return;
}
/*For this implementation of VMS, it may not make much sense to have the
 * system of requests for creating a new processor done this way.. but over
 * the scope of single-master, multi-master, multi-tasking, OS-implementing,
 * distributed-memory, and so on, this gives the VMS implementation a chance
 * to do stuff before suspend, in the AppVP, and in the Master before the
 * plugin is called, as well as in the lang-lib before this is called, and
 * in the plugin. So, this gives both VMS and language implementations a
 * chance to intercept at various points and do order-dependent stuff.
 *Having a standard VMSNewPrReqData struc allows the language to create and
 * free the struc, while VMS knows how to get the newPr if it wants it, and
 * it lets the lang have lang-specific data related to creation transported
 * to the plugin.
 */
__attribute__ ((noinline)) void
VMS__send_create_procr_req( void *semReqData, VirtProcr *reqstingPr )
{   VMSReqst req;

    req.reqType    = createReq;
    req.semReqData = semReqData;
    req.nextReqst  = reqstingPr->requests;
    reqstingPr->requests = &req;

    VMS__suspend_procr( reqstingPr );
}
/*
 *This adds a request to dissipate, then suspends the processor so that the
 * request handler will receive the request. The request handler is what
 * does the work of freeing memory and removing the processor from the
 * semantic environment's data structures.
 *The request handler is also what figures out when to shut down the VMS
 * system -- which causes all the core loop threads to die, and returns
 * from the call that started up VMS to perform the work.
 *
 *This form is a bit misleading if one is trying to figure out how VMS
 * works -- it looks like a normal function call, but inside, it sends a
 * request to the request handler and suspends the processor, which jumps
 * out of the VMS__dissipate_procr function, and out of all nestings above
 * it, transferring the work of dissipating to the request handler, which
 * then does the actual work -- causing the processor that animated the
 * call of this function to disappear, and the "hanging" state of this
 * function to just poof into thin air -- the virtual processor's trace
 * never returns from this call; instead, the trace gets suspended in this
 * call and all the virt processor's state disappears -- making that
 * suspend the last thing in the virt procr's trace.
 */
__attribute__ ((noinline)) void
VMS__send_dissipate_req( VirtProcr *procrToDissipate )
{   VMSReqst req;

    req.reqType   = dissipate;
    req.nextReqst = procrToDissipate->requests;
    procrToDissipate->requests = &req;

    VMS__suspend_procr( procrToDissipate );
}
512 /* "ext" designates that it's for use outside the VMS system -- should only
513 * be called from main thread or other thread -- never from code animated by
514 * a VMS virtual processor.
515 *
516 *Use this version to dissipate VPs created outside the VMS system.
517 */
518 void
519 VMS_ext__dissipate_procr( VirtProcr *procrToDissipate )
520 {
521 //NOTE: initialData was given to the processor, so should either have
522 // been alloc'd with VMS__malloc, or freed by the level above animPr.
523 //So, all that's left to free here is the stack and the VirtProcr struc
524 // itself
525 //Note, should not stack-allocate initial data -- no guarantee, in
526 // general that creating processor will outlive ones it creates.
527 free( procrToDissipate->startOfStack );
528 free( procrToDissipate );
529 }
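
/*Hypothetical usage, from a plain OS thread outside VMS (someFn and
 * someHeapData are made up for illustration):
 *
 *   VirtProcr *pr = VMS_ext__create_procr( &someFn, someHeapData );
 *   //...hand pr to the semantic layer and wait for it to finish...
 *   VMS_ext__dissipate_procr( pr );  //frees stack + VirtProcr struc
 *   free( someHeapData );            //initialData is freed by its creator
 */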
/*This call's name indicates that the request is malloc'd -- so the req
 * handler has to free, with VMS__free_VMSReq, any extra requests tacked on
 * before a send using this.
 *
 *This inserts the semantic-layer's request data into a standard VMS carrier
 * request data-struct that is malloc'd. The sem request itself doesn't need
 * to be malloc'd if this is called inside the same call chain, before the
 * send of the last request is called.
 */
inline void
VMS__add_sem_request_in_mallocd_VMSReqst( void *semReqData,
                                          VirtProcr *callingPr )
{   VMSReqst *req;

    req = VMS__malloc( sizeof(VMSReqst) );
    req->reqType    = semantic;
    req->semReqData = semReqData;
    req->nextReqst  = callingPr->requests;
    callingPr->requests = req;
}
/*This inserts the semantic-layer's request data into a standard VMS carrier
 * request data-struct that is allocated on the stack of this call; a ptr to
 * it is sent to the plugin.
 *Then it suspends, to cause the request to be sent.
 */
/*inline*/ __attribute__ ((noinline)) void
VMS__send_sem_request( void *semReqData, VirtProcr *callingPr )
{   VMSReqst req;

    req.reqType    = semantic;
    req.semReqData = semReqData;
    req.nextReqst  = callingPr->requests;
    callingPr->requests = &req;

    VMS__suspend_procr( callingPr );
}
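
/*Sketch of how a semantic library call typically wraps this (the SSR__*
 * names and struct are made up for illustration). Stack-allocating the
 * request data is safe here because the frame outlives the suspend:
 *
 *   void
 *   SSR__send( void *msg, VirtProcr *sendPr )
 *   { SSRSemReq reqData;
 *     reqData.msg    = msg;
 *     reqData.sendPr = sendPr;
 *     VMS__send_sem_request( &reqData, sendPr );
 *   } //returns only after the request handler resumes sendPr
 */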
/*inline*/ __attribute__ ((noinline)) void
VMS__send_VMSSem_request( void *semReqData, VirtProcr *callingPr )
{   VMSReqst req;

    req.reqType    = VMSSemantic;
    req.semReqData = semReqData;
    req.nextReqst  = callingPr->requests;  //grab any preceding requests
    callingPr->requests = &req;

    VMS__suspend_procr( callingPr );
}
VMSReqst *
VMS__take_next_request_out_of( VirtProcr *procrWithReq )
{   VMSReqst *req;

    req = procrWithReq->requests;
    if( req == NULL ) return NULL;

    procrWithReq->requests = procrWithReq->requests->nextReqst;
    return req;
}
inline void *
VMS__take_sem_reqst_from( VMSReqst *req )
{
    return req->semReqData;
}
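
/*Sketch of the consuming side, inside a semantic layer's request-handler
 * plug-in (the handle* helper names are hypothetical; resumePrFnPtr is the
 * resume fn ptr the plug-in receives):
 *
 *   VMSReqst *req;
 *   while( (req = VMS__take_next_request_out_of( requestingPr )) != NULL )
 *    { switch( req->reqType )
 *       { case semantic:    handleSemReq( VMS__take_sem_reqst_from( req ),
 *                                         requestingPr, semEnv );    break;
 *         case VMSSemantic: VMS__handle_VMSSemReq( req, requestingPr,
 *                                         semEnv, resumePrFnPtr );   break;
 *         case createReq:   handleCreate( req, requestingPr, semEnv );
 *                                                                    break;
 *         case dissipate:   handleDissipate( requestingPr, semEnv ); break;
 *       }
 *    }
 */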
/* This is for OS requests and VMS infrastructure requests, such as to
 * create a probe -- a probe is inside the heart of VMS-core, it's not part
 * of any language -- but it's also a semantic thing that's triggered from
 * and used in the application.. so it crosses abstractions.. so, some
 * special pattern is needed here for handling such requests.
 * This is done just as if probes were a second language sharing VMS-core.
 *
 * This is called from the language's request handler when it sees a request
 * of type VMSSemReq.
 *
 * TODO: Later, change this to give probes their own separate plugin & have
 * VMS-core steer the request to the appropriate plugin.
 * Do the same for OS calls -- look at it later..
 */
void inline
VMS__handle_VMSSemReq( VMSReqst *req, VirtProcr *requestingPr, void *semEnv,
                       ResumePrFnPtr resumePrFnPtr )
{   VMSSemReq     *semReq;
    IntervalProbe *newProbe;

    semReq = req->semReqData;

    newProbe = VMS__malloc( sizeof(IntervalProbe) );
    newProbe->nameStr = VMS__strDup( semReq->nameStr );
    newProbe->hist    = NULL;
    newProbe->schedChoiceWasRecorded = FALSE;

    //This runs in the masterVP, so no race-condition worries
    newProbe->probeID =
       addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo );

    requestingPr->dataRetFromReq = newProbe;

    (*resumePrFnPtr)( requestingPr, semEnv );
}
/*This must be called by the request handler plugin -- it cannot be called
 * from the semantic library's "dissipate processor" function -- instead,
 * the semantic layer has to generate a request, and the plug-in calls this
 * function.
 *The reason is that this frees the virtual processor's stack -- which is
 * still in use inside semantic library calls!
 *
 *This frees or recycles all the state owned by and comprising the VMS
 * portion of the animating virtual procr. The request handler must first
 * free any semantic data created for the processor that didn't use the
 * VMS__malloc mechanism. Then it calls this, which first asks the malloc
 * system to disown any state that did use VMS__malloc, and then frees the
 * stack and the processor-struct itself.
 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
 * state, then that state gets freed (or sent to recycling) as a side-effect
 * of dis-owning it.
 */
void
VMS__dissipate_procr( VirtProcr *animatingPr )
{
    //dis-own all locations owned by this processor, causing any locations
    // that it is (was) the sole owner of to be freed
    //TODO: implement the VMS__malloc system, including "give up ownership"

    //NOTE: initialData was given to the processor, so it should either have
    // been alloc'd with VMS__malloc, or be freed by the level above animPr.
    //So, all that's left to free here is the stack and the VirtProcr struc
    // itself.
    //Note, initial data should not be stack-allocated -- there is no
    // guarantee, in general, that a creating processor will outlive the
    // ones it creates.
    VMS__free( animatingPr->startOfStack );
    VMS__free( animatingPr );
}
//TODO: look at architecting the cleanest separation between the request
// handler and the master loop, for dissipate, create, shutdown, and other
// non-semantic requests. The issue is the chain: one removes requests from
// the AppVP, one dispatches on the type of request, and one handles each
// type.. but some types require action from both the request handler and
// the master loop -- maybe just give the request handler calls like:
// VMS__handle_X_request_type
/*This is called by the semantic layer's request handler when it decides
 * it's time to shut down the VMS system. Calling this causes the core loop
 * OS threads to exit, which unblocks the entry-point function that started
 * up VMS, and allows it to grab the result and return to the original
 * single-threaded application.
 *
 *The _VMSMasterEnv is needed by this shut-down function, so the create-
 * seed-and-wait function has to free a bunch of stuff after it detects
 * that the threads have all died: the masterEnv, the thread-related
 * locations, the masterVPs, and any AppVPs that might still be allocated
 * and sitting in the semantic environment, or have been orphaned in the
 * _VMSWorkQ.
 *
 *NOTE: the semantic plug-in is expected to use VMS__malloc to get all the
 * locations it needs, and give ownership to the masterVP. Then, they will
 * be automatically freed.
 *
 *In here, create one core-loop shut-down processor for each core loop and
 * put them all directly into the readyToAnimateQ.
 *Note, this function can ONLY be called after the semantic environment no
 * longer cares whether AppVPs get animated past the point this is called.
 * In other words, this can be used as an abort, or else it should only be
 * called when all AppVPs have finished dissipate requests -- only at that
 * point is it sure that all results have completed.
 */
void
VMS__shutdown()
{   int        coreIdx;
    VirtProcr *shutDownPr;

    //create the shutdown processors, one for each core loop -- put them
    // directly into the Q -- each core will die when it gets one
    for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
     { //Note, this is running in the master
       shutDownPr = VMS__create_procr( &endOSThreadFn, NULL );
       writeVMSQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] );
     }
#ifdef MEAS__PERF_COUNTERS
    uint64 tmpc, tmpi;
    saveCyclesAndInstrs( 0, tmpc, tmpi );
    printf( "End: cycles = %llu, instrs = %llu\n", tmpc, tmpi );
    prctl( PR_TASK_PERF_EVENTS_DISABLE );
    /*
    for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
     {
       close( _VMSMasterEnv->cycles_counter_fd[coreIdx] );
       close( _VMSMasterEnv->instrs_counter_fd[coreIdx] );
     }
    */
#endif
}
/*Am trying to be cute here, avoiding an IF statement in the coreLoop that
 * checks for a special shutdown procr. Ended up with an extra-complex
 * shutdown sequence.
 *This function has the sole purpose of setting the stack and framePtr
 * to the coreLoop's stack and framePtr.. it does that, then jumps to the
 * core loop's shutdown point -- might be able to just call pthread_exit
 * from here, but am going back to the pthread's stack and setting
 * everything up just as if it never jumped out, before calling
 * pthread_exit.
 *The end-point of the core loop will free the stack and so forth of the
 * processor that animates this function (this fn is transferring the
 * animator of the AppVP, which is in turn animating this function, over
 * to the core loop function -- note that this slices out a level of
 * virtual processors).
 */
void
endOSThreadFn( void *initData, VirtProcr *animatingPr )
{
#ifdef SEQUENTIAL
    asmTerminateCoreLoopSeq( animatingPr );
#else
    asmTerminateCoreLoop( animatingPr );
#endif
}
/*This is called from the startup & shutdown
 */
void
VMS__cleanup_at_end_of_shutdown()
{
    //unused
    //VMSQueueStruc **readyToAnimateQs;
    //int coreIdx;
    //VirtProcr **masterVPs;
    //SchedSlot ***allSchedSlots; //ptr to array of ptrs

    //Before getting rid of everything, print out any measurements made
    //forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, (DynArrayFnPtr)&printHist );
    //forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, (DynArrayFnPtr)&saveHistToFile );
    //forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, &freeHistExt );
#ifdef DETECT_DEPENDENCIES
    FILE *output;
    int   n;
    char  filename[255];
    for( n = 0; n < 255; n++ )
     {
       sprintf( filename, "./counters/Dependencies.%d.dot", n );
       output = fopen( filename, "r" );
       if( output )
        { fclose( output );
        }
       else
        { break;
        }
     }
    if( n < 255 )
     { printf( "Saving Dependencies to File: %s ...\n", filename );
       output = fopen( filename, "w+" );
       if( output != NULL )
        { set_dependency_file( output );
          fprintf( output, "digraph Dependencies {\n" );
          set_dot_file( output );
          forAllInDynArrayDo( _VMSMasterEnv->counter_history_array_info,
                              &print_dot_node_info );
          forAllInDynArrayDo( _VMSMasterEnv->dependenciesInfo,
                              &print_dependency_to_file );
          fprintf( output, "}\n" );
        }
       else
          printf( "Opening Dependencies file failed. Please check that the folder \"counters\" exists in the run directory.\n" );
     }
    else
     { printf( "Could not open Dependencies file; please clean the \"counters\" folder. (It must contain fewer than 255 files.)\n" );
     }
#endif
#ifdef MEAS__TIME_PLUGIN
    printHist( _VMSMasterEnv->reqHdlrLowTimeHist );
    saveHistToFile( _VMSMasterEnv->reqHdlrLowTimeHist );
    printHist( _VMSMasterEnv->reqHdlrHighTimeHist );
    saveHistToFile( _VMSMasterEnv->reqHdlrHighTimeHist );
    freeHistExt( _VMSMasterEnv->reqHdlrLowTimeHist );
    freeHistExt( _VMSMasterEnv->reqHdlrHighTimeHist );
#endif
#ifdef MEAS__TIME_MALLOC
    printHist( _VMSMasterEnv->mallocTimeHist );
    saveHistToFile( _VMSMasterEnv->mallocTimeHist );
    printHist( _VMSMasterEnv->freeTimeHist );
    saveHistToFile( _VMSMasterEnv->freeTimeHist );
    freeHistExt( _VMSMasterEnv->mallocTimeHist );
    freeHistExt( _VMSMasterEnv->freeTimeHist );
#endif
#ifdef MEAS__TIME_MASTER_LOCK
    printHist( _VMSMasterEnv->masterLockLowTimeHist );
    printHist( _VMSMasterEnv->masterLockHighTimeHist );
#endif
#ifdef MEAS__TIME_MASTER
    printHist( _VMSMasterEnv->pluginTimeHist );
    { int coreIdx;
      for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
       {
         freeVMSQ( _VMSMasterEnv->readyToAnimateQs[ coreIdx ] );
         //master VPs were created with VMS__create_procr, so free them
         // with VMS__dissipate_procr
         VMS__dissipate_procr( _VMSMasterEnv->masterVPs[ coreIdx ] );

         freeSchedSlots( _VMSMasterEnv->allSchedSlots[ coreIdx ] );
       }
    }
#endif
#ifdef MEAS__TIME_STAMP_SUSP
    printHist( _VMSMasterEnv->pluginTimeHist );
    { int coreIdx;
      for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
       {
         freeVMSQ( _VMSMasterEnv->readyToAnimateQs[ coreIdx ] );
         //master VPs were created with VMS__create_procr, so free them
         // with VMS__dissipate_procr
         VMS__dissipate_procr( _VMSMasterEnv->masterVPs[ coreIdx ] );

         freeSchedSlots( _VMSMasterEnv->allSchedSlots[ coreIdx ] );
       }
    }
#endif
    //All the environment data has been allocated with VMS__malloc, so just
    // free its internal big-chunk and everything inside it disappears.
    /*
    readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs;
    masterVPs        = _VMSMasterEnv->masterVPs;
    allSchedSlots    = _VMSMasterEnv->allSchedSlots;

    for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
     {
       freeVMSQ( readyToAnimateQs[ coreIdx ] );
       //master VPs were created external to VMS, so use external free
       VMS__dissipate_procr( masterVPs[ coreIdx ] );

       freeSchedSlots( allSchedSlots[ coreIdx ] );
     }

    VMS__free( _VMSMasterEnv->readyToAnimateQs );
    VMS__free( _VMSMasterEnv->masterVPs );
    VMS__free( _VMSMasterEnv->allSchedSlots );

    //============================= MEASUREMENT STUFF ========================
 #ifdef STATS__TURN_ON_PROBES
    freeDynArrayDeep( _VMSMasterEnv->dynIntervalProbesInfo, &VMS__free_probe );
 #endif
    //========================================================================
    */
    //These are the only two that use the system free
    VMS_ext__free_free_list( _VMSMasterEnv->freeListHead );
    free( (void *)_VMSMasterEnv );
}
//================================

/*Later, improve this -- for now, it just exits the application after
 * printing the error message.
 */
void
VMS__throw_exception( char *msgStr, VirtProcr *reqstPr, VMSExcp *excpData )
{
    printf( "%s", msgStr );
    fflush( stdout );
    exit(1);
}