view VMS.c @ 37:d6367cd40e21

Change in a comment from VMSHW to SSR
author Me
date Wed, 01 Sep 2010 09:18:40 -0700
parents 65e5918731b8
children
line source
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <malloc.h>
11 #include "VMS.h"
12 #include "Queue_impl/BlockingQueue.h"
//Attributes used for every pthread_create below -- NULL means defaults
#define thdAttrs NULL

//===========================================================================
//Forward declarations of internal setup/shutdown helpers defined later
// in this file
void
shutdownFn( void *dummy, VirtProcr *dummy2 );

void
create_sched_slots( MasterEnv *masterEnv );

void
create_masterEnv();

void
create_the_coreLoop_OS_threads();

//Lock and condition the core-loop threads wait on until VMS setup is
// complete (signalled in VMS__start_the_work_then_wait_until_done)
pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER;
33 //===========================================================================
35 /*Setup has two phases:
36 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
37 * the master virt procr into the work-queue, ready for first "call"
38 * 2) Semantic layer then does its own init, which creates the seed virt
39 * procr inside the semantic layer, ready to schedule it when
40 * asked by the first run of the masterLoop.
41 *
 *This part is a bit weird because VMS really wants to be "always there", and
43 * have applications attach and detach.. for now, this VMS is part of
44 * the app, so the VMS system starts up as part of running the app.
45 *
46 *The semantic layer is isolated from the VMS internals by making the
47 * semantic layer do setup to a state that it's ready with its
48 * initial virt procrs, ready to schedule them to slots when the masterLoop
49 * asks. Without this pattern, the semantic layer's setup would
50 * have to modify slots directly to assign the initial virt-procrs, and put
51 * them into the workQ itself, breaking the isolation completely.
52 *
53 *
54 *The semantic layer creates the initial virt procr(s), and adds its
55 * own environment to masterEnv, and fills in the pointers to
56 * the requestHandler and slaveScheduler plug-in functions
57 */
/*This allocates VMS data structures, populates the master VMSProc and
 * master environment (stored in the global _VMSMasterEnv, which the
 * semantic layer reads), then creates the per-core OS threads that
 * animate the core loops.
 */
void
VMS__init()
 {
   create_masterEnv();
   create_the_coreLoop_OS_threads();
 }
/*To initialize the sequential version, just don't create the threads --
 * the single core loop is later run directly in the main thread by
 * VMS__start_the_work_then_wait_until_done_Seq.
 */
void
VMS__init_Seq()
 {
   create_masterEnv();
 }
78 void
79 create_masterEnv()
80 { MasterEnv *masterEnv;
81 VMSQueueStruc *workQ;
83 //Make the central work-queue
84 _VMSWorkQ = makeVMSQ();
85 workQ = _VMSWorkQ;
87 _VMSMasterEnv = malloc( sizeof(MasterEnv) );
88 masterEnv = _VMSMasterEnv;
90 //create the master virtual processor
91 masterEnv->masterVirtPr = VMS__create_procr( &masterLoop, masterEnv );
93 create_sched_slots( masterEnv );
95 masterEnv->stillRunning = FALSE;
96 masterEnv->numToPrecede = NUM_CORES;
98 //First core loop to start up gets this, which will schedule seed Pr
99 //TODO: debug: check address of masterVirtPr
100 writeVMSQ( masterEnv->masterVirtPr, workQ );
102 numProcrsCreated = 1; //global counter for debugging
104 //==================== malloc substitute ========================
105 //
106 //Testing whether malloc is using thread-local storage and therefore
107 // causing unreliable behavior.
108 //Just allocate a massive chunk of memory and roll own malloc/free and
109 // make app use VMS__malloc_to, which will suspend and perform malloc
110 // in the master, taking from this massive chunk.
112 // initFreeList();
113 }
115 /*
116 void
117 initMasterMalloc()
118 {
119 _VMSMasterEnv->mallocChunk = malloc( MASSIVE_MALLOC_SIZE );
121 //The free-list element is the first several locations of an
122 // allocated chunk -- the address given to the application is pre-
123 // pended with both the ownership structure and the free-list struc.
124 //So, write the values of these into the first locations of
125 // mallocChunk -- which marks it as free & puts in its size.
126 listElem = (FreeListElem *)_VMSMasterEnv->mallocChunk;
127 listElem->size = MASSIVE_MALLOC_SIZE - NUM_PREPEND_BYTES
128 listElem->next = NULL;
129 }
131 void
132 dissipateMasterMalloc()
133 {
134 //Just foo code -- to get going -- doing as if free list were link-list
135 currElem = _VMSMasterEnv->freeList;
136 while( currElem != NULL )
137 {
138 nextElem = currElem->next;
139 masterFree( currElem );
140 currElem = nextElem;
141 }
142 free( _VMSMasterEnv->freeList );
143 }
144 */
146 void
147 create_sched_slots( MasterEnv *masterEnv )
148 { SchedSlot **schedSlots, **filledSlots;
149 int i;
151 schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
152 filledSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
153 masterEnv->schedSlots = schedSlots;
154 masterEnv->filledSlots = filledSlots;
156 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
157 {
158 schedSlots[i] = malloc( sizeof(SchedSlot) );
160 //Set state to mean "handling requests done, slot needs filling"
161 schedSlots[i]->workIsDone = FALSE;
162 schedSlots[i]->needsProcrAssigned = TRUE;
163 }
164 }
167 void
168 create_the_coreLoop_OS_threads()
169 {
170 //========================================================================
171 // Create the Threads
172 int coreIdx, retCode;
174 //Need the threads to be created suspended, and wait for a signal
175 // before proceeding -- gives time after creating to initialize other
176 // stuff before the coreLoops set off.
177 _VMSMasterEnv->setupComplete = 0;
179 //Make the threads that animate the core loops
180 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
181 { coreLoopThdParams[coreIdx] = malloc( sizeof(ThdParams) );
182 coreLoopThdParams[coreIdx]->coreNum = coreIdx;
184 retCode =
185 pthread_create( &(coreLoopThdHandles[coreIdx]),
186 thdAttrs,
187 &coreLoop,
188 (void *)(coreLoopThdParams[coreIdx]) );
189 if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(0);}
190 }
191 }
/*Semantic layer calls this when it wants the system to start running..
194 *
195 *This starts the core loops running then waits for them to exit.
196 */
197 void
198 VMS__start_the_work_then_wait_until_done()
199 { int coreIdx;
200 //Start the core loops running
201 //===========================================================================
202 TSCount startCount, endCount;
203 unsigned long long count = 0, freq = 0;
204 double runTime;
206 startCount = getTSCount();
208 //tell the core loop threads that setup is complete
209 //get lock, to lock out any threads still starting up -- they'll see
210 // that setupComplete is true before entering while loop, and so never
211 // wait on the condition
212 pthread_mutex_lock( &suspendLock );
213 _VMSMasterEnv->setupComplete = 1;
214 pthread_mutex_unlock( &suspendLock );
215 pthread_cond_broadcast( &suspend_cond );
218 //wait for all to complete
219 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
220 {
221 pthread_join( coreLoopThdHandles[coreIdx], NULL );
222 }
224 //NOTE: do not clean up VMS env here -- semantic layer has to have
225 // a chance to clean up its environment first, then do a call to free
226 // the Master env and rest of VMS locations
229 endCount = getTSCount();
230 count = endCount - startCount;
232 runTime = (double)count / (double)TSCOUNT_FREQ;
234 printf("\n Time startup to shutdown: %f\n", runTime); fflush( stdin );
235 }
/*Only difference between version with an OS thread pinned to each core and
 * the sequential version of VMS is VMS__init_Seq, this, and coreLoop_Seq.
 *Runs everything in the calling (main) thread; returns when the core
 * loop returns.
 */
void
VMS__start_the_work_then_wait_until_done_Seq()
 {
      //Instead of un-suspending threads, just call the one and only
      // core loop (sequential version), in the main thread.
   coreLoop_Seq( NULL );
 }
251 /*Create stack, then create __cdecl structure on it and put initialData and
252 * pointer to the new structure instance into the parameter positions on
253 * the stack
254 *Then put function pointer into nextInstrPt -- the stack is setup in std
255 * call structure, so jumping to function ptr is same as a GCC generated
256 * function call
257 *No need to save registers on old stack frame, because there's no old
258 * animator state to return to --
259 *
260 */
/*Allocates and initializes a virtual processor whose first animation will
 * be a jump into fnPtr( initialData, newPr ).
 *Creates a fresh stack and forges a __cdecl-style frame at its top, so the
 * core loop can switch to newPr->stackPtr and jump to newPr->nextInstrPt
 * exactly as if GCC had generated the call.
 *NOTE(review): the (int) casts of pointers and the "+1"/"+2" int-sized
 * parameter offsets assume a 32-bit platform -- not 64-bit clean; confirm
 * target architecture.
 *NOTE(review): the malloc of newPr itself is unchecked (the stack malloc
 * IS checked) -- a failure here dereferences NULL.
 */
VirtProcr *
VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
 { VirtProcr *newPr;
   char *stackLocs, *stackPtr;

   newPr = malloc( sizeof(VirtProcr) );
   newPr->procrID = numProcrsCreated++;   //global debug counter
   newPr->nextInstrPt = fnPtr;
   newPr->initialData = initialData;
   newPr->requests = NULL;   //empty singly-linked request list

      //fnPtr takes two params -- void *initData & void *animProcr
      //alloc stack locations, make stackPtr be the highest addr minus room
      // for 2 params + return addr. Return addr (NULL) is in loc pointed to
      // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above
   stackLocs = malloc( VIRT_PROCR_STACK_SIZE );
   if(stackLocs == 0) {perror("malloc stack"); exit(1);}
   newPr->startOfStack = stackLocs;   //saved so dissipate can free it
   stackPtr = ( (char *)stackLocs + VIRT_PROCR_STACK_SIZE - 0x10 );
      //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp
   *( (int *)stackPtr + 2 ) = (int) newPr; //rightmost param -- 32bit pointer
   *( (int *)stackPtr + 1 ) = (int) initialData; //next param to left
   newPr->stackPtr = stackPtr; //core loop will switch to this, then
   newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr

   return newPr;
 }
290 /*there is a label inside this function -- save the addr of this label in
291 * the callingPr struc, as the pick-up point from which to start the next
292 * work-unit for that procr. If turns out have to save registers, then
293 * save them in the procr struc too. Then do assembly jump to the CoreLoop's
294 * "done with work-unit" label. The procr struc is in the request in the
295 * slave that animated the just-ended work-unit, so all the state is saved
296 * there, and will get passed along, inside the request handler, to the
297 * next work-unit for that procr.
298 */
/*Suspends the calling virtual processor: records ResumePt as its resume
 * address, marks its sched slot done (handing the VP back to the Master),
 * saves the VP's hardware stack/frame pointers into its struct, then
 * restores the core loop's stack/frame pointers and jumps back into the
 * core loop. When a core loop later jumps to callingPr->nextInstrPt, the
 * trace "returns" from this call at ResumePt.
 *Uses the GCC "labels as values" extension (&&ResumePt) and 32-bit x86
 * inline assembly -- not portable off ia32/GCC.
 */
void
VMS__suspend_procr( VirtProcr *callingPr )
 { void *jmpPt, *stackPtrAddr, *framePtrAddr, *coreLoopStackPtr;
   void *coreLoopFramePtr;

      //The request to master will cause this suspended virt procr to get
      // scheduled again at some future point -- to resume, core loop jumps
      // to the resume point (below), which causes restore of saved regs and
      // "return" from this call.
   callingPr->nextInstrPt = &&ResumePt;

      //return ownership of the virt procr and sched slot to Master virt pr
   callingPr->schedSlot->workIsDone = TRUE;
//   coreIdx = callingPr->coreAnimatedBy;

      //addresses the asm below stores esp/ebp through
   stackPtrAddr = &(callingPr->stackPtr);
   framePtrAddr = &(callingPr->framePtr);

   jmpPt = callingPr->coreLoopStartPt;
   coreLoopFramePtr = callingPr->coreLoopFramePtr;//need this only
   coreLoopStackPtr = callingPr->coreLoopStackPtr;//shouldn't need -- safety

      //Eclipse's compilation sequence complains -- so break into two
      // separate in-line assembly pieces
      //Save the virt procr's stack and frame ptrs,
      //NOTE(review): stackPtrAddr/framePtrAddr are READ by %0/%1 yet are
      // declared as "=g" outputs with no inputs -- works only by accident
      // of register allocation; should be input operands. Confirm before
      // touching.
   asm volatile("movl %0, %%eax; \
                 movl %%esp, (%%eax); \
                 movl %1, %%eax; \
                 movl %%ebp, (%%eax) "\
   /* outputs */ : "=g" (stackPtrAddr), "=g" (framePtrAddr) \
   /* inputs */ : \
   /* clobber */ : "%eax" \
               );

      //restore coreloop's frame ptr, then jump back to "start" of core loop
      //Note, GCC compiles to assembly that saves esp and ebp in the stack
      // frame -- so have to explicitly do assembly that saves to memory
   asm volatile("movl %0, %%eax; \
                 movl %1, %%esp; \
                 movl %2, %%ebp; \
                 jmp %%eax " \
   /* outputs */ : \
   /* inputs */ : "m" (jmpPt), "m"(coreLoopStackPtr), "m"(coreLoopFramePtr)\
   /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \
               ); //list everything as clobbered to force GCC to save all
                  // live vars that are in regs on stack before this
                  // assembly, so that stack pointer is correct, before jmp

ResumePt:
   return;
 }
353 /*Not sure yet the form going to put "dissipate" in, so this is the third
354 * possibility -- the semantic layer can just make a macro that looks like
355 * a call to its name, then expands to a call to this.
356 *
357 *As of June 30, 2010 this looks like the top choice..
358 *
359 *This adds a request to dissipate, then suspends the processor so that the
360 * request handler will receive the request. The request handler is what
361 * does the work of freeing memory and removing the processor from the
362 * semantic environment's data structures.
363 *The request handler also is what figures out when to shutdown the VMS
364 * system -- which causes all the core loop threads to die, and returns from
365 * the call that started up VMS to perform the work.
366 *
367 *This form is a bit misleading to understand if one is trying to figure out
368 * how VMS works -- it looks like a normal function call, but inside it
369 * sends a request to the request handler and suspends the processor, which
370 * jumps out of the VMS__dissipate_procr function, and out of all nestings
371 * above it, transferring the work of dissipating to the request handler,
372 * which then does the actual work -- causing the processor that animated
373 * the call of this function to disappear and the "hanging" state of this
374 * function to just poof into thin air -- the virtual processor's trace
375 * never returns from this call, but instead the virtual processor's trace
376 * gets suspended in this call and all the virt processor's state disap-
377 * pears -- making that suspend the last thing in the virt procr's trace.
378 */
379 void
380 VMS__dissipate_procr( VirtProcr *procrToDissipate )
381 { VMSReqst *req;
383 req = malloc( sizeof(VMSReqst) );
384 // req->virtProcrFrom = callingPr;
385 req->reqType = dissipate;
386 req->nextReqst = procrToDissipate->requests;
387 procrToDissipate->requests = req;
389 VMS__suspend_procr( procrToDissipate );
390 }
393 /*This inserts the semantic-layer's request data into standard VMS carrier
394 */
395 inline void
396 VMS__add_sem_request( void *semReqData, VirtProcr *callingPr )
397 { VMSReqst *req;
399 req = malloc( sizeof(VMSReqst) );
400 // req->virtProcrFrom = callingPr;
401 req->reqType = semantic;
402 req->semReqData = semReqData;
403 req->nextReqst = callingPr->requests;
404 callingPr->requests = req;
405 }
409 //TODO: add a semantic-layer supplied "freer" for the semantic-data portion
410 // of a request -- IE call with both a virt procr and a fn-ptr to request
411 // freer (or maybe put request freer as a field in virt procr?)
412 void
413 VMS__remove_and_free_top_request( VirtProcr *procrWithReq )
414 { VMSReqst *req;
416 req = procrWithReq->requests;
417 if( req == NULL ) return;
418 procrWithReq->requests = procrWithReq->requests->nextReqst;
419 VMS__free_request( req );
420 }
423 //TODO: add a semantic-layer supplied "freer" for the semantic-data portion
424 // of a request -- IE call with both a virt procr and a fn-ptr to request
425 // freer (also maybe put sem request freer as a field in virt procr?)
426 //SSR relies right now on this only freeing VMS layer of request -- the
427 // semantic portion of request is alloc'd and freed by request handler
/*Frees only the VMS-layer carrier of a request -- the semantic portion is
 * alloc'd and freed by the request handler (SSR relies on this).
 *TODO: add a semantic-layer supplied "freer" for the semantic-data portion.
 */
void
VMS__free_request( VMSReqst *req )
 {
   free( req );
 }
434 VMSReqst *
435 VMS__take_top_request_from( VirtProcr *procrWithReq )
436 { VMSReqst *req;
438 req = procrWithReq->requests;
439 if( req == NULL ) return req;
441 procrWithReq->requests = procrWithReq->requests->nextReqst;
442 return req;
443 }
445 VMSReqst *
446 VMS__free_top_and_give_next_request_from( VirtProcr *procrWithReq )
447 { VMSReqst *req;
449 req = procrWithReq->requests;
450 if( req == NULL ) return req;
452 procrWithReq->requests = procrWithReq->requests->nextReqst;
453 VMS__free_request( req );
454 return procrWithReq->requests;
455 }
/*Returns non-zero when the request carries semantic-layer data (retrieve
 * it with VMS__take_sem_reqst_from).
 */
inline int
VMS__isSemanticReqst( VMSReqst *req )
 {
   return ( req->reqType == semantic );
 }
/*Returns the semantic-layer data carried inside a VMS request carrier.
 *Does not detach or free anything.
 */
inline void *
VMS__take_sem_reqst_from( VMSReqst *req )
 {
   return req->semReqData;
 }
/*Returns non-zero when the request asks to dissipate the processor.
 */
inline int
VMS__isDissipateReqst( VMSReqst *req )
 {
   return ( req->reqType == dissipate );
 }
/*Returns non-zero when the request registers a newly created processor
 * (see VMS__send_register_new_procr_request).
 */
inline int
VMS__isCreateReqst( VMSReqst *req )
 {
   return ( req->reqType == regCreated );
 }
483 void
484 VMS__send_register_new_procr_request(VirtProcr *newPr, VirtProcr *reqstingPr)
485 { VMSReqst *req;
487 req = malloc( sizeof(VMSReqst) );
488 req->reqType = regCreated;
489 req->semReqData = newPr;
490 req->nextReqst = reqstingPr->requests;
491 reqstingPr->requests = req;
493 VMS__suspend_procr( reqstingPr );
494 }
498 /*This must be called by the request handler plugin -- it cannot be called
499 * from the semantic library "dissipate processor" function -- instead, the
500 * semantic layer has to generate a request for the plug-in to call this
501 * function.
502 *The reason is that this frees the virtual processor's stack -- which is
503 * still in use inside semantic library calls!
504 *
505 *This frees or recycles all the state owned by and comprising the VMS
506 * portion of the animating virtual procr. The request handler must first
507 * free any semantic data created for the processor that didn't use the
508 * VMS_malloc mechanism. Then it calls this, which first asks the malloc
509 * system to disown any state that did use VMS_malloc, and then frees the
 * stack and the processor-struct itself.
511 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
512 * state, then that state gets freed (or sent to recycling) as a side-effect
513 * of dis-owning it.
514 */
515 void
516 VMS__handle_dissipate_reqst( VirtProcr *animatingPr )
517 {
518 //dis-own all locations owned by this processor, causing to be freed
519 // any locations that it is (was) sole owner of
520 //TODO: implement VMS__malloc system, including "give up ownership"
522 //The dissipate request might still be attached, so remove and free it
523 VMS__remove_and_free_top_request( animatingPr );
525 //NOTE: initialData was given to the processor, so should either have
526 // been alloc'd with VMS__malloc, or freed by the level above animPr.
527 //So, all that's left to free here is the stack and the VirtProcr struc
528 // itself
529 free( animatingPr->startOfStack );
530 free( animatingPr );
531 }
534 //TODO: re-architect so that have clean separation between request handler
535 // and master loop, for dissipate, create, shutdown, and other non-semantic
536 // requests. Issue is chain: one removes requests from AppVP, one dispatches
537 // on type of request, and one handles each type.. but some types require
538 // action from both request handler and master loop -- maybe just give the
539 // request handler calls like: VMS__handle_X_request_type
//Forward declaration -- defined below; animated by the shutdown processors
void
endOSThreadFn( void *initData, VirtProcr *animatingPr );
544 /*This is called by the semantic layer's request handler when it decides its
545 * time to shut down the VMS system. Calling this causes the core loop OS
546 * threads to exit, which unblocks the entry-point function that started up
547 * VMS, and allows it to grab the result and return to the original single-
548 * threaded application.
549 *
550 *The _VMSMasterEnv is needed by this shut down function, so the create-seed-
551 * and-wait function has to free a bunch of stuff after it detects the
552 * threads have all died: the masterEnv, the thread-related locations,
553 * masterVP any AppVPs that might still be allocated and sitting in the
554 * semantic environment, or have been orphaned in the _VMSWorkQ.
555 *
556 *NOTE: the semantic plug-in is expected to use VMS__malloc_to to get all the
557 * locations it needs, and give ownership to masterVP. Then, they will be
558 * automatically freed when the masterVP is dissipated. (This happens after
559 * the core loop threads have all exited)
560 *
561 *In here,create one core-loop shut-down processor for each core loop and put
562 * them all directly into the workQ.
563 *Note, this function can ONLY be called after the semantic environment no
564 * longer cares if AppVPs get animated after the point this is called. In
565 * other words, this can be used as an abort, or else it should only be
566 * called when all AppVPs have finished dissipate requests -- only at that
567 * point is it sure that all results have completed.
568 */
569 void
570 VMS__handle_shutdown_reqst( void *dummy, VirtProcr *animatingPr )
571 { int coreIdx;
572 VirtProcr *shutDownPr;
573 VMSQueueStruc *workQ = _VMSWorkQ;
575 //create the shutdown processors, one for each core loop -- put them
576 // directly into _VMSWorkQ -- each core will die when gets one, so
577 // the system distributes them evenly itself.
578 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
579 {
580 shutDownPr = VMS__create_procr( &endOSThreadFn, NULL );
581 writeVMSQ( shutDownPr, workQ );
582 }
584 }
587 /*Am trying to be cute, avoiding IF statement in coreLoop that checks for
588 * a special shutdown procr. Ended up with extra-complex shutdown sequence.
589 *This function has the sole purpose of setting the stack and framePtr
590 * to the coreLoop's stack and framePtr.. it does that then jumps to the
591 * core loop's shutdown point -- might be able to just call Pthread_exit
592 * from here, but am going back to the pthread's stack and setting everything
593 * up just as if it never jumped out, before calling pthread_exit.
594 *The end-point of core loop will free the stack and so forth of the
 * processor that animates this function, (this fn is transferring the
596 * animator of the AppVP that is in turn animating this function over
597 * to core loop function -- note that this slices out a level of virtual
598 * processors).
599 */
/*Body of the shutdown processors: restores the animating core loop's
 * stack and frame pointers, then jumps to the core loop's end point --
 * which calls pthread_exit as if the loop had never jumped out.
 *Never returns; the core loop's end point frees this processor's state.
 *32-bit x86 GCC inline assembly, same pattern as VMS__suspend_procr.
 */
void
endOSThreadFn( void *initData, VirtProcr *animatingPr )
 { void *jmpPt, *coreLoopStackPtr, *coreLoopFramePtr;

      //target and saved pointers recorded when the core loop started
   jmpPt = _VMSMasterEnv->coreLoopEndPt;
   coreLoopStackPtr = animatingPr->coreLoopStackPtr;
   coreLoopFramePtr = animatingPr->coreLoopFramePtr;

      //switch back onto the pthread's own stack, then jump to its end point
   asm volatile("movl %0, %%eax; \
                 movl %1, %%esp; \
                 movl %2, %%ebp; \
                 jmp %%eax " \
   /* outputs */ : \
   /* inputs */ : "m" (jmpPt), "m"(coreLoopStackPtr), "m"(coreLoopFramePtr)\
   /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \
               );
 }
/*This is called after the threads have shut down and control has returned
622 * to the semantic layer, in the entry point function in the main thread.
623 * It has to free anything allocated during VMS_init, and any other alloc'd
624 * locations that might be left over.
625 */
626 void
627 VMS__cleanup_after_shutdown()
628 { int i;
630 free( _VMSWorkQ );
631 free( _VMSMasterEnv->filledSlots );
632 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
633 {
634 free( _VMSMasterEnv->schedSlots[i] );
635 }
637 free( _VMSMasterEnv->schedSlots);
638 VMS__handle_dissipate_reqst( _VMSMasterEnv->masterVirtPr );
640 free( _VMSMasterEnv );
641 }
644 //===========================================================================
/*Reads the CPU time-stamp counter and returns it as one 64-bit TSCount.
 *saveTimeStampCountInto presumably wraps rdtsc, filling the low and high
 * 32-bit halves -- TODO confirm against its definition in the header.
 */
inline TSCount getTSCount()
 { unsigned int low, high;
   TSCount out;

   saveTimeStampCountInto( low, high );
   out = high;
   out = (out << 32) + low;   //recombine: high word above the low word
   return out;
 }