view VMS.c @ 30:c8823e0bb2b4

Started adding own version of malloc and free, just in case they're using TLS and causing the issues.
author Me
date Mon, 09 Aug 2010 02:24:31 -0700
parents 0e008278fe3c
children e69579a0e797 65e5918731b8
line source
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <malloc.h>
11 #include "VMS.h"
12 #include "Queue_impl/BlockingQueue.h"
#define thdAttrs NULL   //pthread_create attributes -- NULL selects the defaults

//===========================================================================
//Forward declarations for setup helpers defined later in this file
void
shutdownFn( void *dummy, VirtProcr *dummy2 );

void
create_sched_slots( MasterEnv *masterEnv );

void
create_masterEnv();

void
create_the_coreLoop_OS_threads();

//Lock and condition the core-loop threads block on until setup completes
// (setupComplete is set and broadcast in VMS__start_the_work_then_wait_until_done)
pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER;
33 //===========================================================================
35 /*Setup has two phases:
36 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
37 * the master virt procr into the work-queue, ready for first "call"
38 * 2) Semantic layer then does its own init, which creates the seed virt
39 * procr inside the semantic layer, ready to schedule it when
40 * asked by the first run of the masterLoop.
41 *
*This part is a bit weird because VMS really wants to be "always there", and
43 * have applications attach and detach.. for now, this VMS is part of
44 * the app, so the VMS system starts up as part of running the app.
45 *
46 *The semantic layer is isolated from the VMS internals by making the
47 * semantic layer do setup to a state that it's ready with its
48 * initial virt procrs, ready to schedule them to slots when the masterLoop
49 * asks. Without this pattern, the semantic layer's setup would
50 * have to modify slots directly to assign the initial virt-procrs, and put
51 * them into the workQ itself, breaking the isolation completely.
52 *
53 *
54 *The semantic layer creates the initial virt procr(s), and adds its
55 * own environment to masterEnv, and fills in the pointers to
56 * the requestHandler and slaveScheduler plug-in functions
57 */
/*This allocates VMS data structures, populates the master VMSProc,
 * and master environment, and returns the master environment to the semantic
 * layer.
 *This is phase 1 of setup (see the overview comment above); the semantic
 * layer calls it before performing its own initialization.
 */
void
VMS__init()
 {
   create_masterEnv();                //build env, master VP, and sched slots
   create_the_coreLoop_OS_threads();  //spawn one core-loop OS thread per core
 }
/*To initialize the sequential version, just don't create the threads --
 * VMS__start_the_work_then_wait_until_done_Seq later runs the single core
 * loop directly in the main thread.
 */
void
VMS__init_Seq()
 {
   create_masterEnv();
 }
78 void
79 create_masterEnv()
80 { MasterEnv *masterEnv;
81 VMSQueueStruc *workQ;
83 //Make the central work-queue
84 _VMSWorkQ = makeVMSQ();
85 workQ = _VMSWorkQ;
87 _VMSMasterEnv = malloc( sizeof(MasterEnv) );
88 masterEnv = _VMSMasterEnv;
90 //create the master virtual processor
91 masterEnv->masterVirtPr = VMS__create_procr( &masterLoop, masterEnv );
93 create_sched_slots( masterEnv );
95 masterEnv->stillRunning = FALSE;
96 masterEnv->numToPrecede = NUM_CORES;
98 //First core loop to start up gets this, which will schedule seed Pr
99 //TODO: debug: check address of masterVirtPr
100 writeVMSQ( masterEnv->masterVirtPr, workQ );
102 numProcrsCreated = 1; //global counter for debugging
104 //==================== malloc substitute ========================
105 //
106 //Testing whether malloc is using thread-local storage and therefore
107 // causing unreliable behavior.
108 //Just allocate a massive chunk of memory and roll own malloc/free and
109 // make app use VMS__malloc_to, which will suspend and perform malloc
110 // in the master, taking from this massive chunk.
112 // initFreeList();
113 }
115 /*
116 void
117 initMasterMalloc()
118 {
119 _VMSMasterEnv->mallocChunk = malloc( MASSIVE_MALLOC_SIZE );
121 //The free-list element is the first several locations of an
122 // allocated chunk -- the address given to the application is pre-
123 // pended with both the ownership structure and the free-list struc.
124 //So, write the values of these into the first locations of
125 // mallocChunk -- which marks it as free & puts in its size.
126 listElem = (FreeListElem *)_VMSMasterEnv->mallocChunk;
127 listElem->size = MASSIVE_MALLOC_SIZE - NUM_PREPEND_BYTES
128 listElem->next = NULL;
129 }
131 void
132 dissipateMasterMalloc()
133 {
134 //Just foo code -- to get going -- doing as if free list were link-list
135 currElem = _VMSMasterEnv->freeList;
136 while( currElem != NULL )
137 {
138 nextElem = currElem->next;
139 masterFree( currElem );
140 currElem = nextElem;
141 }
142 free( _VMSMasterEnv->freeList );
143 }
144 */
146 void
147 create_sched_slots( MasterEnv *masterEnv )
148 { SchedSlot **schedSlots, **filledSlots;
149 int i;
151 schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
152 filledSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
153 masterEnv->schedSlots = schedSlots;
154 masterEnv->filledSlots = filledSlots;
156 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
157 {
158 schedSlots[i] = malloc( sizeof(SchedSlot) );
160 //Set state to mean "handling requests done, slot needs filling"
161 schedSlots[i]->workIsDone = FALSE;
162 schedSlots[i]->needsProcrAssigned = TRUE;
163 }
164 }
167 void
168 create_the_coreLoop_OS_threads()
169 {
170 //========================================================================
171 // Create the Threads
172 int coreIdx, retCode;
174 //Need the threads to be created suspended, and wait for a signal
175 // before proceeding -- gives time after creating to initialize other
176 // stuff before the coreLoops set off.
177 _VMSMasterEnv->setupComplete = 0;
179 //Make the threads that animate the core loops
180 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
181 { coreLoopThdParams[coreIdx] = malloc( sizeof(ThdParams) );
182 coreLoopThdParams[coreIdx]->coreNum = coreIdx;
184 retCode =
185 pthread_create( &(coreLoopThdHandles[coreIdx]),
186 thdAttrs,
187 &coreLoop,
188 (void *)(coreLoopThdParams[coreIdx]) );
189 if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(0);}
190 }
191 }
/*Semantic layer calls this when it wants the system to start running.
194 *
195 *This starts the core loops running then waits for them to exit.
196 */
197 void
198 VMS__start_the_work_then_wait_until_done()
199 { int coreIdx;
200 //Start the core loops running
201 //===========================================================================
202 TSCount startCount, endCount;
203 unsigned long long count = 0, freq = 0;
204 double runTime;
206 startCount = getTSCount();
208 //tell the core loop threads that setup is complete
209 //get lock, to lock out any threads still starting up -- they'll see
210 // that setupComplete is true before entering while loop, and so never
211 // wait on the condition
212 pthread_mutex_lock( &suspendLock );
213 _VMSMasterEnv->setupComplete = 1;
214 pthread_mutex_unlock( &suspendLock );
215 pthread_cond_broadcast( &suspend_cond );
218 //wait for all to complete
219 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
220 {
221 pthread_join( coreLoopThdHandles[coreIdx], NULL );
222 }
224 //NOTE: do not clean up VMS env here -- semantic layer has to have
225 // a chance to clean up its environment first, then do a call to free
226 // the Master env and rest of VMS locations
229 endCount = getTSCount();
230 count = endCount - startCount;
232 runTime = (double)count / (double)TSCOUNT_FREQ;
234 printf("\n Time startup to shutdown: %f\n", runTime); fflush( stdin );
235 }
/*Only difference between version with an OS thread pinned to each core and
 * the sequential version of VMS is VMS__init_Seq, this, and coreLoop_Seq.
 */
void
VMS__start_the_work_then_wait_until_done_Seq()
 {
   //Instead of un-suspending threads, just call the one and only
   // core loop (sequential version), in the main thread.
   coreLoop_Seq( NULL );
 }
251 /*Create stack, then create __cdecl structure on it and put initialData and
252 * pointer to the new structure instance into the parameter positions on
253 * the stack
254 *Then put function pointer into nextInstrPt -- the stack is setup in std
255 * call structure, so jumping to function ptr is same as a GCC generated
256 * function call
257 *No need to save registers on old stack frame, because there's no old
258 * animator state to return to --
259 *
260 */
261 VirtProcr *
262 VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
263 { VirtProcr *newPr;
264 char *stackLocs, *stackPtr;
266 newPr = malloc( sizeof(VirtProcr) );
267 newPr->procrID = numProcrsCreated++;
268 newPr->nextInstrPt = fnPtr;
269 newPr->initialData = initialData;
271 //fnPtr takes two params -- void *initData & void *animProcr
272 //alloc stack locations, make stackPtr be the highest addr minus room
273 // for 2 params + return addr. Return addr (NULL) is in loc pointed to
274 // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above
275 stackLocs = malloc( VIRT_PROCR_STACK_SIZE );
276 if(stackLocs == 0)
277 {perror("malloc stack"); exit(1);}
278 newPr->startOfStack = stackLocs;
279 stackPtr = ( (char *)stackLocs + VIRT_PROCR_STACK_SIZE - 0x10 );
280 //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp
281 *( (int *)stackPtr + 2 ) = (int) newPr; //rightmost param -- 32bit pointer
282 *( (int *)stackPtr + 1 ) = (int) initialData; //next param to left
283 newPr->stackPtr = stackPtr; //core loop will switch to this, then
284 newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr
286 return newPr;
287 }
290 /*there is a label inside this function -- save the addr of this label in
291 * the callingPr struc, as the pick-up point from which to start the next
292 * work-unit for that procr. If turns out have to save registers, then
293 * save them in the procr struc too. Then do assembly jump to the CoreLoop's
294 * "done with work-unit" label. The procr struc is in the request in the
295 * slave that animated the just-ended work-unit, so all the state is saved
296 * there, and will get passed along, inside the request handler, to the
297 * next work-unit for that procr.
298 */
/*Suspends the calling virtual processor: records its resume point and live
 * esp/ebp in its struct, marks its sched slot done, then switches back to
 * the core loop's stack/frame and jumps to the core loop's start point.
 *See the comment above for how the saved state flows back through the
 * request handler to the VP's next work-unit.  32-bit x86 / GCC only.
 */
void
VMS__suspend_procr( VirtProcr *callingPr )
 { void *jmpPt, *stackPtrAddr, *framePtrAddr, *coreLoopStackPtr;
   void *coreLoopFramePtr;

   //The request to master will cause this suspended virt procr to get
   // scheduled again at some future point -- to resume, core loop jumps
   // to the resume point (below), which causes restore of saved regs and
   // "return" from this call.
   //(&&label is GCC's labels-as-values extension)
   callingPr->nextInstrPt = &&ResumePt;

   //return ownership of the virt procr and sched slot to Master virt pr
   callingPr->schedSlot->workIsDone = TRUE;
   // coreIdx = callingPr->coreAnimatedBy;

   //addresses the first asm block stores the live esp/ebp through
   stackPtrAddr = &(callingPr->stackPtr);
   framePtrAddr = &(callingPr->framePtr);

   jmpPt = callingPr->coreLoopStartPt;
   coreLoopFramePtr = callingPr->coreLoopFramePtr;//need this only
   coreLoopStackPtr = callingPr->coreLoopStackPtr;//shouldn't need -- safety

   //Eclipse's compilation sequence complains -- so break into two
   // separate in-line assembly pieces
   //Save the virt procr's stack and frame ptrs,
   //NOTE(review): stackPtrAddr/framePtrAddr are listed as outputs ("=g")
   // yet read as %0/%1 -- they should arguably be inputs; confirm the
   // constraints still produce correct code on current GCC versions
   asm volatile("movl %0, %%eax; \
   movl %%esp, (%%eax); \
   movl %1, %%eax; \
   movl %%ebp, (%%eax) "\
   /* outputs */ : "=g" (stackPtrAddr), "=g" (framePtrAddr) \
   /* inputs */ : \
   /* clobber */ : "%eax" \
   );

   //restore coreloop's frame ptr, then jump back to "start" of core loop
   //Note, GCC compiles to assembly that saves esp and ebp in the stack
   // frame -- so have to explicitly do assembly that saves to memory
   asm volatile("movl %0, %%eax; \
   movl %1, %%esp; \
   movl %2, %%ebp; \
   jmp %%eax " \
   /* outputs */ : \
   /* inputs */ : "m" (jmpPt), "m"(coreLoopStackPtr), "m"(coreLoopFramePtr)\
   /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \
   ); //list everything as clobbered to force GCC to save all
      // live vars that are in regs on stack before this
      // assembly, so that stack pointer is correct, before jmp

   //Execution resumes here when a core loop later jumps to nextInstrPt
 ResumePt:
   return;
 }
/*This is equivalent to "jump back to core loop" -- it's mainly only used
 * just after adding dissipate request to a processor -- so the semantic
 * layer is the only place it will be seen and/or used.
 *
 *It does almost the same thing as suspend, except don't need to save the
 * stack nor set the nextInstrPt
 *
 *As of June 30, 2010 just implementing as a call to suspend -- just sugar
 * (suspend does the extra saving harmlessly; the VP is never resumed here).
 */
void
VMS__return_from_fn( VirtProcr *animatingPr )
 {
   VMS__suspend_procr( animatingPr );
 }
369 /*Not sure yet the form going to put "dissipate" in, so this is the third
370 * possibility -- the semantic layer can just make a macro that looks like
371 * a call to its name, then expands to a call to this.
372 *
373 *As of June 30, 2010 this looks like the top choice..
374 *
375 *This adds a request to dissipate, then suspends the processor so that the
376 * request handler will receive the request. The request handler is what
377 * does the work of freeing memory and removing the processor from the
378 * semantic environment's data structures.
379 *The request handler also is what figures out when to shutdown the VMS
380 * system -- which causes all the core loop threads to die, and returns from
381 * the call that started up VMS to perform the work.
382 *
383 *This form is a bit misleading to understand if one is trying to figure out
384 * how VMS works -- it looks like a normal function call, but inside it
385 * sends a request to the request handler and suspends the processor, which
386 * jumps out of the VMS__dissipate_procr function, and out of all nestings
387 * above it, transferring the work of dissipating to the request handler,
388 * which then does the actual work -- causing the processor that animated
389 * the call of this function to disappear and the "hanging" state of this
390 * function to just poof into thin air -- the virtual processor's trace
391 * never returns from this call, but instead the virtual processor's trace
392 * gets suspended in this call and all the virt processor's state disap-
393 * pears -- making that suspend the last thing in the virt procr's trace.
394 */
395 void
396 VMS__dissipate_procr( VirtProcr *procrToDissipate )
397 { VMSReqst *req;
399 req = malloc( sizeof(VMSReqst) );
400 // req->virtProcrFrom = callingPr;
401 req->reqType = dissipate;
402 req->nextReqst = procrToDissipate->requests;
403 procrToDissipate->requests = req;
405 VMS__suspend_procr( procrToDissipate );
406 }
409 /*This inserts the semantic-layer's request data into standard VMS carrier
410 */
411 inline void
412 VMS__add_sem_request( void *semReqData, VirtProcr *callingPr )
413 { VMSReqst *req;
415 req = malloc( sizeof(VMSReqst) );
416 // req->virtProcrFrom = callingPr;
417 req->reqType = semantic;
418 req->semReqData = semReqData;
419 req->nextReqst = callingPr->requests;
420 callingPr->requests = req;
421 }
425 //TODO: add a semantic-layer supplied "freer" for the semantic-data portion
426 // of a request -- IE call with both a virt procr and a fn-ptr to request
427 // freer (or maybe put request freer as a field in virt procr?)
428 void
429 VMS__remove_and_free_top_request( VirtProcr *procrWithReq )
430 { VMSReqst *req;
432 req = procrWithReq->requests;
433 if( req == NULL ) return;
434 procrWithReq->requests = procrWithReq->requests->nextReqst;
435 VMS__free_request( req );
436 }
//TODO: add a semantic-layer supplied "freer" for the semantic-data portion
// of a request -- IE call with both a virt procr and a fn-ptr to request
// freer (also maybe put sem request freer as a field in virt procr?)
//VMSHW relies right now on this only freeing VMS layer of request -- the
// semantic portion of request is alloc'd and freed by request handler
void
VMS__free_request( VMSReqst *req )
 {
   free( req );   //frees only the VMS carrier, never req->semReqData
 }
450 VMSReqst *
451 VMS__take_top_request_from( VirtProcr *procrWithReq )
452 { VMSReqst *req;
454 req = procrWithReq->requests;
455 if( req == NULL ) return req;
457 procrWithReq->requests = procrWithReq->requests->nextReqst;
458 return req;
459 }
461 inline int
462 VMS__isSemanticReqst( VMSReqst *req )
463 {
464 return ( req->reqType == semantic );
465 }
//Returns the semantic-layer payload carried inside a VMS request carrier
// (does not detach or free anything)
inline void *
VMS__take_sem_reqst_from( VMSReqst *req )
 {
   return req->semReqData;
 }
//Non-zero when the request asks to dissipate the virtual processor
inline int
VMS__isDissipateReqst( VMSReqst *req )
 {
   return ( req->reqType == dissipate );
 }
//Non-zero when the request registers a newly created virtual processor
inline int
VMS__isCreateReqst( VMSReqst *req )
 {
   return ( req->reqType == regCreated );
 }
486 void
487 VMS__send_register_new_procr_request(VirtProcr *newPr, VirtProcr *reqstingPr)
488 { VMSReqst *req;
490 req = malloc( sizeof(VMSReqst) );
491 req->reqType = regCreated;
492 req->semReqData = newPr;
493 req->nextReqst = reqstingPr->requests;
494 reqstingPr->requests = req;
496 VMS__suspend_procr( reqstingPr );
497 }
501 /*This must be called by the request handler plugin -- it cannot be called
502 * from the semantic library "dissipate processor" function -- instead, the
503 * semantic layer has to generate a request for the plug-in to call this
504 * function.
505 *The reason is that this frees the virtual processor's stack -- which is
506 * still in use inside semantic library calls!
507 *
508 *This frees or recycles all the state owned by and comprising the VMS
509 * portion of the animating virtual procr. The request handler must first
510 * free any semantic data created for the processor that didn't use the
511 * VMS_malloc mechanism. Then it calls this, which first asks the malloc
512 * system to disown any state that did use VMS_malloc, and then frees the
* stack and the processor-struct itself.
514 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
515 * state, then that state gets freed (or sent to recycling) as a side-effect
516 * of dis-owning it.
517 */
/*Frees the VMS-owned state of a dissipated processor: any still-attached
 * top request, the VP's private stack, and the VirtProcr struct itself.
 *Must only be called from the request handler plug-in -- never from code
 * still running on the VP's own stack (see comment above).
 */
void
VMS__handle_dissipate_reqst( VirtProcr *animatingPr )
 {
   //dis-own all locations owned by this processor, causing to be freed
   // any locations that it is (was) sole owner of
   //TODO: implement VMS__malloc system, including "give up ownership"

   //The dissipate request might still be attached, so remove and free it
   VMS__remove_and_free_top_request( animatingPr );

   //NOTE: initialData was given to the processor, so should either have
   // been alloc'd with VMS__malloc, or freed by the level above animPr.
   //So, all that's left to free here is the stack and the VirtProcr struc
   // itself
   free( animatingPr->startOfStack );
   free( animatingPr );
 }
537 //TODO: re-architect so that have clean separation between request handler
538 // and master loop, for dissipate, create, shutdown, and other non-semantic
539 // requests. Issue is chain: one removes requests from AppVP, one dispatches
540 // on type of request, and one handles each type.. but some types require
541 // action from both request handler and master loop -- maybe just give the
542 // request handler calls like: VMS__handle_X_request_type
//Defined below -- body of the shutdown VPs created by VMS__handle_shutdown_reqst
void
endOSThreadFn( void *initData, VirtProcr *animatingPr );
547 /*This is called by the semantic layer's request handler when it decides its
548 * time to shut down the VMS system. Calling this causes the core loop OS
549 * threads to exit, which unblocks the entry-point function that started up
550 * VMS, and allows it to grab the result and return to the original single-
551 * threaded application.
552 *
553 *The _VMSMasterEnv is needed by this shut down function, so the create-seed-
554 * and-wait function has to free a bunch of stuff after it detects the
555 * threads have all died: the masterEnv, the thread-related locations,
556 * masterVP any AppVPs that might still be allocated and sitting in the
557 * semantic environment, or have been orphaned in the _VMSWorkQ.
558 *
559 *NOTE: the semantic plug-in is expected to use VMS__malloc_to to get all the
560 * locations it needs, and give ownership to masterVP. Then, they will be
561 * automatically freed when the masterVP is dissipated. (This happens after
562 * the core loop threads have all exited)
563 *
564 *In here,create one core-loop shut-down processor for each core loop and put
565 * them all directly into the workQ.
566 *Note, this function can ONLY be called after the semantic environment no
567 * longer cares if AppVPs get animated after the point this is called. In
568 * other words, this can be used as an abort, or else it should only be
569 * called when all AppVPs have finished dissipate requests -- only at that
570 * point is it sure that all results have completed.
571 */
572 void
573 VMS__handle_shutdown_reqst( void *dummy, VirtProcr *animatingPr )
574 { int coreIdx;
575 VirtProcr *shutDownPr;
576 VMSQueueStruc *workQ = _VMSWorkQ;
578 //create the shutdown processors, one for each core loop -- put them
579 // directly into _VMSWorkQ -- each core will die when gets one, so
580 // the system distributes them evenly itself.
581 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
582 {
583 shutDownPr = VMS__create_procr( &endOSThreadFn, NULL );
584 writeVMSQ( shutDownPr, workQ );
585 }
587 }
590 /*Am trying to be cute, avoiding IF statement in coreLoop that checks for
591 * a special shutdown procr. Ended up with extra-complex shutdown sequence.
592 *This function has the sole purpose of setting the stack and framePtr
593 * to the coreLoop's stack and framePtr.. it does that then jumps to the
594 * core loop's shutdown point -- might be able to just call Pthread_exit
595 * from here, but am going back to the pthread's stack and setting everything
596 * up just as if it never jumped out, before calling pthread_exit.
597 *The end-point of core loop will free the stack and so forth of the
* processor that animates this function, (this fn is transferring the
599 * animator of the AppVP that is in turn animating this function over
600 * to core loop function -- note that this slices out a level of virtual
601 * processors).
602 */
/*Body of a shutdown VP: restores the animating core-loop thread's own
 * stack and frame pointers, then jumps to the core loop's saved end point
 * -- so the pthread resumes as if the core loop had run to completion and
 * can clean up and call pthread_exit itself (see comment above).
 *Never returns.  32-bit x86 / GCC inline-asm only.
 */
void
endOSThreadFn( void *initData, VirtProcr *animatingPr )
 { void *jmpPt, *coreLoopStackPtr, *coreLoopFramePtr;

   jmpPt = _VMSMasterEnv->coreLoopEndPt;   //label saved by the core loop
   coreLoopStackPtr = animatingPr->coreLoopStackPtr;
   coreLoopFramePtr = animatingPr->coreLoopFramePtr;

   //switch esp/ebp back to the core loop's frame, then jump -- never returns
   asm volatile("movl %0, %%eax; \
   movl %1, %%esp; \
   movl %2, %%ebp; \
   jmp %%eax " \
   /* outputs */ : \
   /* inputs */ : "m" (jmpPt), "m"(coreLoopStackPtr), "m"(coreLoopFramePtr)\
   /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \
   );
 }
624 /*This is called after the threads have shut down and control as returned
625 * to the semantic layer, in the entry point function in the main thread.
626 * It has to free anything allocated during VMS_init, and any other alloc'd
627 * locations that might be left over.
628 */
629 void
630 VMS__cleanup_after_shutdown()
631 { int i;
633 free( _VMSWorkQ );
634 free( _VMSMasterEnv->filledSlots );
635 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
636 {
637 free( _VMSMasterEnv->schedSlots[i] );
638 }
640 free( _VMSMasterEnv->schedSlots);
641 VMS__handle_dissipate_reqst( _VMSMasterEnv->masterVirtPr );
643 free( _VMSMasterEnv );
644 }
//===========================================================================
/*Reads the CPU time-stamp counter and returns it as one 64-bit TSCount.
 *saveTimeStampCountInto is presumably an rdtsc wrapper macro that fills the
 * 32-bit low/high halves -- TODO confirm against VMS.h.
 */
inline TSCount getTSCount()
 { unsigned int low, high;
   TSCount out;

   saveTimeStampCountInto( low, high );
   out = high;
   out = (out << 32) + low;   //assemble the 64-bit count from the two halves
   return out;
 }