view VMS.c @ 25:c556193f7211

Linux Version -- first set of mods changing from win to linux
author Me
date Sat, 24 Jul 2010 08:58:47 -0700
parents 2b161e1a50ee
children 668278fa7a63
line source
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <malloc.h>
11 #include "VMS.h"
12 #include "Queue_impl/BlockingQueue.h"
15 //===========================================================================
16 void
17 shutdownFn( void *dummy, VirtProcr *dummy2 );
19 void
20 create_sched_slots( MasterEnv *masterEnv );
22 //===========================================================================
24 /*Setup has two phases:
25 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
26 * the master virt procr into the work-queue, ready for first "call"
27 * 2) Semantic layer then does its own init, which creates the seed virt
28 * procr inside the semantic layer, ready to schedule it when
29 * asked by the first run of the masterLoop.
30 *
 *This part is a bit weird because VMS really wants to be "always there", and
32 * have applications attach and detach.. for now, this VMS is part of
33 * the app, so the VMS system starts up as part of running the app.
34 *
35 *The semantic layer is isolated from the VMS internals by making the
36 * semantic layer do setup to a state that it's ready with its
37 * initial virt procrs, ready to schedule them to slots when the masterLoop
38 * asks. Without this pattern, the semantic layer's setup would
39 * have to modify slots directly to assign the initial virt-procrs, and put
40 * them into the workQ itself, breaking the isolation completely.
41 *
42 *
43 *The semantic layer creates the initial virt procr(s), and adds its
44 * own environment to masterEnv, and fills in the pointers to
45 * the requestHandler and slaveScheduler plug-in functions
46 */
48 /*This allocates VMS data structures, populates the master VMSProc,
49 * and master environment, and returns the master environment to the semantic
50 * layer.
51 */
52 void
53 VMS__init()
54 { MasterEnv *masterEnv;
55 CASQueueStruc *workQ;
57 //Make the central work-queue
58 _VMSWorkQ = makeCASQ();
59 workQ = _VMSWorkQ;
61 _VMSMasterEnv = malloc( sizeof(MasterEnv) );
62 masterEnv = _VMSMasterEnv;
64 //create the master virtual processor
65 masterEnv->masterVirtPr = VMS__create_procr( &masterLoop, masterEnv );
67 create_sched_slots( masterEnv );
69 //Set slot 0 to be the master virt procr & set flags just in case
70 masterEnv->schedSlots[0]->needsProcrAssigned = FALSE; //says don't touch
71 masterEnv->schedSlots[0]->workIsDone = FALSE; //says don't touch
72 masterEnv->schedSlots[0]->procrAssignedToSlot = masterEnv->masterVirtPr;
73 masterEnv->masterVirtPr->schedSlot = masterEnv->schedSlots[0];
74 masterEnv->stillRunning = FALSE;
76 //First core loop to start up gets this, which will schedule seed Pr
77 //TODO: debug: check address of masterVirtPr
78 writeCASQ( masterEnv->masterVirtPr, workQ );
80 numProcrsCreated = 1;
82 //========================================================================
83 // Create the Threads
84 int coreIdx, retCode;
85 #define thdAttrs NULL
87 _VMSMasterEnv->setupComplete = 0;
88 _VMSMasterEnv->suspend_mutex = PTHREAD_MUTEX_INITIALIZER;
89 _VMSMasterEnv->suspend_cond = PTHREAD_COND_INITIALIZER;
91 //Need the threads to be created suspended, and wait for a signal
92 // before proceeding -- gives time after creating to initialize other
93 // stuff before the coreLoops set off.
95 //Make params given to the win threads that animate the core loops
96 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
97 { coreLoopThdParams[coreIdx] = malloc( sizeof(ThdParams) );
98 coreLoopThdParams[coreIdx]->coreNum = coreIdx;
100 retCode =
101 pthread_create( &(coreLoopThdHandles[coreIdx]),
102 thdAttrs,
103 &coreLoop,
104 (void *)(coreLoopThdParams[coreIdx]) );
105 if(!retCode){printf("ERROR creating thread: %d\n", retCode); exit();}
106 }
109 }
111 void
112 create_sched_slots( MasterEnv *masterEnv )
113 { SchedSlot **schedSlots, **filledSlots;
114 int i;
116 schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
117 filledSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
118 masterEnv->schedSlots = schedSlots;
119 masterEnv->filledSlots = filledSlots;
121 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
122 {
123 schedSlots[i] = malloc( sizeof(SchedSlot) );
125 //Set state to mean "handling requests done, slot needs filling"
126 schedSlots[i]->workIsDone = FALSE;
127 schedSlots[i]->needsProcrAssigned = TRUE;
128 }
129 }
132 /*Semantic layer calls this when it want the system to start running..
133 *
134 *This starts the core loops running then waits for them to exit.
135 */
136 void
137 VMS__start_the_work_then_wait_until_done()
138 { int coreIdx;
139 //Start the core loops running
140 //===========================================================================
141 TSCount startCount, endCount;
142 unsigned long long count = 0, freq = 0;
143 double runTime;
145 startCount = getTSCount();
147 //tell the core loop threads that setup is complete
148 //get lock, to lock out any threads still starting up -- they'll see
149 // that setupComplete is true before entering while loop, and so never
150 // wait on the condition
151 pthread_mutex_lock( _VMSMasterEnv->suspend_mutex );
152 _VMSMasterEnv->setupComplete = 1;
153 pthread_mutex_unlock( _VMSMasterEnv->suspend_mutex );
154 pthread_cond_broadcast( _VMSMasterEnv->suspend_cond );
157 //wait for all to complete
158 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
159 {
160 pthread_join( coreLoopThdHandles[coreIdx], NULL );
161 }
163 //NOTE: do not clean up VMS env here -- semantic layer has to have
164 // a chance to clean up its environment first, then do a call to free
165 // the Master env and rest of VMS locations
168 endCount = getTSCount();
169 count = endCount - startCount;
171 runTime = (double)count / (double)TSCOUNT_FREQ;
173 printf("\n Time startup to shutdown: %f\n", runTime); fflush( stdin );
174 }
/*Create a new virtual processor: allocate its struct and its private stack,
 * then build a __cdecl call frame on that stack with initialData and a
 * pointer to the new struct in the parameter positions.
 *The function pointer goes into nextInstrPt -- the stack is set up in std
 * call structure, so the core loop jumping to the function ptr is the same
 * as a GCC-generated function call.
 *No need to save registers on an old stack frame, because there is no old
 * animator state to return to.
 *
 *NOTE(review): the frame layout hard-codes 32-bit x86 assumptions -- 4-byte
 * pointers cast through (int), params at stackPtr+4 and stackPtr+8 -- this
 * will not work on 64-bit builds; confirm the target before porting.
 *NOTE(review): malloc results are not checked here -- OOM dereferences NULL.
 */
VirtProcr *
VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
{ VirtProcr *newPr;
  char *stackLocs, *stackPtr;

  newPr = malloc( sizeof(VirtProcr) );
  newPr->procrID = numProcrsCreated++;  //IDs are sequential from startup
  newPr->nextInstrPt = fnPtr;           //core loop jumps here to animate
  newPr->initialData = initialData;

     //fnPtr takes two params -- void *initData & void *animProcr
     //alloc stack locations, make stackPtr be the highest addr minus room
     // for 2 params + return addr. Return addr (NULL) is in loc pointed to
     // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above
  stackLocs = malloc( VIRT_PROCR_STACK_SIZE );
  newPr->startOfStack = stackLocs;      //kept so the stack can be freed later
  stackPtr = ( (char *)stackLocs + VIRT_PROCR_STACK_SIZE - 0x10 );
     //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp
  *( (int *)stackPtr + 2 ) = (int) newPr; //rightmost param -- 32bit pointer
  *( (int *)stackPtr + 1 ) = (int) initialData; //next param to left
  newPr->stackPtr = stackPtr; //core loop will switch to this, then
  newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr

  return newPr;
}
/*Suspend the calling virtual processor and jump back into the core loop.
 *
 *There is a label inside this function -- the address of that label is
 * saved in the callingPr struct as the pick-up point from which to start
 * the next work-unit for that procr. If it turns out registers must be
 * saved, save them in the procr struct too. Then do an assembly jump to the
 * core loop's "done with work-unit" label. The procr struct is in the
 * request in the slave that animated the just-ended work-unit, so all the
 * state is saved there, and gets passed along, inside the request handler,
 * to the next work-unit for that procr.
 *
 *NOTE(review): uses GCC extensions -- computed label addresses (&&) and
 * 32-bit x86 extended asm (movl/%esp/%ebp); not portable beyond gcc/ia32.
 */
void
VMS__suspend_procr( VirtProcr *callingPr )
{ void *jmpPt, *stackPtrAddr, *framePtrAddr, *coreLoopStackPtr;
  void *coreLoopFramePtr;

     //The request to master will cause this suspended virt procr to get
     // scheduled again at some future point -- to resume, core loop jumps
     // to the resume point (below), which causes restore of saved regs and
     // "return" from this call.
  callingPr->nextInstrPt = &&ResumePt;

     //return ownership of the virt procr and sched slot to Master virt pr
  callingPr->schedSlot->workIsDone = TRUE;
//   coreIdx = callingPr->coreAnimatedBy;

     //addresses the asm below stores the current esp/ebp through
  stackPtrAddr = &(callingPr->stackPtr);
  framePtrAddr = &(callingPr->framePtr);

  jmpPt = callingPr->coreLoopStartPt;
  coreLoopFramePtr = callingPr->coreLoopFramePtr;//need this only
  coreLoopStackPtr = callingPr->coreLoopStackPtr;//shouldn't need -- safety

     //Save the virt procr's stack and frame ptrs, restore coreloop's frame
     // ptr, then jump back to "start" of core loop
     //Note, GCC compiles to assembly that saves esp and ebp in the stack
     // frame -- so have to explicitly do assembly that saves to memory
     //NOTE(review): the "=g" operands are read as addresses, not written --
     // the constraint choice looks suspect but the behavior is relied on;
     // confirm before changing
  asm volatile("movl %0, %%eax; \
                movl %%esp, (%%eax); \
                movl %1, %%eax; \
                movl %%ebp, (%%eax); \
                movl %2, %%eax; \
                movl %3, %%esp; \
                movl %4, %%ebp; \
                jmp %%eax " \
  /* outputs */ : "=g" (stackPtrAddr), "=g" (framePtrAddr) \
  /* inputs */  : "g" (jmpPt), "g"(coreLoopStackPtr), "g"(coreLoopFramePtr)\
  /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \
              ); //list everything as clobbered to force GCC to save all
                 // live vars that are in regs on stack before this
                 // assembly, so that stack pointer is correct, before jmp

 ResumePt:       //core loop jumps here when this procr is next scheduled
  return;
}
271 /*This is equivalent to "jump back to core loop" -- it's mainly only used
272 * just after adding dissipate request to a processor -- so the semantic
273 * layer is the only place it will be seen and/or used.
274 *
275 *It does almost the same thing as suspend, except don't need to save the
276 * stack nor set the nextInstrPt
277 *
278 *As of June 30, 2010 just implementing as a call to suspend -- just sugar
279 */
280 void
281 VMS__return_from_fn( VirtProcr *animatingPr )
282 {
283 VMS__suspend_procr( animatingPr );
284 }
287 /*Not sure yet the form going to put "dissipate" in, so this is the third
288 * possibility -- the semantic layer can just make a macro that looks like
289 * a call to its name, then expands to a call to this.
290 *
291 *As of June 30, 2010 this looks like the top choice..
292 *
293 *This adds a request to dissipate, then suspends the processor so that the
294 * request handler will receive the request. The request handler is what
295 * does the work of freeing memory and removing the processor from the
296 * semantic environment's data structures.
297 *The request handler also is what figures out when to shutdown the VMS
298 * system -- which causes all the core loop threads to die, and returns from
299 * the call that started up VMS to perform the work.
300 *
301 *This form is a bit misleading to understand if one is trying to figure out
302 * how VMS works -- it looks like a normal function call, but inside it
303 * sends a request to the request handler and suspends the processor, which
304 * jumps out of the VMS__dissipate_procr function, and out of all nestings
305 * above it, transferring the work of dissipating to the request handler,
306 * which then does the actual work -- causing the processor that animated
307 * the call of this function to disappear and the "hanging" state of this
308 * function to just poof into thin air -- the virtual processor's trace
309 * never returns from this call, but instead the virtual processor's trace
310 * gets suspended in this call and all the virt processor's state disap-
311 * pears -- making that suspend the last thing in the virt procr's trace.
312 */
313 void
314 VMS__dissipate_procr( VirtProcr *procrToDissipate )
315 { VMSReqst *req;
317 req = malloc( sizeof(VMSReqst) );
318 // req->virtProcrFrom = callingPr;
319 req->reqType = dissipate;
320 req->nextReqst = procrToDissipate->requests;
321 procrToDissipate->requests = req;
323 VMS__suspend_procr( procrToDissipate );
324 }
327 /*This inserts the semantic-layer's request data into standard VMS carrier
328 */
329 inline void
330 VMS__add_sem_request( void *semReqData, VirtProcr *callingPr )
331 { VMSReqst *req;
333 req = malloc( sizeof(VMSReqst) );
334 // req->virtProcrFrom = callingPr;
335 req->reqType = semantic;
336 req->semReqData = semReqData;
337 req->nextReqst = callingPr->requests;
338 callingPr->requests = req;
339 }
343 //TODO: add a semantic-layer supplied "freer" for the semantic-data portion
344 // of a request -- IE call with both a virt procr and a fn-ptr to request
345 // freer (or maybe put request freer as a field in virt procr?)
346 void
347 VMS__remove_and_free_top_request( VirtProcr *procrWithReq )
348 { VMSReqst *req;
350 req = procrWithReq->requests;
351 procrWithReq->requests = procrWithReq->requests->nextReqst;
352 free( req );
353 }
356 //TODO: add a semantic-layer supplied "freer" for the semantic-data portion
357 // of a request -- IE call with both a virt procr and a fn-ptr to request
358 // freer (also maybe put sem request freer as a field in virt procr?)
359 void
360 VMS__free_request( VMSReqst *req )
361 {
362 free( req );
363 }
365 VMSReqst *
366 VMS__take_top_request_from( VirtProcr *procrWithReq )
367 { VMSReqst *req;
369 req = procrWithReq->requests;
370 if( req == NULL ) return req;
372 procrWithReq->requests = procrWithReq->requests->nextReqst;
373 return req;
374 }
376 inline int
377 VMS__isSemanticReqst( VMSReqst *req )
378 {
379 return ( req->reqType == semantic );
380 }
383 inline void *
384 VMS__take_sem_reqst_from( VMSReqst *req )
385 {
386 return req->semReqData;
387 }
389 inline int
390 VMS__isDissipateReqst( VMSReqst *req )
391 {
392 return ( req->reqType == dissipate );
393 }
395 inline int
396 VMS__isCreateReqst( VMSReqst *req )
397 {
398 return ( req->reqType == regCreated );
399 }
401 void
402 VMS__send_register_new_procr_request(VirtProcr *newPr, VirtProcr *reqstingPr)
403 { VMSReqst *req;
405 req = malloc( sizeof(VMSReqst) );
406 req->reqType = regCreated;
407 req->semReqData = newPr;
408 req->nextReqst = reqstingPr->requests;
409 reqstingPr->requests = req;
411 VMS__suspend_procr( reqstingPr );
412 }
415 /*The semantic layer figures out when the work is done ( perhaps by a call
416 * in the application to "work all done", or perhaps all the virtual
417 * processors have dissipated.. a.s.o. )
418 *
419 *The semantic layer is responsible for making sure all work has fully
420 * completed before using this to shutdown the VMS system.
421 *
422 *After the semantic layer has determined it wants to shut down, the
423 * next time the Master Loop calls the scheduler plug-in, the scheduler
424 * then calls this function and returns the virtual processor it gets back.
425 *
426 *When the shut-down processor runs, it first frees all locations malloc'd to
427 * the VMS system (that wasn't
428 * specified as return-locations). Then it creates one core-loop shut-down
429 * processor for each core loop and puts them all into the workQ. When a
430 * core loop animates a core loop shut-down processor, it causes exit-thread
431 * to run, and when all core loop threads have exited, then the "wait for
432 * work to finish" in the main thread is woken, and the function-call that
433 * started all the work returns.
434 *
435 *The function animated by this processor performs the shut-down work.
436 */
437 VirtProcr *
438 VMS__create_the_shutdown_procr()
439 {
440 return VMS__create_procr( &shutdownFn, NULL );
441 }
444 /*This must be called by the request handler plugin -- it cannot be called
445 * from the semantic library "dissipate processor" function -- instead, the
446 * semantic layer has to generate a request for the plug-in to call this
447 * function.
448 *The reason is that this frees the virtual processor's stack -- which is
449 * still in use inside semantic library calls!
450 *
451 *This frees or recycles all the state owned by and comprising the VMS
452 * portion of the animating virtual procr. The request handler must first
453 * free any semantic data created for the processor that didn't use the
454 * VMS_malloc mechanism. Then it calls this, which first asks the malloc
455 * system to disown any state that did use VMS_malloc, and then frees the
456 * statck and the processor-struct itself.
457 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
458 * state, then that state gets freed (or sent to recycling) as a side-effect
459 * of dis-owning it.
460 */
461 void
462 VMS__free_procr_locs( VirtProcr *animatingPr )
463 {
464 //dis-own all locations owned by this processor, causing to be freed
465 // any locations that it is (was) sole owner of
466 //TODO: implement VMS__malloc system, including "give up ownership"
468 //The dissipate request might still be attached, so remove and free it
469 VMS__remove_and_free_top_request( animatingPr );
470 free( animatingPr->startOfStack );
472 //NOTE: initialData was given to the processor, so should either have
473 // been alloc'd with VMS__malloc, or freed by the level above animPr.
474 //So, all that's left to free here is the stack and the VirtProcr struc
475 // itself
476 free( animatingPr->startOfStack );
477 free( animatingPr );
478 }
482 /*This is the function run by the special "shut-down" processor
483 *
484 *The _VMSMasterEnv is needed by this shut down function, so the "wait"
485 * function run in the main loop has to free it, and the thread-related
486 * locations (coreLoopThdParams a.s.o.).
487 *However, the semantic environment and all data malloc'd to VMS can be
488 * freed here.
489 *
490 *NOTE: the semantic plug-in is expected to use VMS__malloc to get all the
491 * locations it needs -- they will be automatically freed by the standard
492 * "free all owned locations"
493 *
494 *Free any locations malloc'd to the VMS system (that weren't
495 * specified as return-locations).
496 *Then create one core-loop shut-down processor for each core loop and puts
497 * them all into the workQ.
498 */
499 void
500 shutdownFn( void *dummy, VirtProcr *animatingPr )
501 { int coreIdx;
502 VirtProcr *shutDownPr;
503 CASQueueStruc *workQ = _VMSWorkQ;
505 //free all the locations owned within the VMS system
506 //TODO: write VMS__malloc and free.. -- take the DKU malloc as starting pt
508 //make the core loop shut-down processors and put them into the workQ
509 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
510 {
511 shutDownPr = VMS__create_procr( NULL, NULL );
512 shutDownPr->nextInstrPt = _VMSMasterEnv->coreLoopShutDownPt;
513 writeCASQ( shutDownPr, workQ );
514 }
516 //This is an issue: the animating processor of this function may not
517 // get its request handled before all the cores have shutdown.
518 //TODO: after all the threads stop, clean out the MasterEnv, the
519 // SemanticEnv, and the workQ before returning.
520 VMS__dissipate_procr( animatingPr ); //will never come back from this
521 }
524 /*This has to free anything allocated during VMS_init, and any other alloc'd
525 * locations that might be left over.
526 */
527 void
528 VMS__shutdown()
529 { int i;
531 free( _VMSWorkQ );
532 free( _VMSMasterEnv->filledSlots );
533 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
534 {
535 free( _VMSMasterEnv->schedSlots[i] );
536 }
538 free( _VMSMasterEnv->schedSlots);
539 VMS__free_procr_locs( _VMSMasterEnv->masterVirtPr );
541 free( _VMSMasterEnv );
542 }
545 //===========================================================================
547 inline TSCount getTSCount()
548 { unsigned int low, high;
549 TSCount out;
551 saveTimeStampCountInto( low, high );
552 out = high;
553 out = (out << 32) + low;
554 return out;
555 }