Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VMS_impls > VMS__MC_shared_impl
comparison VMS.c @ 50:8f7141a9272e
Added VMS__malloc and probes, and major re-factoring to separate mallocs
| author | Me |
|---|---|
| date | Sat, 30 Oct 2010 20:54:36 -0700 |
| parents | cf3e9238aeb0 |
| children | f59cfa31a579 |
comparison
equal
deleted
inserted
replaced
| 18:aa82127c7e20 | 21:e38566a22f76 |
|---|---|
| 4 * Licensed under BSD | 4 * Licensed under BSD |
| 5 */ | 5 */ |
| 6 | 6 |
| 7 #include <stdio.h> | 7 #include <stdio.h> |
| 8 #include <stdlib.h> | 8 #include <stdlib.h> |
| 9 #include <string.h> | |
| 9 #include <malloc.h> | 10 #include <malloc.h> |
| 11 #include <sys/time.h> | |
| 10 | 12 |
| 11 #include "VMS.h" | 13 #include "VMS.h" |
| 12 #include "Queue_impl/BlockingQueue.h" | 14 #include "Queue_impl/BlockingQueue.h" |
| 13 #include "Histogram/Histogram.h" | 15 #include "Histogram/Histogram.h" |
| 14 | 16 |
| 25 void | 27 void |
| 26 create_masterEnv(); | 28 create_masterEnv(); |
| 27 | 29 |
| 28 void | 30 void |
| 29 create_the_coreLoop_OS_threads(); | 31 create_the_coreLoop_OS_threads(); |
| 32 | |
| 33 MallocProlog * | |
| 34 create_free_list(); | |
| 35 | |
| 30 | 36 |
| 31 pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER; | 37 pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER; |
| 32 pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER; | 38 pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER; |
| 33 | 39 |
| 34 //=========================================================================== | 40 //=========================================================================== |
| 98 | 104 |
| 99 //One array for each core, 3 in array, core's masterVP scheds all | 105 //One array for each core, 3 in array, core's masterVP scheds all |
| 100 allSchedSlots = malloc( NUM_CORES * sizeof(SchedSlot *) ); | 106 allSchedSlots = malloc( NUM_CORES * sizeof(SchedSlot *) ); |
| 101 | 107 |
| 102 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) | 108 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) |
| 103 { | 109 { //running in main thread -- normal malloc inside makeSRSWQ |
| 104 readyToAnimateQs[ coreIdx ] = makeSRSWQ(); | 110 readyToAnimateQs[ coreIdx ] = makeSRSWQ(); |
| 105 | 111 |
| 106 //Q: should give masterVP core-specific into as its init data? | 112 //Q: should give masterVP core-specific info as its init data? |
| 107 masterVPs[ coreIdx ] = VMS__create_procr( &masterLoop, masterEnv ); | 113 masterVPs[ coreIdx ] = VMS_ext__create_procr( &masterLoop, masterEnv ); |
| 108 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx; | 114 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx; |
| 109 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core | 115 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core |
| 116 _VMSMasterEnv->numMasterInARow[ coreIdx ] = FALSE; | |
| 110 } | 117 } |
| 111 _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs; | 118 _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs; |
| 112 _VMSMasterEnv->masterVPs = masterVPs; | 119 _VMSMasterEnv->masterVPs = masterVPs; |
| 120 _VMSMasterEnv->masterLock = UNLOCKED; | |
| 113 _VMSMasterEnv->allSchedSlots = allSchedSlots; | 121 _VMSMasterEnv->allSchedSlots = allSchedSlots; |
| 114 | 122 _VMSMasterEnv->numProcrsCreated = 0; |
| 115 | 123 |
| 116 | 124 |
| 117 //Aug 19, 2010: no longer need to place initial masterVP into queue | 125 //Aug 19, 2010: no longer need to place initial masterVP into queue |
| 118 // because coreLoop now controls -- animates its masterVP when no work | 126 // because coreLoop now controls -- animates its masterVP when no work |
| 119 | 127 |
| 120 | 128 _VMSMasterEnv->freeListHead = VMS__create_free_list(); |
| 121 //==================== malloc substitute ======================== | 129 _VMSMasterEnv->amtOfOutstandingMem = 0; //none allocated yet |
| 122 // | 130 |
| 123 //Testing whether malloc is using thread-local storage and therefore | 131 //============================= MEASUREMENT STUFF ======================== |
| 124 // causing unreliable behavior. | 132 #ifdef STATS__TURN_ON_PROBES |
| 125 //Just allocate a massive chunk of memory and roll own malloc/free and | 133 //creates intervalProbes array and sets pointer to it in masterEnv too |
| 126 // make app use VMS__malloc_to, which will suspend and perform malloc | 134 _VMSMasterEnv->dynIntervalProbesInfo = |
| 127 // in the master, taking from this massive chunk. | 135 makeDynArrayOfSize( &(_VMSMasterEnv->intervalProbes), 20 ); |
| 128 | 136 |
| 129 // initFreeList(); | 137 _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, NULL ); |
| 130 | 138 _VMSMasterEnv->masterCreateProbeID = |
| 131 } | 139 VMS_ext__record_time_point_into_new_probe( "masterCreateProbe" ); |
| 132 | 140 //Also put creation time directly into master env, for fast retrieval |
| 133 /* | 141 struct timeval timeStamp; |
| 134 void | 142 gettimeofday( &(timeStamp), NULL); |
| 135 initMasterMalloc() | 143 _VMSMasterEnv->createPtInSecs = |
| 136 { | 144 timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0); |
| 137 _VMSMasterEnv->mallocChunk = malloc( MASSIVE_MALLOC_SIZE ); | 145 #endif |
| 138 | 146 //======================================================================== |
| 139 //The free-list element is the first several locations of an | 147 |
| 140 // allocated chunk -- the address given to the application is pre- | 148 } |
| 141 // pended with both the ownership structure and the free-list struc. | |
| 142 //So, write the values of these into the first locations of | |
| 143 // mallocChunk -- which marks it as free & puts in its size. | |
| 144 listElem = (FreeListElem *)_VMSMasterEnv->mallocChunk; | |
| 145 listElem->size = MASSIVE_MALLOC_SIZE - NUM_PREPEND_BYTES | |
| 146 listElem->next = NULL; | |
| 147 } | |
| 148 | |
| 149 void | |
| 150 dissipateMasterMalloc() | |
| 151 { | |
| 152 //Just foo code -- to get going -- doing as if free list were link-list | |
| 153 currElem = _VMSMasterEnv->freeList; | |
| 154 while( currElem != NULL ) | |
| 155 { | |
| 156 nextElem = currElem->next; | |
| 157 masterFree( currElem ); | |
| 158 currElem = nextElem; | |
| 159 } | |
| 160 free( _VMSMasterEnv->freeList ); | |
| 161 } | |
| 162 */ | |
| 163 | 149 |
| 164 SchedSlot ** | 150 SchedSlot ** |
| 165 create_sched_slots() | 151 create_sched_slots() |
| 166 { SchedSlot **schedSlots; | 152 { SchedSlot **schedSlots; |
| 167 int i; | 153 int i; |
| 211 retCode = | 197 retCode = |
| 212 pthread_create( &(coreLoopThdHandles[coreIdx]), | 198 pthread_create( &(coreLoopThdHandles[coreIdx]), |
| 213 thdAttrs, | 199 thdAttrs, |
| 214 &coreLoop, | 200 &coreLoop, |
| 215 (void *)(coreLoopThdParams[coreIdx]) ); | 201 (void *)(coreLoopThdParams[coreIdx]) ); |
| 216 if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(0);} | 202 if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(1);} |
| 217 } | 203 } |
| 218 } | 204 } |
| 219 | 205 |
| 220 /*Semantic layer calls this when it want the system to start running.. | 206 /*Semantic layer calls this when it wants the system to start running.. |
| 221 * | 207 * |
| 223 */ | 209 */ |
| 224 void | 210 void |
| 225 VMS__start_the_work_then_wait_until_done() | 211 VMS__start_the_work_then_wait_until_done() |
| 226 { int coreIdx; | 212 { int coreIdx; |
| 227 //Start the core loops running | 213 //Start the core loops running |
| 228 //=========================================================================== | |
| 229 TSCount startCount, endCount; | |
| 230 unsigned long long count = 0, freq = 0; | |
| 231 double runTime; | |
| 232 | |
| 233 startCount = getTSCount(); | |
| 234 | 214 |
| 235 //tell the core loop threads that setup is complete | 215 //tell the core loop threads that setup is complete |
| 236 //get lock, to lock out any threads still starting up -- they'll see | 216 //get lock, to lock out any threads still starting up -- they'll see |
| 237 // that setupComplete is true before entering while loop, and so never | 217 // that setupComplete is true before entering while loop, and so never |
| 238 // wait on the condition | 218 // wait on the condition |
| 249 } | 229 } |
| 250 | 230 |
| 251 //NOTE: do not clean up VMS env here -- semantic layer has to have | 231 //NOTE: do not clean up VMS env here -- semantic layer has to have |
| 252 // a chance to clean up its environment first, then do a call to free | 232 // a chance to clean up its environment first, then do a call to free |
| 253 // the Master env and rest of VMS locations | 233 // the Master env and rest of VMS locations |
| 254 | |
| 255 | |
| 256 endCount = getTSCount(); | |
| 257 count = endCount - startCount; | |
| 258 | |
| 259 runTime = (double)count / (double)TSCOUNT_FREQ; | |
| 260 | |
| 261 printf("\n Time startup to shutdown: %f\n", runTime); fflush( stdin ); | |
| 262 } | 234 } |
| 263 | 235 |
| 264 /*Only difference between version with an OS thread pinned to each core and | 236 /*Only difference between version with an OS thread pinned to each core and |
| 265 * the sequential version of VMS is VMS__init_Seq, this, and coreLoop_Seq. | 237 * the sequential version of VMS is VMS__init_Seq, this, and coreLoop_Seq. |
| 266 */ | 238 */ |
| 283 * function call | 255 * function call |
| 284 *No need to save registers on old stack frame, because there's no old | 256 *No need to save registers on old stack frame, because there's no old |
| 285 * animator state to return to -- | 257 * animator state to return to -- |
| 286 * | 258 * |
| 287 */ | 259 */ |
| 288 VirtProcr * | 260 inline VirtProcr * |
| 289 VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData ) | 261 create_procr_helper( VirtProcr *newPr, VirtProcrFnPtr fnPtr, |
| 290 { VirtProcr *newPr; | 262 void *initialData, char *stackLocs ) |
| 291 char *stackLocs, *stackPtr; | 263 { |
| 292 | 264 char *stackPtr; |
| 293 newPr = malloc( sizeof(VirtProcr) ); | 265 |
| 294 newPr->procrID = numProcrsCreated++; | 266 newPr->procrID = _VMSMasterEnv->numProcrsCreated++; |
| 295 newPr->nextInstrPt = fnPtr; | 267 newPr->nextInstrPt = fnPtr; |
| 296 newPr->initialData = initialData; | 268 newPr->initialData = initialData; |
| 297 newPr->requests = NULL; | 269 newPr->requests = NULL; |
| 298 newPr->schedSlot = NULL; | 270 newPr->schedSlot = NULL; |
| 299 // newPr->coreLoopStartPt = _VMSMasterEnv->coreLoopStartPt; | |
| 300 | 271 |
| 301 //fnPtr takes two params -- void *initData & void *animProcr | 272 //fnPtr takes two params -- void *initData & void *animProcr |
| 302 //alloc stack locations, make stackPtr be the highest addr minus room | 273 //alloc stack locations, make stackPtr be the highest addr minus room |
| 303 // for 2 params + return addr. Return addr (NULL) is in loc pointed to | 274 // for 2 params + return addr. Return addr (NULL) is in loc pointed to |
| 304 // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above | 275 // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above |
| 305 stackLocs = malloc( VIRT_PROCR_STACK_SIZE ); | |
| 306 if(stackLocs == 0) | |
| 307 {perror("malloc stack"); exit(1);} | |
| 308 newPr->startOfStack = stackLocs; | |
| 309 stackPtr = ( (char *)stackLocs + VIRT_PROCR_STACK_SIZE - 0x10 ); | 276 stackPtr = ( (char *)stackLocs + VIRT_PROCR_STACK_SIZE - 0x10 ); |
| 277 | |
| 310 //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp | 278 //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp |
| 311 *( (int *)stackPtr + 2 ) = (int) newPr; //rightmost param -- 32bit pointer | 279 *( (int *)stackPtr + 2 ) = (int) newPr; //rightmost param -- 32bit pointer |
| 312 *( (int *)stackPtr + 1 ) = (int) initialData; //next param to left | 280 *( (int *)stackPtr + 1 ) = (int) initialData; //next param to left |
| 313 newPr->stackPtr = stackPtr; //core loop will switch to this, then | 281 newPr->stackPtr = stackPtr; //core loop will switch to this, then |
| 314 newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr | 282 newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr |
| 315 | 283 |
| 284 //============================= MEASUREMENT STUFF ======================== | |
| 285 #ifdef STATS__TURN_ON_PROBES | |
| 286 struct timeval timeStamp; | |
| 287 gettimeofday( &(timeStamp), NULL); | |
| 288 newPr->createPtInSecs = timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0); | |
| 289 #endif | |
| 290 //======================================================================== | |
| 291 | |
| 316 return newPr; | 292 return newPr; |
| 293 } | |
| 294 | |
| 295 inline VirtProcr * | |
| 296 VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData ) | |
| 297 { VirtProcr *newPr; | |
| 298 char *stackLocs; | |
| 299 | |
| 300 newPr = VMS__malloc( sizeof(VirtProcr) ); | |
| 301 stackLocs = VMS__malloc( VIRT_PROCR_STACK_SIZE ); | |
| 302 if( stackLocs == 0 ) | |
| 303 { perror("VMS__malloc stack"); exit(1); } | |
| 304 newPr->startOfStack = stackLocs; | |
| 305 | |
| 306 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); | |
| 307 } | |
| 308 | |
| 309 /* "ext" designates that it's for use outside the VMS system -- should only | |
| 310 * be called from main thread or other thread -- never from code animated by | |
| 311 * a VMS virtual processor. | |
| 312 */ | |
| 313 inline VirtProcr * | |
| 314 VMS_ext__create_procr( VirtProcrFnPtr fnPtr, void *initialData ) | |
| 315 { VirtProcr *newPr; | |
| 316 char *stackLocs; | |
| 317 | |
| 318 newPr = malloc( sizeof(VirtProcr) ); | |
| 319 stackLocs = malloc( VIRT_PROCR_STACK_SIZE ); | |
| 320 if( stackLocs == 0 ) | |
| 321 { perror("malloc stack"); exit(1); } | |
| 322 newPr->startOfStack = stackLocs; | |
| 323 | |
| 324 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); | |
| 317 } | 325 } |
| 318 | 326 |
| 319 | 327 |
| 320 /*there is a label inside this function -- save the addr of this label in | 328 /*there is a label inside this function -- save the addr of this label in |
| 321 * the callingPr struc, as the pick-up point from which to start the next | 329 * the callingPr struc, as the pick-up point from which to start the next |
| 337 // "return" from this call. | 345 // "return" from this call. |
| 338 animatingPr->nextInstrPt = &&ResumePt; | 346 animatingPr->nextInstrPt = &&ResumePt; |
| 339 | 347 |
| 340 //return ownership of the virt procr and sched slot to Master virt pr | 348 //return ownership of the virt procr and sched slot to Master virt pr |
| 341 animatingPr->schedSlot->workIsDone = TRUE; | 349 animatingPr->schedSlot->workIsDone = TRUE; |
| 342 // coreIdx = callingPr->coreAnimatedBy; | |
| 343 | 350 |
| 344 stackPtrAddr = &(animatingPr->stackPtr); | 351 stackPtrAddr = &(animatingPr->stackPtr); |
| 345 framePtrAddr = &(animatingPr->framePtr); | 352 framePtrAddr = &(animatingPr->framePtr); |
| 346 | 353 |
| 347 jmpPt = _VMSMasterEnv->coreLoopStartPt; | 354 jmpPt = _VMSMasterEnv->coreLoopStartPt; |
| 388 return; | 395 return; |
| 389 } | 396 } |
| 390 | 397 |
| 391 | 398 |
| 392 | 399 |
| 400 /*For this implementation of VMS, it may not make much sense to have the | |
| 401 * system of requests for creating a new processor done this way.. but over | |
| 402 * the scope of single-master, multi-master, multi-tasking, OS-implementing, |
| 403 * distributed-memory, and so on, this gives VMS implementation a chance to | |
| 404 * do stuff before suspend, in the AppVP, and in the Master before the plugin | |
| 405 * is called, as well as in the lang-lib before this is called, and in the | |
| 406 * plugin. So, this gives both VMS and language implementations a chance to | |
| 407 * intercept at various points and do order-dependent stuff. | |
| 408 *Having a standard VMSNewPrReqData struc allows the language to create and | |
| 409 * free the struc, while VMS knows how to get the newPr if it wants it, and | |
| 410 * it lets the lang have lang-specific data related to creation transported | |
| 411 * to the plugin. | |
| 412 */ | |
| 413 void | |
| 414 VMS__send_create_procr_req( void *semReqData, VirtProcr *reqstingPr ) | |
| 415 { VMSReqst req; | |
| 416 | |
| 417 req.reqType = createReq; | |
| 418 req.semReqData = semReqData; | |
| 419 req.nextReqst = reqstingPr->requests; | |
| 420 reqstingPr->requests = &req; | |
| 421 | |
| 422 VMS__suspend_procr( reqstingPr ); | |
| 423 } | |
| 424 | |
| 393 | 425 |
| 394 /* | 426 /* |
| 395 *This adds a request to dissipate, then suspends the processor so that the | 427 *This adds a request to dissipate, then suspends the processor so that the |
| 396 * request handler will receive the request. The request handler is what | 428 * request handler will receive the request. The request handler is what |
| 397 * does the work of freeing memory and removing the processor from the | 429 * does the work of freeing memory and removing the processor from the |
| 412 * gets suspended in this call and all the virt processor's state disap- | 444 * gets suspended in this call and all the virt processor's state disap- |
| 413 * pears -- making that suspend the last thing in the virt procr's trace. | 445 * pears -- making that suspend the last thing in the virt procr's trace. |
| 414 */ | 446 */ |
| 415 void | 447 void |
| 416 VMS__dissipate_procr( VirtProcr *procrToDissipate ) | 448 VMS__dissipate_procr( VirtProcr *procrToDissipate ) |
| 417 { VMSReqst *req; | 449 { VMSReqst req; |
| 418 | 450 |
| 419 req = malloc( sizeof(VMSReqst) ); | 451 req.reqType = dissipate; |
| 420 // req->virtProcrFrom = callingPr; | 452 req.nextReqst = procrToDissipate->requests; |
| 421 req->reqType = dissipate; | 453 procrToDissipate->requests = &req; |
| 422 req->nextReqst = procrToDissipate->requests; | 454 |
| 423 procrToDissipate->requests = req; | |
| 424 | |
| 425 VMS__suspend_procr( procrToDissipate ); | 455 VMS__suspend_procr( procrToDissipate ); |
| 426 } | 456 } |
| 457 | |
| 458 | |
| 459 /* "ext" designates that it's for use outside the VMS system -- should only | |
| 460 * be called from main thread or other thread -- never from code animated by | |
| 461 * a VMS virtual processor. | |
| 462 * | |
| 463 *Use this version to dissipate VPs created outside the VMS system. | |
| 464 */ | |
| 465 void | |
| 466 VMS_ext__dissipate_procr( VirtProcr *procrToDissipate ) | |
| 467 { | |
| 468 //NOTE: initialData was given to the processor, so should either have | |
| 469 // been alloc'd with VMS__malloc, or freed by the level above animPr. | |
| 470 //So, all that's left to free here is the stack and the VirtProcr struc | |
| 471 // itself | |
| 472 //Note, should not stack-allocate initial data -- no guarantee, in | |
| 473 // general that creating processor will outlive ones it creates. | |
| 474 free( procrToDissipate->startOfStack ); | |
| 475 free( procrToDissipate ); | |
| 476 } | |
| 477 | |
| 427 | 478 |
| 428 | 479 |
| 429 /*This inserts the semantic-layer's request data into standard VMS carrier | 480 /*This inserts the semantic-layer's request data into standard VMS carrier |
| 481 * request data-struct is allocated on stack of this call & ptr to it sent | |
| 482 * to plugin | |
| 430 */ | 483 */ |
| 431 inline void | 484 inline void |
| 432 VMS__add_sem_request( void *semReqData, VirtProcr *callingPr ) | 485 VMS__add_sem_request( void *semReqData, VirtProcr *callingPr ) |
| 486 { VMSReqst req; | |
| 487 | |
| 488 req.reqType = semantic; | |
| 489 req.semReqData = semReqData; | |
| 490 req.nextReqst = callingPr->requests; | |
| 491 callingPr->requests = &req; | |
| 492 } | |
| 493 | |
| 494 /*This inserts the semantic-layer's request data into standard VMS carrier | |
| 495 * request data-struct is allocated on stack of this call & ptr to it sent | |
| 496 * to plugin | |
| 497 *Then it does suspend, to cause request to be sent. | |
| 498 */ | |
| 499 inline void | |
| 500 VMS__send_sem_request( void *semReqData, VirtProcr *callingPr ) | |
| 501 { VMSReqst req; | |
| 502 | |
| 503 req.reqType = semantic; | |
| 504 req.semReqData = semReqData; | |
| 505 req.nextReqst = callingPr->requests; | |
| 506 callingPr->requests = &req; | |
| 507 | |
| 508 VMS__suspend_procr( callingPr ); | |
| 509 } | |
| 510 | |
| 511 | |
| 512 inline void | |
| 513 VMS__send_VMSSem_request( void *semReqData, VirtProcr *callingPr ) | |
| 514 { VMSReqst req; | |
| 515 | |
| 516 req.reqType = VMSSemantic; | |
| 517 req.semReqData = semReqData; | |
| 518 req.nextReqst = callingPr->requests; //grab any other preceding |
| 519 callingPr->requests = &req; | |
| 520 | |
| 521 VMS__suspend_procr( callingPr ); | |
| 522 } | |
| 523 | |
| 524 | |
| 525 /* | |
| 526 */ | |
| 527 VMSReqst * | |
| 528 VMS__take_next_request_out_of( VirtProcr *procrWithReq ) | |
| 433 { VMSReqst *req; | 529 { VMSReqst *req; |
| 434 | 530 |
| 435 req = malloc( sizeof(VMSReqst) ); | |
| 436 // req->virtProcrFrom = callingPr; | |
| 437 req->reqType = semantic; | |
| 438 req->semReqData = semReqData; | |
| 439 req->nextReqst = callingPr->requests; | |
| 440 callingPr->requests = req; | |
| 441 } | |
| 442 | |
| 443 | |
| 444 /*Use this to get first request before starting request handler's loop | |
| 445 */ | |
| 446 VMSReqst * | |
| 447 VMS__take_top_request_from( VirtProcr *procrWithReq ) | |
| 448 { VMSReqst *req; | |
| 449 | |
| 450 req = procrWithReq->requests; | 531 req = procrWithReq->requests; |
| 451 if( req == NULL ) return req; | 532 if( req == NULL ) return NULL; |
| 452 | 533 |
| 453 procrWithReq->requests = procrWithReq->requests->nextReqst; | 534 procrWithReq->requests = procrWithReq->requests->nextReqst; |
| 454 return req; | 535 return req; |
| 455 } | |
| 456 | |
| 457 /*A subtle bug due to freeing then accessing "next" after freed caused this | |
| 458 * form of call to be put in -- so call this at end of request handler loop | |
| 459 * that iterates through the requests. | |
| 460 */ | |
| 461 VMSReqst * | |
| 462 VMS__free_top_and_give_next_request_from( VirtProcr *procrWithReq ) | |
| 463 { VMSReqst *req; | |
| 464 | |
| 465 req = procrWithReq->requests; | |
| 466 if( req == NULL ) return NULL; | |
| 467 | |
| 468 procrWithReq->requests = procrWithReq->requests->nextReqst; | |
| 469 VMS__free_request( req ); | |
| 470 return procrWithReq->requests; | |
| 471 } | |
| 472 | |
| 473 | |
| 474 //TODO: add a semantic-layer supplied "freer" for the semantic-data portion | |
| 475 // of a request -- IE call with both a virt procr and a fn-ptr to request | |
| 476 // freer (also maybe put sem request freer as a field in virt procr?) | |
| 477 //MeasVMS relies right now on this only freeing VMS layer of request -- the | |
| 478 // semantic portion of request is alloc'd and freed by request handler | |
| 479 void | |
| 480 VMS__free_request( VMSReqst *req ) | |
| 481 { | |
| 482 free( req ); | |
| 483 } | |
| 484 | |
| 485 | |
| 486 | |
| 487 inline int | |
| 488 VMS__isSemanticReqst( VMSReqst *req ) | |
| 489 { | |
| 490 return ( req->reqType == semantic ); | |
| 491 } | 536 } |
| 492 | 537 |
| 493 | 538 |
| 494 inline void * | 539 inline void * |
| 495 VMS__take_sem_reqst_from( VMSReqst *req ) | 540 VMS__take_sem_reqst_from( VMSReqst *req ) |
| 496 { | 541 { |
| 497 return req->semReqData; | 542 return req->semReqData; |
| 498 } | 543 } |
| 499 | 544 |
| 500 inline int | 545 |
| 501 VMS__isDissipateReqst( VMSReqst *req ) | 546 |
| 502 { | 547 /* This is for OS requests and VMS infrastructure requests, such as to create |
| 503 return ( req->reqType == dissipate ); | 548 * a probe -- a probe is inside the heart of VMS-core, it's not part of any |
| 504 } | 549 * language -- but it's also a semantic thing that's triggered from and used |
| 505 | 550 * in the application.. so it crosses abstractions.. so, need some special |
| 506 inline int | 551 * pattern here for handling such requests. |
| 507 VMS__isCreateReqst( VMSReqst *req ) | 552 * This is called from the language's request handler when it sees a request |
| 508 { | 553 * of type VMSSemReq |
| 509 return ( req->reqType == regCreated ); | 554 */ |
| 510 } | 555 void inline |
| 511 | 556 VMS__handle_VMSSemReq( VMSReqst *req, VirtProcr *requestingPr, void *semEnv, |
| 512 void | 557 ResumePrFnPtr resumePrFnPtr ) |
| 513 VMS__send_req_to_register_new_procr(VirtProcr *newPr, VirtProcr *reqstingPr) | 558 { VMSSemReq *semReq; |
| 514 { VMSReqst *req; | 559 IntervalProbe *newProbe; |
| 515 | 560 int32 nameLen; |
| 516 req = malloc( sizeof(VMSReqst) ); | 561 |
| 517 req->reqType = regCreated; | 562 semReq = req->semReqData; |
| 518 req->semReqData = newPr; | 563 |
| 519 req->nextReqst = reqstingPr->requests; | 564 newProbe = VMS__malloc( sizeof(IntervalProbe) ); |
| 520 reqstingPr->requests = req; | 565 nameLen = strlen( semReq->nameStr ); |
| 521 | 566 newProbe->nameStr = VMS__malloc( nameLen ); |
| 522 VMS__suspend_procr( reqstingPr ); | 567 memcpy( newProbe->nameStr, semReq->nameStr, nameLen ); |
| 568 newProbe->hist = NULL; | |
| 569 newProbe->schedChoiceWasRecorded = FALSE; | |
| 570 newProbe->probeID = | |
| 571 addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo ); | |
| 572 | |
| 573 requestingPr->dataReturnedFromReq = newProbe; | |
| 574 | |
| 575 (*resumePrFnPtr)( requestingPr, semEnv ); | |
| 523 } | 576 } |
| 524 | 577 |
| 525 | 578 |
| 526 | 579 |
| 527 /*This must be called by the request handler plugin -- it cannot be called | 580 /*This must be called by the request handler plugin -- it cannot be called |
| 528 * from the semantic library "dissipate processor" function -- instead, the | 581 * from the semantic library "dissipate processor" function -- instead, the |
| 529 * semantic layer has to generate a request for the plug-in to call this | 582 * semantic layer has to generate a request, and the plug-in calls this |
| 530 * function. | 583 * function. |
| 531 *The reason is that this frees the virtual processor's stack -- which is | 584 *The reason is that this frees the virtual processor's stack -- which is |
| 532 * still in use inside semantic library calls! | 585 * still in use inside semantic library calls! |
| 533 * | 586 * |
| 534 *This frees or recycles all the state owned by and comprising the VMS | 587 *This frees or recycles all the state owned by and comprising the VMS |
| 546 { | 599 { |
| 547 //dis-own all locations owned by this processor, causing to be freed | 600 //dis-own all locations owned by this processor, causing to be freed |
| 548 // any locations that it is (was) sole owner of | 601 // any locations that it is (was) sole owner of |
| 549 //TODO: implement VMS__malloc system, including "give up ownership" | 602 //TODO: implement VMS__malloc system, including "give up ownership" |
| 550 | 603 |
| 551 //The dissipate request might still be attached, so remove and free it | |
| 552 VMS__free_top_and_give_next_request_from( animatingPr ); | |
| 553 | 604 |
| 554 //NOTE: initialData was given to the processor, so should either have | 605 //NOTE: initialData was given to the processor, so should either have |
| 555 // been alloc'd with VMS__malloc, or freed by the level above animPr. | 606 // been alloc'd with VMS__malloc, or freed by the level above animPr. |
| 556 //So, all that's left to free here is the stack and the VirtProcr struc | 607 //So, all that's left to free here is the stack and the VirtProcr struc |
| 557 // itself | 608 // itself |
| 558 free( animatingPr->startOfStack ); | 609 //Note, should not stack-allocate initial data -- no guarantee, in |
| 559 free( animatingPr ); | 610 // general that creating processor will outlive ones it creates. |
| 611 VMS__free( animatingPr->startOfStack ); | |
| 612 VMS__free( animatingPr ); | |
| 560 } | 613 } |
| 561 | 614 |
| 562 | 615 |
| 563 //TODO: re-architect so that have clean separation between request handler | 616 //TODO: re-architect so that have clean separation between request handler |
| 564 // and master loop, for dissipate, create, shutdown, and other non-semantic | 617 // and master loop, for dissipate, create, shutdown, and other non-semantic |
| 601 VirtProcr *shutDownPr; | 654 VirtProcr *shutDownPr; |
| 602 | 655 |
| 603 //create the shutdown processors, one for each core loop -- put them | 656 //create the shutdown processors, one for each core loop -- put them |
| 604 // directly into the Q -- each core will die when gets one | 657 // directly into the Q -- each core will die when gets one |
| 605 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) | 658 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) |
| 606 { | 659 { //Note, this is running in the master |
| 607 shutDownPr = VMS__create_procr( &endOSThreadFn, NULL ); | 660 shutDownPr = VMS__create_procr( &endOSThreadFn, NULL ); |
| 608 writeSRSWQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] ); | 661 writeSRSWQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] ); |
| 609 } | 662 } |
| 610 | 663 |
| 611 } | 664 } |
| 662 allSchedSlots = _VMSMasterEnv->allSchedSlots; | 715 allSchedSlots = _VMSMasterEnv->allSchedSlots; |
| 663 | 716 |
| 664 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) | 717 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) |
| 665 { | 718 { |
| 666 freeSRSWQ( readyToAnimateQs[ coreIdx ] ); | 719 freeSRSWQ( readyToAnimateQs[ coreIdx ] ); |
| 667 | 720 //master VPs were created external to VMS, so use external free |
| 668 VMS__handle_dissipate_reqst( masterVPs[ coreIdx ] ); | 721 VMS_ext__dissipate_procr( masterVPs[ coreIdx ] ); |
| 669 | 722 |
| 670 freeSchedSlots( allSchedSlots[ coreIdx ] ); | 723 freeSchedSlots( allSchedSlots[ coreIdx ] ); |
| 671 } | 724 } |
| 672 | 725 |
| 673 free( _VMSMasterEnv->readyToAnimateQs ); | 726 free( _VMSMasterEnv->readyToAnimateQs ); |
| 674 free( _VMSMasterEnv->masterVPs ); | 727 free( _VMSMasterEnv->masterVPs ); |
| 675 free( _VMSMasterEnv->allSchedSlots ); | 728 free( _VMSMasterEnv->allSchedSlots ); |
| 729 | |
| 730 VMS_ext__free_free_list( _VMSMasterEnv->freeListHead ); | |
| 731 | |
| 732 //============================= MEASUREMENT STUFF ======================== | |
| 733 #ifdef STATS__TURN_ON_PROBES | |
| 734 freeDynArrayDeep( _VMSMasterEnv->dynIntervalProbesInfo, &free ); | |
| 735 #endif | |
| 736 //======================================================================== | |
| 676 | 737 |
| 677 free( _VMSMasterEnv ); | 738 free( _VMSMasterEnv ); |
| 678 } | 739 } |
| 679 | 740 |
| 680 | |
| 681 //=========================================================================== | |
| 682 | |
| 683 inline TSCount getTSCount() | |
| 684 { unsigned int low, high; | |
| 685 TSCount out; | |
| 686 | |
| 687 saveTimeStampCountInto( low, high ); | |
| 688 out = high; | |
| 689 out = (out << 32) + low; | |
| 690 return out; | |
| 691 } | |
| 692 |
