Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VMS_impls > VMS__MC_shared_impl
comparison VMS.c @ 53:42dd44df1bb0
Init changed to use only VMS__malloc, and to use the VMS__malloc versions of utilities
| author | Me |
|---|---|
| date | Mon, 01 Nov 2010 21:21:32 -0700 |
| parents | f59cfa31a579 |
| children | f8508572f3de |
comparison legend: equal | deleted | inserted | replaced
| 22:0c3bd2780395 | 23:52f3740daaf9 |
|---|---|
| 31 create_the_coreLoop_OS_threads(); | 31 create_the_coreLoop_OS_threads(); |
| 32 | 32 |
| 33 MallocProlog * | 33 MallocProlog * |
| 34 create_free_list(); | 34 create_free_list(); |
| 35 | 35 |
| 36 void | |
| 37 endOSThreadFn( void *initData, VirtProcr *animatingPr ); | |
| 36 | 38 |
| 37 pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER; | 39 pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER; |
| 38 pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER; | 40 pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER; |
| 39 | 41 |
| 40 //=========================================================================== | 42 //=========================================================================== |
| 87 { MasterEnv *masterEnv; | 89 { MasterEnv *masterEnv; |
| 88 SRSWQueueStruc **readyToAnimateQs; | 90 SRSWQueueStruc **readyToAnimateQs; |
| 89 int coreIdx; | 91 int coreIdx; |
| 90 VirtProcr **masterVPs; | 92 VirtProcr **masterVPs; |
| 91 SchedSlot ***allSchedSlots; //ptr to array of ptrs | 93 SchedSlot ***allSchedSlots; //ptr to array of ptrs |
| 92 | 94 |
| 95 | |
| 93 //Make the master env, which holds everything else | 96 //Make the master env, which holds everything else |
| 94 _VMSMasterEnv = malloc( sizeof(MasterEnv) ); | 97 _VMSMasterEnv = malloc( sizeof(MasterEnv) ); |
| 98 | |
| 99 //Very first thing put into the master env is the free-list, seeded | |
| 100 // with a massive initial chunk of memory. | |
| 101 //After this, all other mallocs are VMS__malloc. | |
| 102 _VMSMasterEnv->freeListHead = VMS_ext__create_free_list(); | |
| 103 | |
| 104 //===================== Only VMS__malloc after this ==================== | |
| 95 masterEnv = _VMSMasterEnv; | 105 masterEnv = _VMSMasterEnv; |
| 96 //Need to set start pt here 'cause used by seed procr, which is created | |
| 97 // before the first core loop starts up. -- not sure how yet.. | |
| 98 // masterEnv->coreLoopStartPt = ; | |
| 99 // masterEnv->coreLoopEndPt = ; | |
| 100 | 106 |
| 101 //Make a readyToAnimateQ for each core loop | 107 //Make a readyToAnimateQ for each core loop |
| 102 readyToAnimateQs = malloc( NUM_CORES * sizeof(SRSWQueueStruc *) ); | 108 readyToAnimateQs = VMS__malloc( NUM_CORES * sizeof(SRSWQueueStruc *) ); |
| 103 masterVPs = malloc( NUM_CORES * sizeof(VirtProcr *) ); | 109 masterVPs = VMS__malloc( NUM_CORES * sizeof(VirtProcr *) ); |
| 104 | 110 |
| 105 //One array for each core, 3 in array, core's masterVP scheds all | 111 //One array for each core, 3 in array, core's masterVP scheds all |
| 106 allSchedSlots = malloc( NUM_CORES * sizeof(SchedSlot *) ); | 112 allSchedSlots = VMS__malloc( NUM_CORES * sizeof(SchedSlot *) ); |
| 107 | 113 |
| 114 _VMSMasterEnv->numProcrsCreated = 0; //used by create procr | |
| 108 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) | 115 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) |
| 109 { //running in main thread -- normal malloc inside makeSRSWQ | 116 { |
| 110 readyToAnimateQs[ coreIdx ] = makeSRSWQ(); | 117 readyToAnimateQs[ coreIdx ] = makeSRSWQ(); |
| 111 | 118 |
| 112 //Q: should give masterVP core-specific info as its init data? | 119 //Q: should give masterVP core-specific info as its init data? |
| 113 masterVPs[ coreIdx ] = VMS_ext__create_procr( &masterLoop, masterEnv ); | 120 masterVPs[ coreIdx ] = VMS__create_procr( &masterLoop, masterEnv ); |
| 114 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx; | 121 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx; |
| 115 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core | 122 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core |
| 116 _VMSMasterEnv->numMasterInARow[ coreIdx ] = FALSE; | 123 _VMSMasterEnv->numMasterInARow[ coreIdx ] = 0; |
| 117 } | 124 } |
| 118 _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs; | 125 _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs; |
| 119 _VMSMasterEnv->masterVPs = masterVPs; | 126 _VMSMasterEnv->masterVPs = masterVPs; |
| 120 _VMSMasterEnv->masterLock = UNLOCKED; | 127 _VMSMasterEnv->masterLock = UNLOCKED; |
| 121 _VMSMasterEnv->allSchedSlots = allSchedSlots; | 128 _VMSMasterEnv->allSchedSlots = allSchedSlots; |
| 122 _VMSMasterEnv->numProcrsCreated = 0; | |
| 123 | 129 |
| 124 | 130 |
| 125 //Aug 19, 2010: no longer need to place initial masterVP into queue | 131 //Aug 19, 2010: no longer need to place initial masterVP into queue |
| 126 // because coreLoop now controls -- animates its masterVP when no work | 132 // because coreLoop now controls -- animates its masterVP when no work |
| 127 | 133 |
| 128 _VMSMasterEnv->freeListHead = VMS__create_free_list(); | |
| 129 _VMSMasterEnv->amtOfOutstandingMem = 0; //none allocated yet | |
| 130 | 134 |
| 131 //============================= MEASUREMENT STUFF ======================== | 135 //============================= MEASUREMENT STUFF ======================== |
| 132 #ifdef STATS__TURN_ON_PROBES | 136 #ifdef STATS__TURN_ON_PROBES |
| 133 //creates intervalProbes array and sets pointer to it in masterEnv too | |
| 134 _VMSMasterEnv->dynIntervalProbesInfo = | 137 _VMSMasterEnv->dynIntervalProbesInfo = |
| 135 makeDynArrayOfSize( &(_VMSMasterEnv->intervalProbes), 20 ); | 138 makePrivDynArrayOfSize( &(_VMSMasterEnv->intervalProbes), 200); |
| 136 | 139 |
| 137 _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, NULL ); | 140 _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS__free ); |
| 138 _VMSMasterEnv->masterCreateProbeID = | 141 |
| 139 VMS_ext__record_time_point_into_new_probe( "masterCreateProbe" ); | 142 //put creation time directly into master env, for fast retrieval |
| 140 //Also put creation time directly into master env, for fast retrieval | |
| 141 struct timeval timeStamp; | 143 struct timeval timeStamp; |
| 142 gettimeofday( &(timeStamp), NULL); | 144 gettimeofday( &(timeStamp), NULL); |
| 143 _VMSMasterEnv->createPtInSecs = | 145 _VMSMasterEnv->createPtInSecs = |
| 144 timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0); | 146 timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0); |
| 145 #endif | 147 #endif |
| 150 SchedSlot ** | 152 SchedSlot ** |
| 151 create_sched_slots() | 153 create_sched_slots() |
| 152 { SchedSlot **schedSlots; | 154 { SchedSlot **schedSlots; |
| 153 int i; | 155 int i; |
| 154 | 156 |
| 155 schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) ); | 157 schedSlots = VMS__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) ); |
| 156 | 158 |
| 157 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) | 159 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) |
| 158 { | 160 { |
| 159 schedSlots[i] = malloc( sizeof(SchedSlot) ); | 161 schedSlots[i] = VMS__malloc( sizeof(SchedSlot) ); |
| 160 | 162 |
| 161 //Set state to mean "handling requests done, slot needs filling" | 163 //Set state to mean "handling requests done, slot needs filling" |
| 162 schedSlots[i]->workIsDone = FALSE; | 164 schedSlots[i]->workIsDone = FALSE; |
| 163 schedSlots[i]->needsProcrAssigned = TRUE; | 165 schedSlots[i]->needsProcrAssigned = TRUE; |
| 164 } | 166 } |
| 169 void | 171 void |
| 170 freeSchedSlots( SchedSlot **schedSlots ) | 172 freeSchedSlots( SchedSlot **schedSlots ) |
| 171 { int i; | 173 { int i; |
| 172 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) | 174 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) |
| 173 { | 175 { |
| 174 free( schedSlots[i] ); | 176 VMS__free( schedSlots[i] ); |
| 175 } | 177 } |
| 176 free( schedSlots ); | 178 VMS__free( schedSlots ); |
| 177 } | 179 } |
| 178 | 180 |
| 179 | 181 |
| 180 void | 182 void |
| 181 create_the_coreLoop_OS_threads() | 183 create_the_coreLoop_OS_threads() |
| 189 // stuff before the coreLoops set off. | 191 // stuff before the coreLoops set off. |
| 190 _VMSMasterEnv->setupComplete = 0; | 192 _VMSMasterEnv->setupComplete = 0; |
| 191 | 193 |
| 192 //Make the threads that animate the core loops | 194 //Make the threads that animate the core loops |
| 193 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) | 195 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) |
| 194 { coreLoopThdParams[coreIdx] = malloc( sizeof(ThdParams) ); | 196 { coreLoopThdParams[coreIdx] = VMS__malloc( sizeof(ThdParams) ); |
| 195 coreLoopThdParams[coreIdx]->coreNum = coreIdx; | 197 coreLoopThdParams[coreIdx]->coreNum = coreIdx; |
| 196 | 198 |
| 197 retCode = | 199 retCode = |
| 198 pthread_create( &(coreLoopThdHandles[coreIdx]), | 200 pthread_create( &(coreLoopThdHandles[coreIdx]), |
| 199 thdAttrs, | 201 thdAttrs, |
| 261 create_procr_helper( VirtProcr *newPr, VirtProcrFnPtr fnPtr, | 263 create_procr_helper( VirtProcr *newPr, VirtProcrFnPtr fnPtr, |
| 262 void *initialData, char *stackLocs ) | 264 void *initialData, char *stackLocs ) |
| 263 { | 265 { |
| 264 char *stackPtr; | 266 char *stackPtr; |
| 265 | 267 |
| 266 newPr->procrID = _VMSMasterEnv->numProcrsCreated++; | 268 newPr->startOfStack = stackLocs; |
| 267 newPr->nextInstrPt = fnPtr; | 269 newPr->procrID = _VMSMasterEnv->numProcrsCreated++; |
| 268 newPr->initialData = initialData; | 270 newPr->nextInstrPt = fnPtr; |
| 269 newPr->requests = NULL; | 271 newPr->initialData = initialData; |
| 270 newPr->schedSlot = NULL; | 272 newPr->requests = NULL; |
| 273 newPr->schedSlot = NULL; | |
| 271 | 274 |
| 272 //fnPtr takes two params -- void *initData & void *animProcr | 275 //fnPtr takes two params -- void *initData & void *animProcr |
| 273 //alloc stack locations, make stackPtr be the highest addr minus room | 276 //alloc stack locations, make stackPtr be the highest addr minus room |
| 274 // for 2 params + return addr. Return addr (NULL) is in loc pointed to | 277 // for 2 params + return addr. Return addr (NULL) is in loc pointed to |
| 275 // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above | 278 // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above |
| 299 | 302 |
| 300 newPr = VMS__malloc( sizeof(VirtProcr) ); | 303 newPr = VMS__malloc( sizeof(VirtProcr) ); |
| 301 stackLocs = VMS__malloc( VIRT_PROCR_STACK_SIZE ); | 304 stackLocs = VMS__malloc( VIRT_PROCR_STACK_SIZE ); |
| 302 if( stackLocs == 0 ) | 305 if( stackLocs == 0 ) |
| 303 { perror("VMS__malloc stack"); exit(1); } | 306 { perror("VMS__malloc stack"); exit(1); } |
| 304 newPr->startOfStack = stackLocs; | |
| 305 | 307 |
| 306 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); | 308 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); |
| 307 } | 309 } |
| 308 | 310 |
| 309 /* "ext" designates that it's for use outside the VMS system -- should only | 311 /* "ext" designates that it's for use outside the VMS system -- should only |
| 317 | 319 |
| 318 newPr = malloc( sizeof(VirtProcr) ); | 320 newPr = malloc( sizeof(VirtProcr) ); |
| 319 stackLocs = malloc( VIRT_PROCR_STACK_SIZE ); | 321 stackLocs = malloc( VIRT_PROCR_STACK_SIZE ); |
| 320 if( stackLocs == 0 ) | 322 if( stackLocs == 0 ) |
| 321 { perror("malloc stack"); exit(1); } | 323 { perror("malloc stack"); exit(1); } |
| 322 newPr->startOfStack = stackLocs; | |
| 323 | 324 |
| 324 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); | 325 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); |
| 325 } | 326 } |
| 326 | 327 |
| 327 | 328 |
| 443 * never returns from this call, but instead the virtual processor's trace | 444 * never returns from this call, but instead the virtual processor's trace |
| 444 * gets suspended in this call and all the virt processor's state disap- | 445 * gets suspended in this call and all the virt processor's state disap- |
| 445 * pears -- making that suspend the last thing in the virt procr's trace. | 446 * pears -- making that suspend the last thing in the virt procr's trace. |
| 446 */ | 447 */ |
| 447 void | 448 void |
| 448 VMS__dissipate_procr( VirtProcr *procrToDissipate ) | 449 VMS__send_dissipate_req( VirtProcr *procrToDissipate ) |
| 449 { VMSReqst req; | 450 { VMSReqst req; |
| 450 | 451 |
| 451 req.reqType = dissipate; | 452 req.reqType = dissipate; |
| 452 req.nextReqst = procrToDissipate->requests; | 453 req.nextReqst = procrToDissipate->requests; |
| 453 procrToDissipate->requests = &req; | 454 procrToDissipate->requests = &req; |
| 475 free( procrToDissipate ); | 476 free( procrToDissipate ); |
| 476 } | 477 } |
| 477 | 478 |
| 478 | 479 |
| 479 | 480 |
| 480 /*This inserts the semantic-layer's request data into standard VMS carrier | 481 /*This call's name indicates that request is malloc'd -- so req handler |
| 481 * request data-struct is allocated on stack of this call & ptr to it sent | 482 * has to free any extra requests tacked on before a send, using this. |
| 482 * to plugin | 483 * |
| 484 * This inserts the semantic-layer's request data into standard VMS carrier | |
| 485 * request data-struct that is mallocd. The sem request doesn't need to | |
| 486 * be malloc'd if this is called inside the same call chain before the | |
| 487 * send of the last request is called. | |
| 488 * | |
| 489 *The request handler has to call VMS__free_VMSReq for any of these | |
| 483 */ | 490 */ |
| 484 inline void | 491 inline void |
| 485 VMS__add_sem_request( void *semReqData, VirtProcr *callingPr ) | 492 VMS__add_sem_request_in_mallocd_VMSReqst( void *semReqData, |
| 486 { VMSReqst req; | 493 VirtProcr *callingPr ) |
| 487 | 494 { VMSReqst *req; |
| 488 req.reqType = semantic; | 495 |
| 489 req.semReqData = semReqData; | 496 req = VMS__malloc( sizeof(VMSReqst) ); |
| 490 req.nextReqst = callingPr->requests; | 497 req->reqType = semantic; |
| 491 callingPr->requests = &req; | 498 req->semReqData = semReqData; |
| 499 req->nextReqst = callingPr->requests; | |
| 500 callingPr->requests = req; | |
| 492 } | 501 } |
| 493 | 502 |
| 494 /*This inserts the semantic-layer's request data into standard VMS carrier | 503 /*This inserts the semantic-layer's request data into standard VMS carrier |
| 495 * request data-struct is allocated on stack of this call & ptr to it sent | 504 * request data-struct is allocated on stack of this call & ptr to it sent |
| 496 * to plugin | 505 * to plugin |
| 571 nameLen = strlen( semReq->nameStr ); | 580 nameLen = strlen( semReq->nameStr ); |
| 572 newProbe->nameStr = VMS__malloc( nameLen ); | 581 newProbe->nameStr = VMS__malloc( nameLen ); |
| 573 memcpy( newProbe->nameStr, semReq->nameStr, nameLen ); | 582 memcpy( newProbe->nameStr, semReq->nameStr, nameLen ); |
| 574 newProbe->hist = NULL; | 583 newProbe->hist = NULL; |
| 575 newProbe->schedChoiceWasRecorded = FALSE; | 584 newProbe->schedChoiceWasRecorded = FALSE; |
| 585 | |
| 586 //This runs in masterVP, so no race-condition worries | |
| 576 newProbe->probeID = | 587 newProbe->probeID = |
| 577 addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo ); | 588 addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo ); |
| 578 | 589 |
| 579 requestingPr->dataReturnedFromReq = newProbe; | 590 requestingPr->dataRetFromReq = newProbe; |
| 580 | 591 |
| 581 (*resumePrFnPtr)( requestingPr, semEnv ); | 592 (*resumePrFnPtr)( requestingPr, semEnv ); |
| 582 } | 593 } |
| 583 | 594 |
| 584 | 595 |
| 599 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd | 610 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd |
| 600 * state, then that state gets freed (or sent to recycling) as a side-effect | 611 * state, then that state gets freed (or sent to recycling) as a side-effect |
| 601 * of dis-owning it. | 612 * of dis-owning it. |
| 602 */ | 613 */ |
| 603 void | 614 void |
| 604 VMS__handle_dissipate_reqst( VirtProcr *animatingPr ) | 615 VMS__dissipate_procr( VirtProcr *animatingPr ) |
| 605 { | 616 { |
| 606 //dis-own all locations owned by this processor, causing to be freed | 617 //dis-own all locations owned by this processor, causing to be freed |
| 607 // any locations that it is (was) sole owner of | 618 // any locations that it is (was) sole owner of |
| 608 //TODO: implement VMS__malloc system, including "give up ownership" | 619 //TODO: implement VMS__malloc system, including "give up ownership" |
| 609 | 620 |
| 617 VMS__free( animatingPr->startOfStack ); | 628 VMS__free( animatingPr->startOfStack ); |
| 618 VMS__free( animatingPr ); | 629 VMS__free( animatingPr ); |
| 619 } | 630 } |
| 620 | 631 |
| 621 | 632 |
| 622 //TODO: re-architect so that have clean separation between request handler | 633 //TODO: look at architecting cleanest separation between request handler |
| 623 // and master loop, for dissipate, create, shutdown, and other non-semantic | 634 // and master loop, for dissipate, create, shutdown, and other non-semantic |
| 624 // requests. Issue is chain: one removes requests from AppVP, one dispatches | 635 // requests. Issue is chain: one removes requests from AppVP, one dispatches |
| 625 // on type of request, and one handles each type.. but some types require | 636 // on type of request, and one handles each type.. but some types require |
| 626 // action from both request handler and master loop -- maybe just give the | 637 // action from both request handler and master loop -- maybe just give the |
| 627 // request handler calls like: VMS__handle_X_request_type | 638 // request handler calls like: VMS__handle_X_request_type |
| 628 | 639 |
| 629 void | |
| 630 endOSThreadFn( void *initData, VirtProcr *animatingPr ); | |
| 631 | 640 |
| 632 /*This is called by the semantic layer's request handler when it decides its | 641 /*This is called by the semantic layer's request handler when it decides its |
| 633 * time to shut down the VMS system. Calling this causes the core loop OS | 642 * time to shut down the VMS system. Calling this causes the core loop OS |
| 634 * threads to exit, which unblocks the entry-point function that started up | 643 * threads to exit, which unblocks the entry-point function that started up |
| 635 * VMS, and allows it to grab the result and return to the original single- | 644 * VMS, and allows it to grab the result and return to the original single- |
| 639 * and-wait function has to free a bunch of stuff after it detects the | 648 * and-wait function has to free a bunch of stuff after it detects the |
| 640 * threads have all died: the masterEnv, the thread-related locations, | 649 * threads have all died: the masterEnv, the thread-related locations, |
| 641 * masterVP any AppVPs that might still be allocated and sitting in the | 650 * masterVP any AppVPs that might still be allocated and sitting in the |
| 642 * semantic environment, or have been orphaned in the _VMSWorkQ. | 651 * semantic environment, or have been orphaned in the _VMSWorkQ. |
| 643 * | 652 * |
| 644 *NOTE: the semantic plug-in is expected to use VMS__malloc_to to get all the | 653 *NOTE: the semantic plug-in is expected to use VMS__malloc to get all the |
| 645 * locations it needs, and give ownership to masterVP. Then, they will be | 654 * locations it needs, and give ownership to masterVP. Then, they will be |
| 646 * automatically freed when the masterVP is dissipated. (This happens after | 655 * automatically freed. |
| 647 * the core loop threads have all exited) | |
| 648 * | 656 * |
| 649 *In here,create one core-loop shut-down processor for each core loop and put | 657 *In here,create one core-loop shut-down processor for each core loop and put |
| 650 * them all directly into the readyToAnimateQ. | 658 * them all directly into the readyToAnimateQ. |
| 651 *Note, this function can ONLY be called after the semantic environment no | 659 *Note, this function can ONLY be called after the semantic environment no |
| 652 * longer cares if AppVPs get animated after the point this is called. In | 660 * longer cares if AppVPs get animated after the point this is called. In |
| 653 * other words, this can be used as an abort, or else it should only be | 661 * other words, this can be used as an abort, or else it should only be |
| 654 * called when all AppVPs have finished dissipate requests -- only at that | 662 * called when all AppVPs have finished dissipate requests -- only at that |
| 655 * point is it sure that all results have completed. | 663 * point is it sure that all results have completed. |
| 656 */ | 664 */ |
| 657 void | 665 void |
| 658 VMS__handle_shutdown_reqst( void *dummy, VirtProcr *animatingPr ) | 666 VMS__shutdown() |
| 659 { int coreIdx; | 667 { int coreIdx; |
| 660 VirtProcr *shutDownPr; | 668 VirtProcr *shutDownPr; |
| 661 | 669 |
| 662 //create the shutdown processors, one for each core loop -- put them | 670 //create the shutdown processors, one for each core loop -- put them |
| 663 // directly into the Q -- each core will die when gets one | 671 // directly into the Q -- each core will die when gets one |
| 701 /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \ | 709 /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \ |
| 702 ); | 710 ); |
| 703 } | 711 } |
| 704 | 712 |
| 705 | 713 |
| 706 /*This is called after the threads have shut down and control has returned | 714 /*This is called from the startup & shutdown |
| 707 * to the semantic layer, in the entry point function in the main thread. | 715 */ |
| 708 * It has to free anything allocated during VMS_init, and any other alloc'd | 716 void |
| 709 * locations that might be left over. | 717 VMS__cleanup_at_end_of_shutdown() |
| 710 */ | |
| 711 void | |
| 712 VMS__cleanup_after_shutdown() | |
| 713 { | 718 { |
| 714 SRSWQueueStruc **readyToAnimateQs; | 719 SRSWQueueStruc **readyToAnimateQs; |
| 715 int coreIdx; | 720 int coreIdx; |
| 716 VirtProcr **masterVPs; | 721 VirtProcr **masterVPs; |
| 717 SchedSlot ***allSchedSlots; //ptr to array of ptrs | 722 SchedSlot ***allSchedSlots; //ptr to array of ptrs |
| 718 | 723 |
| 724 //All the environment data has been allocated with VMS__malloc, so just | |
| 725 // free its internal big-chunk and all inside it disappear. | |
| 726 /* | |
| 719 readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs; | 727 readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs; |
| 720 masterVPs = _VMSMasterEnv->masterVPs; | 728 masterVPs = _VMSMasterEnv->masterVPs; |
| 721 allSchedSlots = _VMSMasterEnv->allSchedSlots; | 729 allSchedSlots = _VMSMasterEnv->allSchedSlots; |
| 722 | 730 |
| 723 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) | 731 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) |
| 724 { | 732 { |
| 725 freeSRSWQ( readyToAnimateQs[ coreIdx ] ); | 733 freeSRSWQ( readyToAnimateQs[ coreIdx ] ); |
| 726 //master VPs were created external to VMS, so use external free | 734 //master VPs were created external to VMS, so use external free |
| 727 VMS_ext__dissipate_procr( masterVPs[ coreIdx ] ); | 735 VMS__dissipate_procr( masterVPs[ coreIdx ] ); |
| 728 | 736 |
| 729 freeSchedSlots( allSchedSlots[ coreIdx ] ); | 737 freeSchedSlots( allSchedSlots[ coreIdx ] ); |
| 730 } | 738 } |
| 731 | 739 |
| 732 free( _VMSMasterEnv->readyToAnimateQs ); | 740 VMS__free( _VMSMasterEnv->readyToAnimateQs ); |
| 733 free( _VMSMasterEnv->masterVPs ); | 741 VMS__free( _VMSMasterEnv->masterVPs ); |
| 734 free( _VMSMasterEnv->allSchedSlots ); | 742 VMS__free( _VMSMasterEnv->allSchedSlots ); |
| 735 | 743 |
| 736 VMS_ext__free_free_list( _VMSMasterEnv->freeListHead ); | |
| 737 | |
| 738 //============================= MEASUREMENT STUFF ======================== | 744 //============================= MEASUREMENT STUFF ======================== |
| 739 #ifdef STATS__TURN_ON_PROBES | 745 #ifdef STATS__TURN_ON_PROBES |
| 740 freeDynArrayDeep( _VMSMasterEnv->dynIntervalProbesInfo, &free ); | 746 freeDynArrayDeep( _VMSMasterEnv->dynIntervalProbesInfo, &VMS__free_probe); |
| 741 #endif | 747 #endif |
| 742 //======================================================================== | 748 //======================================================================== |
| 743 | 749 */ |
| 744 free( _VMSMasterEnv ); | 750 //These are the only two that use system free |
| 745 } | 751 VMS_ext__free_free_list( _VMSMasterEnv->freeListHead ); |
| 746 | 752 free( (void *)_VMSMasterEnv ); |
| 753 } | |
| 754 |
