Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VMS_impls > VMS__MC_shared_impl
comparison VMS.c @ 61:984f7d78bfdf
Merge See what happens -- merged test stuff into Nov 8 VMS version
| author | SeanHalle |
|---|---|
| date | Thu, 11 Nov 2010 06:19:51 -0800 |
| parents | 054006c26b92 26d53313a8f2 |
| children |
comparison
equal
deleted
inserted
replaced
| 20:12fa3731f956 | 29:a016209003e1 |
|---|---|
| 4 * Licensed under BSD | 4 * Licensed under BSD |
| 5 */ | 5 */ |
| 6 | 6 |
| 7 #include <stdio.h> | 7 #include <stdio.h> |
| 8 #include <stdlib.h> | 8 #include <stdlib.h> |
| 9 #include <string.h> | |
| 9 #include <malloc.h> | 10 #include <malloc.h> |
| 11 #include <sys/time.h> | |
| 10 | 12 |
| 11 #include "VMS.h" | 13 #include "VMS.h" |
| 12 #include "Queue_impl/BlockingQueue.h" | 14 #include "Queue_impl/BlockingQueue.h" |
| 13 #include "Histogram/Histogram.h" | 15 #include "Histogram/Histogram.h" |
| 14 | 16 |
| 25 void | 27 void |
| 26 create_masterEnv(); | 28 create_masterEnv(); |
| 27 | 29 |
| 28 void | 30 void |
| 29 create_the_coreLoop_OS_threads(); | 31 create_the_coreLoop_OS_threads(); |
| 32 | |
| 33 MallocProlog * | |
| 34 create_free_list(); | |
| 35 | |
| 36 void | |
| 37 endOSThreadFn( void *initData, VirtProcr *animatingPr ); | |
| 30 | 38 |
| 31 pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER; | 39 pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER; |
| 32 pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER; | 40 pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER; |
| 33 | 41 |
| 34 //=========================================================================== | 42 //=========================================================================== |
| 81 { MasterEnv *masterEnv; | 89 { MasterEnv *masterEnv; |
| 82 VMSQueueStruc **readyToAnimateQs; | 90 VMSQueueStruc **readyToAnimateQs; |
| 83 int coreIdx; | 91 int coreIdx; |
| 84 VirtProcr **masterVPs; | 92 VirtProcr **masterVPs; |
| 85 SchedSlot ***allSchedSlots; //ptr to array of ptrs | 93 SchedSlot ***allSchedSlots; //ptr to array of ptrs |
| 86 | 94 |
| 95 | |
| 87 //Make the master env, which holds everything else | 96 //Make the master env, which holds everything else |
| 88 _VMSMasterEnv = malloc( sizeof(MasterEnv) ); | 97 _VMSMasterEnv = malloc( sizeof(MasterEnv) ); |
| 98 | |
| 99 //Very first thing put into the master env is the free-list, seeded | |
| 100 // with a massive initial chunk of memory. | |
| 101 //After this, all other mallocs are VMS__malloc. | |
| 102 _VMSMasterEnv->freeListHead = VMS_ext__create_free_list(); | |
| 103 | |
| 104 //===================== Only VMS__malloc after this ==================== | |
| 89 masterEnv = _VMSMasterEnv; | 105 masterEnv = _VMSMasterEnv; |
| 90 //Need to set start pt here 'cause used by seed procr, which is created | |
| 91 // before the first core loop starts up. -- not sure how yet.. | |
| 92 // masterEnv->coreLoopStartPt = ; | |
| 93 // masterEnv->coreLoopEndPt = ; | |
| 94 | 106 |
| 95 //Make a readyToAnimateQ for each core loop | 107 //Make a readyToAnimateQ for each core loop |
| 96 readyToAnimateQs = malloc( NUM_CORES * sizeof(VMSQueueStruc *) ); | 108 readyToAnimateQs = VMS__malloc( NUM_CORES * sizeof(VMSQueueStruc *) ); |
| 97 masterVPs = malloc( NUM_CORES * sizeof(VirtProcr *) ); | 109 masterVPs = VMS__malloc( NUM_CORES * sizeof(VirtProcr *) ); |
| 98 | 110 |
| 99 //One array for each core, 3 in array, core's masterVP scheds all | 111 //One array for each core, 3 in array, core's masterVP scheds all |
| 100 allSchedSlots = malloc( NUM_CORES * sizeof(SchedSlot *) ); | 112 allSchedSlots = VMS__malloc( NUM_CORES * sizeof(SchedSlot *) ); |
| 101 | 113 |
| 114 _VMSMasterEnv->numProcrsCreated = 0; //used by create procr | |
| 102 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) | 115 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) |
| 103 { | 116 { |
| 104 readyToAnimateQs[ coreIdx ] = makeSRSWQ(); | 117 readyToAnimateQs[ coreIdx ] = makeVMSQ(); |
| 105 | 118 |
| 106 //Q: should give masterVP core-specific into as its init data? | 119 //Q: should give masterVP core-specific info as its init data? |
| 107 masterVPs[ coreIdx ] = VMS__create_procr( &masterLoop, masterEnv ); | 120 masterVPs[ coreIdx ] = VMS__create_procr( &masterLoop, masterEnv ); |
| 108 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx; | 121 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx; |
| 109 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core | 122 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core |
| 123 _VMSMasterEnv->numMasterInARow[ coreIdx ] = 0; | |
| 124 _VMSMasterEnv->workStealingGates[ coreIdx ] = NULL; | |
| 110 } | 125 } |
| 111 _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs; | 126 _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs; |
| 112 _VMSMasterEnv->masterVPs = masterVPs; | 127 _VMSMasterEnv->masterVPs = masterVPs; |
| 128 _VMSMasterEnv->masterLock = UNLOCKED; | |
| 113 _VMSMasterEnv->allSchedSlots = allSchedSlots; | 129 _VMSMasterEnv->allSchedSlots = allSchedSlots; |
| 130 _VMSMasterEnv->workStealingLock = UNLOCKED; | |
| 114 | 131 |
| 115 //============================= MEASUREMENT STUFF ======================== | 132 //============================= MEASUREMENT STUFF ======================== |
| 116 #ifdef MEAS__TIME_MASTER | 133 #ifdef MEAS__TIME_MASTER |
| 117 | 134 |
| 118 _VMSMasterEnv->stats->masterTimeHist = makeHistogram( 25, 500, 800 ); | 135 _VMSMasterEnv->stats->masterTimeHist = makeHistogram( 25, 500, 800 ); |
| 123 | 140 |
| 124 //Aug 19, 2010: no longer need to place initial masterVP into queue | 141 //Aug 19, 2010: no longer need to place initial masterVP into queue |
| 125 // because coreLoop now controls -- animates its masterVP when no work | 142 // because coreLoop now controls -- animates its masterVP when no work |
| 126 | 143 |
| 127 | 144 |
| 128 //==================== malloc substitute ======================== | 145 //============================= MEASUREMENT STUFF ======================== |
| 129 // | 146 #ifdef STATS__TURN_ON_PROBES |
| 130 //Testing whether malloc is using thread-local storage and therefore | 147 _VMSMasterEnv->dynIntervalProbesInfo = |
| 131 // causing unreliable behavior. | 148 makePrivDynArrayOfSize( &(_VMSMasterEnv->intervalProbes), 200); |
| 132 //Just allocate a massive chunk of memory and roll own malloc/free and | 149 |
| 133 // make app use VMS__malloc_to, which will suspend and perform malloc | 150 _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS__free ); |
| 134 // in the master, taking from this massive chunk. | 151 |
| 135 | 152 //put creation time directly into master env, for fast retrieval |
| 136 // initFreeList(); | 153 struct timeval timeStamp; |
| 137 | 154 gettimeofday( &(timeStamp), NULL); |
| 138 } | 155 _VMSMasterEnv->createPtInSecs = |
| 139 | 156 timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0); |
| 140 /* | 157 #endif |
| 141 void | 158 //======================================================================== |
| 142 initMasterMalloc() | 159 |
| 143 { | 160 } |
| 144 _VMSMasterEnv->mallocChunk = malloc( MASSIVE_MALLOC_SIZE ); | |
| 145 | |
| 146 //The free-list element is the first several locations of an | |
| 147 // allocated chunk -- the address given to the application is pre- | |
| 148 // pended with both the ownership structure and the free-list struc. | |
| 149 //So, write the values of these into the first locations of | |
| 150 // mallocChunk -- which marks it as free & puts in its size. | |
| 151 listElem = (FreeListElem *)_VMSMasterEnv->mallocChunk; | |
| 152 listElem->size = MASSIVE_MALLOC_SIZE - NUM_PREPEND_BYTES | |
| 153 listElem->next = NULL; | |
| 154 } | |
| 155 | |
| 156 void | |
| 157 dissipateMasterMalloc() | |
| 158 { | |
| 159 //Just foo code -- to get going -- doing as if free list were link-list | |
| 160 currElem = _VMSMasterEnv->freeList; | |
| 161 while( currElem != NULL ) | |
| 162 { | |
| 163 nextElem = currElem->next; | |
| 164 masterFree( currElem ); | |
| 165 currElem = nextElem; | |
| 166 } | |
| 167 free( _VMSMasterEnv->freeList ); | |
| 168 } | |
| 169 */ | |
| 170 | 161 |
| 171 SchedSlot ** | 162 SchedSlot ** |
| 172 create_sched_slots() | 163 create_sched_slots() |
| 173 { SchedSlot **schedSlots; | 164 { SchedSlot **schedSlots; |
| 174 int i; | 165 int i; |
| 175 | 166 |
| 176 schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) ); | 167 schedSlots = VMS__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) ); |
| 177 | 168 |
| 178 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) | 169 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) |
| 179 { | 170 { |
| 180 schedSlots[i] = malloc( sizeof(SchedSlot) ); | 171 schedSlots[i] = VMS__malloc( sizeof(SchedSlot) ); |
| 181 | 172 |
| 182 //Set state to mean "handling requests done, slot needs filling" | 173 //Set state to mean "handling requests done, slot needs filling" |
| 183 schedSlots[i]->workIsDone = FALSE; | 174 schedSlots[i]->workIsDone = FALSE; |
| 184 schedSlots[i]->needsProcrAssigned = TRUE; | 175 schedSlots[i]->needsProcrAssigned = TRUE; |
| 185 } | 176 } |
| 190 void | 181 void |
| 191 freeSchedSlots( SchedSlot **schedSlots ) | 182 freeSchedSlots( SchedSlot **schedSlots ) |
| 192 { int i; | 183 { int i; |
| 193 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) | 184 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) |
| 194 { | 185 { |
| 195 free( schedSlots[i] ); | 186 VMS__free( schedSlots[i] ); |
| 196 } | 187 } |
| 197 free( schedSlots ); | 188 VMS__free( schedSlots ); |
| 198 } | 189 } |
| 199 | 190 |
| 200 | 191 |
| 201 void | 192 void |
| 202 create_the_coreLoop_OS_threads() | 193 create_the_coreLoop_OS_threads() |
| 203 { | 194 { |
| 204 //======================================================================== | 195 //======================================================================== |
| 205 // Create the Threads | 196 // Create the Threads |
| 206 int coreIdx, retCode, i; | 197 int coreIdx, retCode; |
| 207 | 198 |
| 208 //create the arrays used to measure TSC offsets between cores | 199 //create the arrays used to measure TSC offsets between cores |
| 209 pongNums = malloc( NUM_CORES * sizeof( int ) ); | 200 pongNums = malloc( NUM_CORES * sizeof( int ) ); |
| 210 pingTimes = malloc( NUM_CORES * NUM_TSC_ROUND_TRIPS * sizeof( TSCount ) ); | 201 pingTimes = malloc( NUM_CORES * NUM_TSC_ROUND_TRIPS * sizeof( TSCount ) ); |
| 211 pongTimes = malloc( NUM_CORES * NUM_TSC_ROUND_TRIPS * sizeof( TSCount ) ); | 202 pongTimes = malloc( NUM_CORES * NUM_TSC_ROUND_TRIPS * sizeof( TSCount ) ); |
| 225 // stuff before the coreLoops set off. | 216 // stuff before the coreLoops set off. |
| 226 _VMSMasterEnv->setupComplete = 0; | 217 _VMSMasterEnv->setupComplete = 0; |
| 227 | 218 |
| 228 //Make the threads that animate the core loops | 219 //Make the threads that animate the core loops |
| 229 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) | 220 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) |
| 230 { coreLoopThdParams[coreIdx] = malloc( sizeof(ThdParams) ); | 221 { coreLoopThdParams[coreIdx] = VMS__malloc( sizeof(ThdParams) ); |
| 231 coreLoopThdParams[coreIdx]->coreNum = coreIdx; | 222 coreLoopThdParams[coreIdx]->coreNum = coreIdx; |
| 232 | 223 |
| 233 retCode = | 224 retCode = |
| 234 pthread_create( &(coreLoopThdHandles[coreIdx]), | 225 pthread_create( &(coreLoopThdHandles[coreIdx]), |
| 235 thdAttrs, | 226 thdAttrs, |
| 236 &coreLoop, | 227 &coreLoop, |
| 237 (void *)(coreLoopThdParams[coreIdx]) ); | 228 (void *)(coreLoopThdParams[coreIdx]) ); |
| 238 if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(0);} | 229 if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(1);} |
| 239 } | 230 } |
| 240 } | 231 } |
| 241 | 232 |
| 242 /*Semantic layer calls this when it wants the system to start running.. | 233 /*Semantic layer calls this when it wants the system to start running.. |
| 243 * | 234 * |
| 305 * function call | 296 * function call |
| 306 *No need to save registers on old stack frame, because there's no old | 297 *No need to save registers on old stack frame, because there's no old |
| 307 * animator state to return to -- | 298 * animator state to return to -- |
| 308 * | 299 * |
| 309 */ | 300 */ |
| 310 VirtProcr * | 301 inline VirtProcr * |
| 311 VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData ) | 302 create_procr_helper( VirtProcr *newPr, VirtProcrFnPtr fnPtr, |
| 312 { VirtProcr *newPr; | 303 void *initialData, char *stackLocs ) |
| 313 char *stackLocs, *stackPtr; | 304 { |
| 305 char *stackPtr; | |
| 314 | 306 |
| 315 //============================= MEASUREMENT STUFF ======================== | 307 //============================= MEASUREMENT STUFF ======================== |
| 316 #ifdef MEAS__TIME_MASTER | 308 #ifdef MEAS__TIME_MASTER |
| 317 int32 startStamp; | 309 int32 startStamp; |
| 318 saveLowTimeStampCountInto( startStamp ); | 310 saveLowTimeStampCountInto( startStamp ); |
| 319 #endif | 311 #endif |
| 320 //======================================================================== | 312 //======================================================================== |
| 321 | 313 newPr->startOfStack = stackLocs; |
| 322 newPr = malloc( sizeof(VirtProcr) ); | 314 newPr->procrID = _VMSMasterEnv->numProcrsCreated++; |
| 323 newPr->procrID = numProcrsCreated++; | 315 newPr->nextInstrPt = fnPtr; |
| 324 newPr->nextInstrPt = fnPtr; | 316 newPr->initialData = initialData; |
| 325 newPr->initialData = initialData; | 317 newPr->requests = NULL; |
| 326 newPr->requests = NULL; | 318 newPr->schedSlot = NULL; |
| 327 newPr->schedSlot = NULL; | |
| 328 // newPr->coreLoopStartPt = _VMSMasterEnv->coreLoopStartPt; | |
| 329 | 319 |
| 330 //fnPtr takes two params -- void *initData & void *animProcr | 320 //fnPtr takes two params -- void *initData & void *animProcr |
| 331 //alloc stack locations, make stackPtr be the highest addr minus room | 321 //alloc stack locations, make stackPtr be the highest addr minus room |
| 332 // for 2 params + return addr. Return addr (NULL) is in loc pointed to | 322 // for 2 params + return addr. Return addr (NULL) is in loc pointed to |
| 333 // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above | 323 // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above |
| 334 stackLocs = malloc( VIRT_PROCR_STACK_SIZE ); | |
| 335 if(stackLocs == 0) {perror("error: malloc stack"); exit(1);} | |
| 336 newPr->startOfStack = stackLocs; | |
| 337 stackPtr = ( (char *)stackLocs + VIRT_PROCR_STACK_SIZE - 0x10 ); | 324 stackPtr = ( (char *)stackLocs + VIRT_PROCR_STACK_SIZE - 0x10 ); |
| 325 | |
| 338 //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp | 326 //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp |
| 339 *( (int *)stackPtr + 2 ) = (int) newPr; //rightmost param -- 32bit pointer | 327 *( (int *)stackPtr + 2 ) = (int) newPr; //rightmost param -- 32bit pointer |
| 340 *( (int *)stackPtr + 1 ) = (int) initialData; //next param to left | 328 *( (int *)stackPtr + 1 ) = (int) initialData; //next param to left |
| 341 newPr->stackPtr = stackPtr; //core loop will switch to this, then | 329 newPr->stackPtr = stackPtr; //core loop will switch to this, then |
| 342 newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr | 330 newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr |
| 345 #ifdef MEAS__TIME_MASTER | 333 #ifdef MEAS__TIME_MASTER |
| 346 int32 endStamp; | 334 int32 endStamp; |
| 347 saveLowTimeStampCountInto( endStamp ); | 335 saveLowTimeStampCountInto( endStamp ); |
| 348 addIntervalToHist( startStamp, endStamp, | 336 addIntervalToHist( startStamp, endStamp, |
| 349 _VMSMasterEnv->stats->createHist ); | 337 _VMSMasterEnv->stats->createHist ); |
| 338 //============================= MEASUREMENT STUFF ======================== | |
| 339 #ifdef STATS__TURN_ON_PROBES | |
| 340 struct timeval timeStamp; | |
| 341 gettimeofday( &(timeStamp), NULL); | |
| 342 newPr->createPtInSecs = timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0) - | |
| 343 _VMSMasterEnv->createPtInSecs; | |
| 350 #endif | 344 #endif |
| 351 //======================================================================== | 345 //======================================================================== |
| 352 | 346 |
| 353 return newPr; | 347 return newPr; |
| 348 } | |
| 349 | |
| 350 inline VirtProcr * | |
| 351 VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData ) | |
| 352 { VirtProcr *newPr; | |
| 353 char *stackLocs; | |
| 354 | |
| 355 newPr = VMS__malloc( sizeof(VirtProcr) ); | |
| 356 stackLocs = VMS__malloc( VIRT_PROCR_STACK_SIZE ); | |
| 357 if( stackLocs == 0 ) | |
| 358 { perror("VMS__malloc stack"); exit(1); } | |
| 359 | |
| 360 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); | |
| 361 } | |
| 362 | |
| 363 /* "ext" designates that it's for use outside the VMS system -- should only | |
| 364 * be called from main thread or other thread -- never from code animated by | |
| 365 * a VMS virtual processor. | |
| 366 */ | |
| 367 inline VirtProcr * | |
| 368 VMS_ext__create_procr( VirtProcrFnPtr fnPtr, void *initialData ) | |
| 369 { VirtProcr *newPr; | |
| 370 char *stackLocs; | |
| 371 | |
| 372 newPr = malloc( sizeof(VirtProcr) ); | |
| 373 stackLocs = malloc( VIRT_PROCR_STACK_SIZE ); | |
| 374 if( stackLocs == 0 ) | |
| 375 { perror("malloc stack"); exit(1); } | |
| 376 | |
| 377 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); | |
| 354 } | 378 } |
| 355 | 379 |
| 356 | 380 |
| 357 /*there is a label inside this function -- save the addr of this label in | 381 /*there is a label inside this function -- save the addr of this label in |
| 358 * the callingPr struc, as the pick-up point from which to start the next | 382 * the callingPr struc, as the pick-up point from which to start the next |
| 363 * there, and will get passed along, inside the request handler, to the | 387 * there, and will get passed along, inside the request handler, to the |
| 364 * next work-unit for that procr. | 388 * next work-unit for that procr. |
| 365 */ | 389 */ |
| 366 void | 390 void |
| 367 VMS__suspend_procr( VirtProcr *animatingPr ) | 391 VMS__suspend_procr( VirtProcr *animatingPr ) |
| 368 { void *jmpPt, *stackPtrAddr, *framePtrAddr, *coreLoopStackPtr; | 392 { |
| 369 void *coreLoopFramePtr; | |
| 370 | 393 |
| 371 //The request to master will cause this suspended virt procr to get | 394 //The request to master will cause this suspended virt procr to get |
| 372 // scheduled again at some future point -- to resume, core loop jumps | 395 // scheduled again at some future point -- to resume, core loop jumps |
| 373 // to the resume point (below), which causes restore of saved regs and | 396 // to the resume point (below), which causes restore of saved regs and |
| 374 // "return" from this call. | 397 // "return" from this call. |
| 375 animatingPr->nextInstrPt = &&ResumePt; | 398 animatingPr->nextInstrPt = &&ResumePt; |
| 376 | 399 |
| 377 //return ownership of the virt procr and sched slot to Master virt pr | 400 //return ownership of the virt procr and sched slot to Master virt pr |
| 378 animatingPr->schedSlot->workIsDone = TRUE; | 401 animatingPr->schedSlot->workIsDone = TRUE; |
| 379 // coreIdx = callingPr->coreAnimatedBy; | |
| 380 | |
| 381 stackPtrAddr = &(animatingPr->stackPtr); | |
| 382 framePtrAddr = &(animatingPr->framePtr); | |
| 383 | |
| 384 jmpPt = _VMSMasterEnv->coreLoopStartPt; | |
| 385 coreLoopFramePtr = animatingPr->coreLoopFramePtr;//need this only | |
| 386 coreLoopStackPtr = animatingPr->coreLoopStackPtr;//safety | |
| 387 | |
| 388 //Save the virt procr's stack and frame ptrs, | |
| 389 asm volatile("movl %0, %%eax; \ | |
| 390 movl %%esp, (%%eax); \ | |
| 391 movl %1, %%eax; \ | |
| 392 movl %%ebp, (%%eax) "\ | |
| 393 /* outputs */ : "=g" (stackPtrAddr), "=g" (framePtrAddr) \ | |
| 394 /* inputs */ : \ | |
| 395 /* clobber */ : "%eax" \ | |
| 396 ); | |
| 397 | 402 |
| 398 //=========================== Measurement stuff ======================== | 403 //=========================== Measurement stuff ======================== |
| 399 #ifdef MEAS__TIME_STAMP_SUSP | 404 #ifdef MEAS__TIME_STAMP_SUSP |
| 400 //record time stamp: compare to time-stamp recorded below | 405 //record time stamp: compare to time-stamp recorded below |
| 401 saveLowTimeStampCountInto( animatingPr->preSuspTSCLow ); | 406 saveLowTimeStampCountInto( animatingPr->preSuspTSCLow ); |
| 402 #endif | 407 #endif |
| 403 //======================================================================= | 408 //======================================================================= |
| 404 | 409 |
| 405 //restore coreloop's frame ptr, then jump back to "start" of core loop | 410 |
| 406 //Note, GCC compiles to assembly that saves esp and ebp in the stack | 411 SwitchToCoreLoop( animatingPr ) |
| 407 // frame -- so have to explicitly do assembly that saves to memory | 412 |
| 408 asm volatile("movl %0, %%eax; \ | 413 //======================================================================= |
| 409 movl %1, %%esp; \ | |
| 410 movl %2, %%ebp; \ | |
| 411 jmp %%eax " \ | |
| 412 /* outputs */ : \ | |
| 413 /* inputs */ : "m" (jmpPt), "m"(coreLoopStackPtr), "m"(coreLoopFramePtr)\ | |
| 414 /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \ | |
| 415 ); //list everything as clobbered to force GCC to save all | |
| 416 // live vars that are in regs on stack before this | |
| 417 // assembly, so that stack pointer is correct, before jmp | |
| 418 | |
| 419 ResumePt: | 414 ResumePt: |
| 420 #ifdef MEAS__TIME_STAMP_SUSP | 415 #ifdef MEAS__TIME_STAMP_SUSP |
| 421 //NOTE: only take low part of count -- do sanity check when take diff | 416 //NOTE: only take low part of count -- do sanity check when take diff |
| 422 saveLowTimeStampCountInto( animatingPr->postSuspTSCLow ); | 417 saveLowTimeStampCountInto( animatingPr->postSuspTSCLow ); |
| 423 #endif | 418 #endif |
| 424 | 419 |
| 425 return; | 420 return; |
| 426 } | 421 } |
| 427 | 422 |
| 428 | 423 |
| 424 | |
| 425 /*For this implementation of VMS, it may not make much sense to have the | |
| 426 * system of requests for creating a new processor done this way.. but over | |
| 427 * the scope of single-master, multi-master, multi-tasking, OS-implementing, |
| 428 * distributed-memory, and so on, this gives VMS implementation a chance to | |
| 429 * do stuff before suspend, in the AppVP, and in the Master before the plugin | |
| 430 * is called, as well as in the lang-lib before this is called, and in the | |
| 431 * plugin. So, this gives both VMS and language implementations a chance to | |
| 432 * intercept at various points and do order-dependent stuff. | |
| 433 *Having a standard VMSNewPrReqData struc allows the language to create and | |
| 434 * free the struc, while VMS knows how to get the newPr if it wants it, and | |
| 435 * it lets the lang have lang-specific data related to creation transported | |
| 436 * to the plugin. | |
| 437 */ | |
| 438 void | |
| 439 VMS__send_create_procr_req( void *semReqData, VirtProcr *reqstingPr ) | |
| 440 { VMSReqst req; | |
| 441 | |
| 442 req.reqType = createReq; | |
| 443 req.semReqData = semReqData; | |
| 444 req.nextReqst = reqstingPr->requests; | |
| 445 reqstingPr->requests = &req; | |
| 446 | |
| 447 VMS__suspend_procr( reqstingPr ); | |
| 448 } | |
| 429 | 449 |
| 430 | 450 |
| 431 /* | 451 /* |
| 432 *This adds a request to dissipate, then suspends the processor so that the | 452 *This adds a request to dissipate, then suspends the processor so that the |
| 433 * request handler will receive the request. The request handler is what | 453 * request handler will receive the request. The request handler is what |
| 448 * never returns from this call, but instead the virtual processor's trace | 468 * never returns from this call, but instead the virtual processor's trace |
| 449 * gets suspended in this call and all the virt processor's state disap- | 469 * gets suspended in this call and all the virt processor's state disap- |
| 450 * pears -- making that suspend the last thing in the virt procr's trace. | 470 * pears -- making that suspend the last thing in the virt procr's trace. |
| 451 */ | 471 */ |
| 452 void | 472 void |
| 453 VMS__dissipate_procr( VirtProcr *procrToDissipate ) | 473 VMS__send_dissipate_req( VirtProcr *procrToDissipate ) |
| 474 { VMSReqst req; | |
| 475 | |
| 476 req.reqType = dissipate; | |
| 477 req.nextReqst = procrToDissipate->requests; | |
| 478 procrToDissipate->requests = &req; | |
| 479 | |
| 480 VMS__suspend_procr( procrToDissipate ); | |
| 481 } | |
| 482 | |
| 483 | |
| 484 /* "ext" designates that it's for use outside the VMS system -- should only | |
| 485 * be called from main thread or other thread -- never from code animated by | |
| 486 * a VMS virtual processor. | |
| 487 * | |
| 488 *Use this version to dissipate VPs created outside the VMS system. | |
| 489 */ | |
| 490 void | |
| 491 VMS_ext__dissipate_procr( VirtProcr *procrToDissipate ) | |
| 492 { | |
| 493 //NOTE: initialData was given to the processor, so should either have | |
| 494 // been alloc'd with VMS__malloc, or freed by the level above animPr. | |
| 495 //So, all that's left to free here is the stack and the VirtProcr struc | |
| 496 // itself | |
| 497 //Note, should not stack-allocate initial data -- no guarantee, in | |
| 498 // general that creating processor will outlive ones it creates. | |
| 499 free( procrToDissipate->startOfStack ); | |
| 500 free( procrToDissipate ); | |
| 501 } | |
| 502 | |
| 503 | |
| 504 | |
| 505 /*This call's name indicates that request is malloc'd -- so req handler | |
| 506 * has to free any extra requests tacked on before a send, using this. | |
| 507 * | |
| 508 * This inserts the semantic-layer's request data into standard VMS carrier | |
| 509 * request data-struct that is mallocd. The sem request doesn't need to | |
| 510 * be malloc'd if this is called inside the same call chain before the | |
| 511 * send of the last request is called. | |
| 512 * | |
| 513 *The request handler has to call VMS__free_VMSReq for any of these | |
| 514 */ | |
| 515 inline void | |
| 516 VMS__add_sem_request_in_mallocd_VMSReqst( void *semReqData, | |
| 517 VirtProcr *callingPr ) | |
| 454 { VMSReqst *req; | 518 { VMSReqst *req; |
| 455 | 519 |
| 456 req = malloc( sizeof(VMSReqst) ); | 520 req = VMS__malloc( sizeof(VMSReqst) ); |
| 457 // req->virtProcrFrom = callingPr; | 521 req->reqType = semantic; |
| 458 req->reqType = dissipate; | 522 req->semReqData = semReqData; |
| 459 req->nextReqst = procrToDissipate->requests; | 523 req->nextReqst = callingPr->requests; |
| 460 procrToDissipate->requests = req; | 524 callingPr->requests = req; |
| 461 | 525 } |
| 462 VMS__suspend_procr( procrToDissipate ); | |
| 463 } | |
| 464 | |
| 465 | 526 |
| 466 /*This inserts the semantic-layer's request data into standard VMS carrier | 527 /*This inserts the semantic-layer's request data into standard VMS carrier |
| 528 * request data-struct is allocated on stack of this call & ptr to it sent | |
| 529 * to plugin | |
| 530 *Then it does suspend, to cause request to be sent. | |
| 467 */ | 531 */ |
| 468 inline void | 532 inline void |
| 469 VMS__add_sem_request( void *semReqData, VirtProcr *callingPr ) | 533 VMS__send_sem_request( void *semReqData, VirtProcr *callingPr ) |
| 534 { VMSReqst req; | |
| 535 | |
| 536 req.reqType = semantic; | |
| 537 req.semReqData = semReqData; | |
| 538 req.nextReqst = callingPr->requests; | |
| 539 callingPr->requests = &req; | |
| 540 | |
| 541 VMS__suspend_procr( callingPr ); | |
| 542 } | |
| 543 | |
| 544 | |
| 545 inline void | |
| 546 VMS__send_VMSSem_request( void *semReqData, VirtProcr *callingPr ) | |
| 547 { VMSReqst req; | |
| 548 | |
| 549 req.reqType = VMSSemantic; | |
| 550 req.semReqData = semReqData; | |
| 551 req.nextReqst = callingPr->requests; //grab any other preceding | |
| 552 callingPr->requests = &req; | |
| 553 | |
| 554 VMS__suspend_procr( callingPr ); | |
| 555 } | |
| 556 | |
| 557 | |
| 558 /* | |
| 559 */ | |
| 560 VMSReqst * | |
| 561 VMS__take_next_request_out_of( VirtProcr *procrWithReq ) | |
| 470 { VMSReqst *req; | 562 { VMSReqst *req; |
| 471 | 563 |
| 472 req = malloc( sizeof(VMSReqst) ); | |
| 473 // req->virtProcrFrom = callingPr; | |
| 474 req->reqType = semantic; | |
| 475 req->semReqData = semReqData; | |
| 476 req->nextReqst = callingPr->requests; | |
| 477 callingPr->requests = req; | |
| 478 } | |
| 479 | |
| 480 | |
| 481 /*Use this to get first request before starting request handler's loop | |
| 482 */ | |
| 483 VMSReqst * | |
| 484 VMS__take_top_request_from( VirtProcr *procrWithReq ) | |
| 485 { VMSReqst *req; | |
| 486 | |
| 487 req = procrWithReq->requests; | 564 req = procrWithReq->requests; |
| 488 if( req == NULL ) return req; | 565 if( req == NULL ) return NULL; |
| 489 | 566 |
| 490 procrWithReq->requests = procrWithReq->requests->nextReqst; | 567 procrWithReq->requests = procrWithReq->requests->nextReqst; |
| 491 return req; | 568 return req; |
| 492 } | 569 } |
| 493 | 570 |
| 494 /*A subtle bug due to freeing then accessing "next" after freed caused this | |
| 495 * form of call to be put in -- so call this at end of request handler loop | |
| 496 * that iterates through the requests. | |
| 497 */ | |
| 498 VMSReqst * | |
| 499 VMS__free_top_and_give_next_request_from( VirtProcr *procrWithReq ) | |
| 500 { VMSReqst *req; | |
| 501 | |
| 502 req = procrWithReq->requests; | |
| 503 if( req == NULL ) return NULL; | |
| 504 | |
| 505 procrWithReq->requests = procrWithReq->requests->nextReqst; | |
| 506 VMS__free_request( req ); | |
| 507 return procrWithReq->requests; | |
| 508 } | |
| 509 | |
| 510 | |
| 511 //TODO: add a semantic-layer supplied "freer" for the semantic-data portion | |
| 512 // of a request -- IE call with both a virt procr and a fn-ptr to request | |
| 513 // freer (also maybe put sem request freer as a field in virt procr?) | |
| 514 //MeasVMS relies right now on this only freeing VMS layer of request -- the | |
| 515 // semantic portion of request is alloc'd and freed by request handler | |
| 516 void | |
| 517 VMS__free_request( VMSReqst *req ) | |
| 518 { | |
| 519 free( req ); | |
| 520 } | |
| 521 | |
| 522 | |
| 523 | |
| 524 inline int | |
| 525 VMS__isSemanticReqst( VMSReqst *req ) | |
| 526 { | |
| 527 return ( req->reqType == semantic ); | |
| 528 } | |
| 529 | |
| 530 | 571 |
| 531 inline void * | 572 inline void * |
| 532 VMS__take_sem_reqst_from( VMSReqst *req ) | 573 VMS__take_sem_reqst_from( VMSReqst *req ) |
| 533 { | 574 { |
| 534 return req->semReqData; | 575 return req->semReqData; |
| 535 } | 576 } |
| 536 | 577 |
| 537 inline int | 578 |
| 538 VMS__isDissipateReqst( VMSReqst *req ) | 579 |
| 539 { | 580 /* This is for OS requests and VMS infrastructure requests, such as to create |
| 540 return ( req->reqType == dissipate ); | 581 * a probe -- a probe is inside the heart of VMS-core, it's not part of any |
| 541 } | 582 * language -- but it's also a semantic thing that's triggered from and used |
| 542 | 583 * in the application.. so it crosses abstractions.. so, need some special |
| 543 inline int | 584 * pattern here for handling such requests. |
| 544 VMS__isCreateReqst( VMSReqst *req ) | 585 * Doing this just like it were a second language sharing VMS-core. |
| 545 { | 586 * |
| 546 return ( req->reqType == regCreated ); | 587 * This is called from the language's request handler when it sees a request |
| 547 } | 588 * of type VMSSemReq |
| 548 | 589 * |
| 549 void | 590 * TODO: Later change this, to give probes their own separate plugin & have |
| 550 VMS__send_req_to_register_new_procr(VirtProcr *newPr, VirtProcr *reqstingPr) | 591 * VMS-core steer the request to appropriate plugin |
| 551 { VMSReqst *req; | 592 * Do the same for OS calls -- look later at it.. |
| 552 | 593 */ |
| 553 req = malloc( sizeof(VMSReqst) ); | 594 void inline |
| 554 req->reqType = regCreated; | 595 VMS__handle_VMSSemReq( VMSReqst *req, VirtProcr *requestingPr, void *semEnv, |
| 555 req->semReqData = newPr; | 596 ResumePrFnPtr resumePrFnPtr ) |
| 556 req->nextReqst = reqstingPr->requests; | 597 { VMSSemReq *semReq; |
| 557 reqstingPr->requests = req; | 598 IntervalProbe *newProbe; |
| 558 | 599 int32 nameLen; |
| 559 VMS__suspend_procr( reqstingPr ); | 600 |
| 601 semReq = req->semReqData; | |
| 602 | |
| 603 newProbe = VMS__malloc( sizeof(IntervalProbe) ); | |
| 604 nameLen = strlen( semReq->nameStr ); | |
| 605 newProbe->nameStr = VMS__malloc( nameLen ); | |
| 606 memcpy( newProbe->nameStr, semReq->nameStr, nameLen ); | |
| 607 newProbe->hist = NULL; | |
| 608 newProbe->schedChoiceWasRecorded = FALSE; | |
| 609 | |
| 610 //This runs in masterVP, so no race-condition worries | |
| 611 newProbe->probeID = | |
| 612 addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo ); | |
| 613 | |
| 614 requestingPr->dataRetFromReq = newProbe; | |
| 615 | |
| 616 (*resumePrFnPtr)( requestingPr, semEnv ); | |
| 560 } | 617 } |
| 561 | 618 |
| 562 | 619 |
| 563 | 620 |
| 564 /*This must be called by the request handler plugin -- it cannot be called | 621 /*This must be called by the request handler plugin -- it cannot be called |
| 565 * from the semantic library "dissipate processor" function -- instead, the | 622 * from the semantic library "dissipate processor" function -- instead, the |
| 566 * semantic layer has to generate a request for the plug-in to call this | 623 * semantic layer has to generate a request, and the plug-in calls this |
| 567 * function. | 624 * function. |
| 568 *The reason is that this frees the virtual processor's stack -- which is | 625 *The reason is that this frees the virtual processor's stack -- which is |
| 569 * still in use inside semantic library calls! | 626 * still in use inside semantic library calls! |
| 570 * | 627 * |
| 571 *This frees or recycles all the state owned by and comprising the VMS | 628 *This frees or recycles all the state owned by and comprising the VMS |
| 577 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd | 634 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd |
| 578 * state, then that state gets freed (or sent to recycling) as a side-effect | 635 * state, then that state gets freed (or sent to recycling) as a side-effect |
| 579 * of dis-owning it. | 636 * of dis-owning it. |
| 580 */ | 637 */ |
| 581 void | 638 void |
| 582 VMS__handle_dissipate_reqst( VirtProcr *animatingPr ) | 639 VMS__dissipate_procr( VirtProcr *animatingPr ) |
| 583 { | 640 { |
| 584 //dis-own all locations owned by this processor, causing to be freed | 641 //dis-own all locations owned by this processor, causing to be freed |
| 585 // any locations that it is (was) sole owner of | 642 // any locations that it is (was) sole owner of |
| 586 //TODO: implement VMS__malloc system, including "give up ownership" | 643 //TODO: implement VMS__malloc system, including "give up ownership" |
| 587 | 644 |
| 588 //The dissipate request might still be attached, so remove and free it | |
| 589 VMS__free_top_and_give_next_request_from( animatingPr ); | |
| 590 | 645 |
| 591 //NOTE: initialData was given to the processor, so should either have | 646 //NOTE: initialData was given to the processor, so should either have |
| 592 // been alloc'd with VMS__malloc, or freed by the level above animPr. | 647 // been alloc'd with VMS__malloc, or freed by the level above animPr. |
| 593 //So, all that's left to free here is the stack and the VirtProcr struc | 648 //So, all that's left to free here is the stack and the VirtProcr struc |
| 594 // itself | 649 // itself |
| 595 free( animatingPr->startOfStack ); | 650 //Note, should not stack-allocate initial data -- no guarantee, in |
| 596 free( animatingPr ); | 651 // general that creating processor will outlive ones it creates. |
| 597 } | 652 VMS__free( animatingPr->startOfStack ); |
| 598 | 653 VMS__free( animatingPr ); |
| 599 | 654 } |
| 600 //TODO: re-architect so that have clean separation between request handler | 655 |
| 656 | |
| 657 //TODO: look at architecting cleanest separation between request handler | |
| 601 // and master loop, for dissipate, create, shutdown, and other non-semantic | 658 // and master loop, for dissipate, create, shutdown, and other non-semantic |
| 602 // requests. Issue is chain: one removes requests from AppVP, one dispatches | 659 // requests. Issue is chain: one removes requests from AppVP, one dispatches |
| 603 // on type of request, and one handles each type.. but some types require | 660 // on type of request, and one handles each type.. but some types require |
| 604 // action from both request handler and master loop -- maybe just give the | 661 // action from both request handler and master loop -- maybe just give the |
| 605 // request handler calls like: VMS__handle_X_request_type | 662 // request handler calls like: VMS__handle_X_request_type |
| 606 | 663 |
| 607 void | |
| 608 endOSThreadFn( void *initData, VirtProcr *animatingPr ); | |
| 609 | 664 |
| 610 /*This is called by the semantic layer's request handler when it decides its | 665 /*This is called by the semantic layer's request handler when it decides its |
| 611 * time to shut down the VMS system. Calling this causes the core loop OS | 666 * time to shut down the VMS system. Calling this causes the core loop OS |
| 612 * threads to exit, which unblocks the entry-point function that started up | 667 * threads to exit, which unblocks the entry-point function that started up |
| 613 * VMS, and allows it to grab the result and return to the original single- | 668 * VMS, and allows it to grab the result and return to the original single- |
| 617 * and-wait function has to free a bunch of stuff after it detects the | 672 * and-wait function has to free a bunch of stuff after it detects the |
| 618 * threads have all died: the masterEnv, the thread-related locations, | 673 * threads have all died: the masterEnv, the thread-related locations, |
| 619 * masterVP any AppVPs that might still be allocated and sitting in the | 674 * masterVP any AppVPs that might still be allocated and sitting in the |
| 620 * semantic environment, or have been orphaned in the _VMSWorkQ. | 675 * semantic environment, or have been orphaned in the _VMSWorkQ. |
| 621 * | 676 * |
| 622 *NOTE: the semantic plug-in is expected to use VMS__malloc_to to get all the | 677 *NOTE: the semantic plug-in is expected to use VMS__malloc to get all the |
| 623 * locations it needs, and give ownership to masterVP. Then, they will be | 678 * locations it needs, and give ownership to masterVP. Then, they will be |
| 624 * automatically freed when the masterVP is dissipated. (This happens after | 679 * automatically freed. |
| 625 * the core loop threads have all exited) | |
| 626 * | 680 * |
| 627 *In here,create one core-loop shut-down processor for each core loop and put | 681 *In here,create one core-loop shut-down processor for each core loop and put |
| 628 * them all directly into the readyToAnimateQ. | 682 * them all directly into the readyToAnimateQ. |
| 629 *Note, this function can ONLY be called after the semantic environment no | 683 *Note, this function can ONLY be called after the semantic environment no |
| 630 * longer cares if AppVPs get animated after the point this is called. In | 684 * longer cares if AppVPs get animated after the point this is called. In |
| 631 * other words, this can be used as an abort, or else it should only be | 685 * other words, this can be used as an abort, or else it should only be |
| 632 * called when all AppVPs have finished dissipate requests -- only at that | 686 * called when all AppVPs have finished dissipate requests -- only at that |
| 633 * point is it sure that all results have completed. | 687 * point is it sure that all results have completed. |
| 634 */ | 688 */ |
| 635 void | 689 void |
| 636 VMS__handle_shutdown_reqst( void *dummy, VirtProcr *animatingPr ) | 690 VMS__shutdown() |
| 637 { int coreIdx; | 691 { int coreIdx; |
| 638 VirtProcr *shutDownPr; | 692 VirtProcr *shutDownPr; |
| 639 | 693 |
| 640 //create the shutdown processors, one for each core loop -- put them | 694 //create the shutdown processors, one for each core loop -- put them |
| 641 // directly into the Q -- each core will die when gets one | 695 // directly into the Q -- each core will die when gets one |
| 642 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) | 696 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) |
| 643 { | 697 { //Note, this is running in the master |
| 644 shutDownPr = VMS__create_procr( &endOSThreadFn, NULL ); | 698 shutDownPr = VMS__create_procr( &endOSThreadFn, NULL ); |
| 645 writeSRSWQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] ); | 699 writeVMSQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] ); |
| 646 } | 700 } |
| 647 | 701 |
| 648 } | 702 } |
| 649 | 703 |
| 650 | 704 |
| 679 /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \ | 733 /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \ |
| 680 ); | 734 ); |
| 681 } | 735 } |
| 682 | 736 |
| 683 | 737 |
| 684 /*This is called after the threads have shut down and control has returned | 738 /*This is called from the startup & shutdown |
| 685 * to the semantic layer, in the entry point function in the main thread. | 739 */ |
| 686 * It has to free anything allocated during VMS_init, and any other alloc'd | 740 void |
| 687 * locations that might be left over. | 741 VMS__cleanup_at_end_of_shutdown() |
| 688 */ | |
| 689 void | |
| 690 VMS__cleanup_after_shutdown() | |
| 691 { | 742 { |
| 692 VMSQueueStruc **readyToAnimateQs; | 743 VMSQueueStruc **readyToAnimateQs; |
| 693 int coreIdx; | 744 int coreIdx; |
| 694 VirtProcr **masterVPs; | 745 VirtProcr **masterVPs; |
| 695 SchedSlot ***allSchedSlots; //ptr to array of ptrs | 746 SchedSlot ***allSchedSlots; //ptr to array of ptrs |
| 696 | 747 |
| 748 //All the environment data has been allocated with VMS__malloc, so just | |
| 749 // free its internal big-chunk and all inside it disappear. | |
| 750 /* | |
| 697 readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs; | 751 readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs; |
| 698 masterVPs = _VMSMasterEnv->masterVPs; | 752 masterVPs = _VMSMasterEnv->masterVPs; |
| 699 allSchedSlots = _VMSMasterEnv->allSchedSlots; | 753 allSchedSlots = _VMSMasterEnv->allSchedSlots; |
| 700 | 754 |
| 701 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) | 755 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) |
| 702 { | 756 { |
| 703 freeSRSWQ( readyToAnimateQs[ coreIdx ] ); | 757 freeVMSQ( readyToAnimateQs[ coreIdx ] ); |
| 704 | 758 //master VPs were created external to VMS, so use external free |
| 705 VMS__handle_dissipate_reqst( masterVPs[ coreIdx ] ); | 759 VMS__dissipate_procr( masterVPs[ coreIdx ] ); |
| 706 | 760 |
| 707 freeSchedSlots( allSchedSlots[ coreIdx ] ); | 761 freeSchedSlots( allSchedSlots[ coreIdx ] ); |
| 708 } | 762 } |
| 709 | 763 |
| 710 free( _VMSMasterEnv->readyToAnimateQs ); | 764 VMS__free( _VMSMasterEnv->readyToAnimateQs ); |
| 711 free( _VMSMasterEnv->masterVPs ); | 765 VMS__free( _VMSMasterEnv->masterVPs ); |
| 712 free( _VMSMasterEnv->allSchedSlots ); | 766 VMS__free( _VMSMasterEnv->allSchedSlots ); |
| 713 | 767 |
| 714 free( _VMSMasterEnv ); | 768 //============================= MEASUREMENT STUFF ======================== |
| 715 } | 769 #ifdef STATS__TURN_ON_PROBES |
| 716 | 770 freeDynArrayDeep( _VMSMasterEnv->dynIntervalProbesInfo, &VMS__free_probe); |
| 717 | 771 #endif |
| 718 //=========================================================================== | 772 //======================================================================== |
| 719 | 773 */ |
| 720 inline TSCount getTSC() | 774 //These are the only two that use system free |
| 721 { unsigned int low, high; | 775 VMS_ext__free_free_list( _VMSMasterEnv->freeListHead ); |
| 722 TSCount out; | 776 free( (void *)_VMSMasterEnv ); |
| 723 | 777 } |
| 724 saveTimeStampCountInto( low, high ); | 778 |
| 725 out = high; | 779 |
| 726 out = (out << 32) + low; | 780 //================================ |
| 727 return out; | 781 |
| 728 } | 782 |
| 729 | 783 /*Later, improve this -- for now, just exits the application after printing |
| 784 * the error message. | |
| 785 */ | |
| 786 void | |
| 787 VMS__throw_exception( char *msgStr, VirtProcr *reqstPr, VMSExcp *excpData ) | |
| 788 { | |
| 789 printf(msgStr); | |
| 790 fflush(stdin); | |
| 791 exit(1); | |
| 792 } | |
| 793 | |
| 794 |
