Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VMS_impls > VMS__MC_shared_impl
comparison VMS.c @ 205:cb888346c3e0
Changed include paths, but version still does not work because of accidental merge
| author | Merten Sach <msach@mailbox.tu-berlin.de> |
|---|---|
| date | Fri, 17 Feb 2012 18:28:59 +0100 |
| parents | ad8213a8e916 |
| children |
comparison
equal
deleted
inserted
replaced
| 75:c13e3563523b | 93:56f83c07959d |
|---|---|
| 11 #include <inttypes.h> | 11 #include <inttypes.h> |
| 12 #include <sys/time.h> | 12 #include <sys/time.h> |
| 13 | 13 |
| 14 #include "VMS.h" | 14 #include "VMS.h" |
| 15 #include "ProcrContext.h" | 15 #include "ProcrContext.h" |
| 16 #include "Queue_impl/BlockingQueue.h" | |
| 17 #include "Histogram/Histogram.h" | |
| 18 | 16 |
| 19 | 17 |
| 20 #define thdAttrs NULL | 18 #define thdAttrs NULL |
| 21 | 19 |
| 22 //=========================================================================== | 20 //=========================================================================== |
| 23 void | 21 void |
| 24 shutdownFn( void *dummy, VirtProcr *dummy2 ); | 22 shutdownFn( void *dummy, SlaveVP *dummy2 ); |
| 25 | 23 |
| 26 SchedSlot ** | 24 SchedSlot ** |
| 27 create_sched_slots(); | 25 create_sched_slots(); |
| 28 | 26 |
| 29 void | 27 void |
| 34 | 32 |
| 35 MallocProlog * | 33 MallocProlog * |
| 36 create_free_list(); | 34 create_free_list(); |
| 37 | 35 |
| 38 void | 36 void |
| 39 endOSThreadFn( void *initData, VirtProcr *animatingPr ); | 37 endOSThreadFn( void *initData, SlaveVP *animatingPr ); |
| 40 | 38 |
| 41 pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER; | 39 pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER; |
| 42 pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER; | 40 pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER; |
| 43 | 41 |
| 44 //=========================================================================== | 42 //=========================================================================== |
| 70 /*This allocates VMS data structures, populates the master VMSProc, | 68 /*This allocates VMS data structures, populates the master VMSProc, |
| 71 * and master environment, and returns the master environment to the semantic | 69 * and master environment, and returns the master environment to the semantic |
| 72 * layer. | 70 * layer. |
| 73 */ | 71 */ |
| 74 void | 72 void |
| 75 VMS__init() | 73 VMS_int__init() |
| 76 { | 74 { |
| 77 create_masterEnv(); | 75 create_masterEnv(); |
| 78 create_the_coreLoop_OS_threads(); | 76 create_the_coreLoop_OS_threads(); |
| 79 } | 77 } |
| 80 | 78 |
| 81 #ifdef SEQUENTIAL | 79 #ifdef SEQUENTIAL |
| 82 | 80 |
| 83 /*To initialize the sequential version, just don't create the threads | 81 /*To initialize the sequential version, just don't create the threads |
| 84 */ | 82 */ |
| 85 void | 83 void |
| 86 VMS__init_Seq() | 84 VMS_int__init_Seq() |
| 87 { | 85 { |
| 88 create_masterEnv(); | 86 create_masterEnv(); |
| 89 } | 87 } |
| 90 | 88 |
| 91 #endif | 89 #endif |
| 93 void | 91 void |
| 94 create_masterEnv() | 92 create_masterEnv() |
| 95 { MasterEnv *masterEnv; | 93 { MasterEnv *masterEnv; |
| 96 VMSQueueStruc **readyToAnimateQs; | 94 VMSQueueStruc **readyToAnimateQs; |
| 97 int coreIdx; | 95 int coreIdx; |
| 98 VirtProcr **masterVPs; | 96 SlaveVP **masterVPs; |
| 99 SchedSlot ***allSchedSlots; //ptr to array of ptrs | 97 SchedSlot ***allSchedSlots; //ptr to array of ptrs |
| 100 | 98 |
| 101 | 99 |
| 102 //Make the master env, which holds everything else | 100 //Make the master env, which holds everything else |
| 103 _VMSMasterEnv = malloc( sizeof(MasterEnv) ); | 101 _VMSMasterEnv = malloc( sizeof(MasterEnv) ); |
| 125 | 123 |
| 126 //===================== Only VMS__malloc after this ==================== | 124 //===================== Only VMS__malloc after this ==================== |
| 127 masterEnv = (MasterEnv*)_VMSMasterEnv; | 125 masterEnv = (MasterEnv*)_VMSMasterEnv; |
| 128 | 126 |
| 129 //Make a readyToAnimateQ for each core loop | 127 //Make a readyToAnimateQ for each core loop |
| 130 readyToAnimateQs = VMS__malloc( NUM_CORES * sizeof(VMSQueueStruc *) ); | 128 readyToAnimateQs = VMS_int__malloc( NUM_CORES * sizeof(VMSQueueStruc *) ); |
| 131 masterVPs = VMS__malloc( NUM_CORES * sizeof(VirtProcr *) ); | 129 masterVPs = VMS_int__malloc( NUM_CORES * sizeof(SlaveVP *) ); |
| 132 | 130 |
| 133 //One array for each core, 3 in array, core's masterVP scheds all | 131 //One array for each core, 3 in array, core's masterVP scheds all |
| 134 allSchedSlots = VMS__malloc( NUM_CORES * sizeof(SchedSlot *) ); | 132 allSchedSlots = VMS_int__malloc( NUM_CORES * sizeof(SchedSlot *) ); |
| 135 | 133 |
| 136 _VMSMasterEnv->numProcrsCreated = 0; //used by create procr | 134 _VMSMasterEnv->numVPsCreated = 0; //used by create procr |
| 137 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) | 135 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) |
| 138 { | 136 { |
| 139 readyToAnimateQs[ coreIdx ] = makeVMSQ(); | 137 readyToAnimateQs[ coreIdx ] = makeVMSQ(); |
| 140 | 138 |
| 141 //Q: should give masterVP core-specific info as its init data? | 139 //Q: should give masterVP core-specific info as its init data? |
| 142 masterVPs[ coreIdx ] = VMS__create_procr( (VirtProcrFnPtr)&masterLoop, (void*)masterEnv ); | 140 masterVPs[ coreIdx ] = VMS_int__create_procr( (VirtProcrFnPtr)&masterLoop, (void*)masterEnv ); |
| 143 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx; | 141 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx; |
| 144 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core | 142 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core |
| 145 _VMSMasterEnv->numMasterInARow[ coreIdx ] = 0; | 143 _VMSMasterEnv->numMasterInARow[ coreIdx ] = 0; |
| 146 _VMSMasterEnv->workStealingGates[ coreIdx ] = NULL; | 144 _VMSMasterEnv->workStealingGates[ coreIdx ] = NULL; |
| 147 } | 145 } |
| 159 //============================= MEASUREMENT STUFF ======================== | 157 //============================= MEASUREMENT STUFF ======================== |
| 160 #ifdef STATS__TURN_ON_PROBES | 158 #ifdef STATS__TURN_ON_PROBES |
| 161 _VMSMasterEnv->dynIntervalProbesInfo = | 159 _VMSMasterEnv->dynIntervalProbesInfo = |
| 162 makePrivDynArrayOfSize( (void***)&(_VMSMasterEnv->intervalProbes), 200); | 160 makePrivDynArrayOfSize( (void***)&(_VMSMasterEnv->intervalProbes), 200); |
| 163 | 161 |
| 164 _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS__free ); | 162 _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS_int__free ); |
| 165 | 163 |
| 166 //put creation time directly into master env, for fast retrieval | 164 //put creation time directly into master env, for fast retrieval |
| 167 struct timeval timeStamp; | 165 struct timeval timeStamp; |
| 168 gettimeofday( &(timeStamp), NULL); | 166 gettimeofday( &(timeStamp), NULL); |
| 169 _VMSMasterEnv->createPtInSecs = | 167 _VMSMasterEnv->createPtInSecs = |
| 184 SchedSlot ** | 182 SchedSlot ** |
| 185 create_sched_slots() | 183 create_sched_slots() |
| 186 { SchedSlot **schedSlots; | 184 { SchedSlot **schedSlots; |
| 187 int i; | 185 int i; |
| 188 | 186 |
| 189 schedSlots = VMS__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) ); | 187 schedSlots = VMS_int__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) ); |
| 190 | 188 |
| 191 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) | 189 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) |
| 192 { | 190 { |
| 193 schedSlots[i] = VMS__malloc( sizeof(SchedSlot) ); | 191 schedSlots[i] = VMS_int__malloc( sizeof(SchedSlot) ); |
| 194 | 192 |
| 195 //Set state to mean "handling requests done, slot needs filling" | 193 //Set state to mean "handling requests done, slot needs filling" |
| 196 schedSlots[i]->workIsDone = FALSE; | 194 schedSlots[i]->workIsDone = FALSE; |
| 197 schedSlots[i]->needsProcrAssigned = TRUE; | 195 schedSlots[i]->needsProcrAssigned = TRUE; |
| 198 } | 196 } |
| 203 void | 201 void |
| 204 freeSchedSlots( SchedSlot **schedSlots ) | 202 freeSchedSlots( SchedSlot **schedSlots ) |
| 205 { int i; | 203 { int i; |
| 206 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) | 204 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) |
| 207 { | 205 { |
| 208 VMS__free( schedSlots[i] ); | 206 VMS_int__free( schedSlots[i] ); |
| 209 } | 207 } |
| 210 VMS__free( schedSlots ); | 208 VMS_int__free( schedSlots ); |
| 211 } | 209 } |
| 212 | 210 |
| 213 | 211 |
| 214 void | 212 void |
| 215 create_the_coreLoop_OS_threads() | 213 create_the_coreLoop_OS_threads() |
| 223 // stuff before the coreLoops set off. | 221 // stuff before the coreLoops set off. |
| 224 _VMSMasterEnv->setupComplete = 0; | 222 _VMSMasterEnv->setupComplete = 0; |
| 225 | 223 |
| 226 //Make the threads that animate the core loops | 224 //Make the threads that animate the core loops |
| 227 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) | 225 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) |
| 228 { coreLoopThdParams[coreIdx] = VMS__malloc( sizeof(ThdParams) ); | 226 { coreLoopThdParams[coreIdx] = VMS_int__malloc( sizeof(ThdParams) ); |
| 229 coreLoopThdParams[coreIdx]->coreNum = coreIdx; | 227 coreLoopThdParams[coreIdx]->coreNum = coreIdx; |
| 230 | 228 |
| 231 retCode = | 229 retCode = |
| 232 pthread_create( &(coreLoopThdHandles[coreIdx]), | 230 pthread_create( &(coreLoopThdHandles[coreIdx]), |
| 233 thdAttrs, | 231 thdAttrs, |
| 240 /*Semantic layer calls this when it wants the system to start running. | 238 /*Semantic layer calls this when it wants the system to start running. |
| 241 * | 239 * |
| 242 *This starts the core loops running then waits for them to exit. | 240 *This starts the core loops running then waits for them to exit. |
| 243 */ | 241 */ |
| 244 void | 242 void |
| 245 VMS__start_the_work_then_wait_until_done() | 243 VMS_WL__start_the_work_then_wait_until_done() |
| 246 { int coreIdx; | 244 { int coreIdx; |
| 247 //Start the core loops running | 245 //Start the core loops running |
| 248 | 246 |
| 249 //tell the core loop threads that setup is complete | 247 //tell the core loop threads that setup is complete |
| 250 //get lock, to lock out any threads still starting up -- they'll see | 248 //get lock, to lock out any threads still starting up -- they'll see |
| 270 #ifdef SEQUENTIAL | 268 #ifdef SEQUENTIAL |
| 271 /*Only difference between version with an OS thread pinned to each core and | 269 /*Only difference between version with an OS thread pinned to each core and |
| 272 * the sequential version of VMS is VMS__init_Seq, this, and coreLoop_Seq. | 270 * the sequential version of VMS is VMS__init_Seq, this, and coreLoop_Seq. |
| 273 */ | 271 */ |
| 274 void | 272 void |
| 275 VMS__start_the_work_then_wait_until_done_Seq() | 273 VMS_WL__start_the_work_then_wait_until_done_Seq() |
| 276 { | 274 { |
| 277 //Instead of un-suspending threads, just call the one and only | 275 //Instead of un-suspending threads, just call the one and only |
| 278 // core loop (sequential version), in the main thread. | 276 // core loop (sequential version), in the main thread. |
| 279 coreLoop_Seq( NULL ); | 277 coreLoop_Seq( NULL ); |
| 280 flushRegisters(); | 278 flushRegisters(); |
| 281 | 279 |
| 282 } | 280 } |
| 283 #endif | 281 #endif |
| 284 | 282 |
| 285 inline VirtProcr * | 283 inline SlaveVP * |
| 286 VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData ) | 284 VMS_int__create_procr( VirtProcrFnPtr fnPtr, void *initialData ) |
| 287 { VirtProcr *newPr; | 285 { SlaveVP *newPr; |
| 288 void *stackLocs; | 286 void *stackLocs; |
| 289 | 287 |
| 290 newPr = VMS__malloc( sizeof(VirtProcr) ); | 288 newPr = VMS_int__malloc( sizeof(SlaveVP) ); |
| 291 stackLocs = VMS__malloc( VIRT_PROCR_STACK_SIZE ); | 289 stackLocs = VMS_int__malloc( VIRT_PROCR_STACK_SIZE ); |
| 292 if( stackLocs == 0 ) | 290 if( stackLocs == 0 ) |
| 293 { perror("VMS__malloc stack"); exit(1); } | 291 { perror("VMS__malloc stack"); exit(1); } |
| 294 | 292 |
| 295 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); | 293 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); |
| 296 } | 294 } |
| 297 | 295 |
| 298 /* "ext" designates that it's for use outside the VMS system -- should only | 296 /* "ext" designates that it's for use outside the VMS system -- should only |
| 299 * be called from main thread or other thread -- never from code animated by | 297 * be called from main thread or other thread -- never from code animated by |
| 300 * a VMS virtual processor. | 298 * a VMS virtual processor. |
| 301 */ | 299 */ |
| 302 inline VirtProcr * | 300 inline SlaveVP * |
| 303 VMS_ext__create_procr( VirtProcrFnPtr fnPtr, void *initialData ) | 301 VMS_ext__create_procr( VirtProcrFnPtr fnPtr, void *initialData ) |
| 304 { VirtProcr *newPr; | 302 { SlaveVP *newPr; |
| 305 char *stackLocs; | 303 char *stackLocs; |
| 306 | 304 |
| 307 newPr = malloc( sizeof(VirtProcr) ); | 305 newPr = malloc( sizeof(SlaveVP) ); |
| 308 stackLocs = malloc( VIRT_PROCR_STACK_SIZE ); | 306 stackLocs = malloc( VIRT_PROCR_STACK_SIZE ); |
| 309 if( stackLocs == 0 ) | 307 if( stackLocs == 0 ) |
| 310 { perror("malloc stack"); exit(1); } | 308 { perror("malloc stack"); exit(1); } |
| 311 | 309 |
| 312 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); | 310 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); |
| 314 | 312 |
| 315 | 313 |
| 316 /*Anticipating multi-tasking | 314 /*Anticipating multi-tasking |
| 317 */ | 315 */ |
| 318 void * | 316 void * |
| 319 VMS__give_sem_env_for( VirtProcr *animPr ) | 317 VMS_WL__give_sem_env_for( SlaveVP *animPr ) |
| 320 { | 318 { |
| 321 return _VMSMasterEnv->semanticEnv; | 319 return _VMSMasterEnv->semanticEnv; |
| 322 } | 320 } |
| 323 //=========================================================================== | 321 //=========================================================================== |
| 324 /*there is a label inside this function -- save the addr of this label in | 322 /*there is a label inside this function -- save the addr of this label in |
| 329 * slave that animated the just-ended work-unit, so all the state is saved | 327 * slave that animated the just-ended work-unit, so all the state is saved |
| 330 * there, and will get passed along, inside the request handler, to the | 328 * there, and will get passed along, inside the request handler, to the |
| 331 * next work-unit for that procr. | 329 * next work-unit for that procr. |
| 332 */ | 330 */ |
| 333 void | 331 void |
| 334 VMS__suspend_procr( VirtProcr *animatingPr ) | 332 VMS_int__suspend_procr( SlaveVP *animatingPr ) |
| 335 { | 333 { |
| 336 | 334 |
| 337 //The request to master will cause this suspended virt procr to get | 335 //The request to master will cause this suspended virt procr to get |
| 338 // scheduled again at some future point -- to resume, core loop jumps | 336 // scheduled again at some future point -- to resume, core loop jumps |
| 339 // to the resume point (below), which causes restore of saved regs and | 337 // to the resume point (below), which causes restore of saved regs and |
| 377 * free the struc, while VMS knows how to get the newPr if it wants it, and | 375 * free the struc, while VMS knows how to get the newPr if it wants it, and |
| 378 * it lets the lang have lang-specific data related to creation transported | 376 * it lets the lang have lang-specific data related to creation transported |
| 379 * to the plugin. | 377 * to the plugin. |
| 380 */ | 378 */ |
| 381 void | 379 void |
| 382 VMS__send_create_procr_req( void *semReqData, VirtProcr *reqstingPr ) | 380 VMS_WL__send_create_procr_req( void *semReqData, SlaveVP *reqstingPr ) |
| 383 { VMSReqst req; | 381 { VMSReqst req; |
| 384 | 382 |
| 385 req.reqType = createReq; | 383 req.reqType = createReq; |
| 386 req.semReqData = semReqData; | 384 req.semReqData = semReqData; |
| 387 req.nextReqst = reqstingPr->requests; | 385 req.nextReqst = reqstingPr->requests; |
| 388 reqstingPr->requests = &req; | 386 reqstingPr->requests = &req; |
| 389 | 387 |
| 390 VMS__suspend_procr( reqstingPr ); | 388 VMS_int__suspend_procr( reqstingPr ); |
| 391 } | 389 } |
| 392 | 390 |
| 393 | 391 |
| 394 /* | 392 /* |
| 395 *This adds a request to dissipate, then suspends the processor so that the | 393 *This adds a request to dissipate, then suspends the processor so that the |
| 411 * never returns from this call, but instead the virtual processor's trace | 409 * never returns from this call, but instead the virtual processor's trace |
| 412 * gets suspended in this call and all the virt processor's state disap- | 410 * gets suspended in this call and all the virt processor's state disap- |
| 413 * pears -- making that suspend the last thing in the virt procr's trace. | 411 * pears -- making that suspend the last thing in the virt procr's trace. |
| 414 */ | 412 */ |
| 415 void | 413 void |
| 416 VMS__send_dissipate_req( VirtProcr *procrToDissipate ) | 414 VMS_WL__send_dissipate_req( SlaveVP *procrToDissipate ) |
| 417 { VMSReqst req; | 415 { VMSReqst req; |
| 418 | 416 |
| 419 req.reqType = dissipate; | 417 req.reqType = dissipate; |
| 420 req.nextReqst = procrToDissipate->requests; | 418 req.nextReqst = procrToDissipate->requests; |
| 421 procrToDissipate->requests = &req; | 419 procrToDissipate->requests = &req; |
| 422 | 420 |
| 423 VMS__suspend_procr( procrToDissipate ); | 421 VMS_int__suspend_procr( procrToDissipate ); |
| 424 } | 422 } |
| 425 | 423 |
| 426 | 424 |
| 427 /* "ext" designates that it's for use outside the VMS system -- should only | 425 /* "ext" designates that it's for use outside the VMS system -- should only |
| 428 * be called from main thread or other thread -- never from code animated by | 426 * be called from main thread or other thread -- never from code animated by |
| 429 * a VMS virtual processor. | 427 * a VMS virtual processor. |
| 430 * | 428 * |
| 431 *Use this version to dissipate VPs created outside the VMS system. | 429 *Use this version to dissipate VPs created outside the VMS system. |
| 432 */ | 430 */ |
| 433 void | 431 void |
| 434 VMS_ext__dissipate_procr( VirtProcr *procrToDissipate ) | 432 VMS_ext__dissipate_procr( SlaveVP *procrToDissipate ) |
| 435 { | 433 { |
| 436 //NOTE: initialData was given to the processor, so should either have | 434 //NOTE: initialData was given to the processor, so should either have |
| 437 // been alloc'd with VMS__malloc, or freed by the level above animPr. | 435 // been alloc'd with VMS__malloc, or freed by the level above animPr. |
| 438 //So, all that's left to free here is the stack and the VirtProcr struc | 436 //So, all that's left to free here is the stack and the VirtProcr struc |
| 439 // itself | 437 // itself |
| 454 * send of the last request is called. | 452 * send of the last request is called. |
| 455 * | 453 * |
| 456 *The request handler has to call VMS__free_VMSReq for any of these | 454 *The request handler has to call VMS__free_VMSReq for any of these |
| 457 */ | 455 */ |
| 458 inline void | 456 inline void |
| 459 VMS__add_sem_request_in_mallocd_VMSReqst( void *semReqData, | 457 VMS_WL__add_sem_request_in_mallocd_VMSReqst( void *semReqData, |
| 460 VirtProcr *callingPr ) | 458 SlaveVP *callingPr ) |
| 461 { VMSReqst *req; | 459 { VMSReqst *req; |
| 462 | 460 |
| 463 req = VMS__malloc( sizeof(VMSReqst) ); | 461 req = VMS_int__malloc( sizeof(VMSReqst) ); |
| 464 req->reqType = semantic; | 462 req->reqType = semantic; |
| 465 req->semReqData = semReqData; | 463 req->semReqData = semReqData; |
| 466 req->nextReqst = callingPr->requests; | 464 req->nextReqst = callingPr->requests; |
| 467 callingPr->requests = req; | 465 callingPr->requests = req; |
| 468 } | 466 } |
| 471 * request data-struct is allocated on stack of this call & ptr to it sent | 469 * request data-struct is allocated on stack of this call & ptr to it sent |
| 472 * to plugin | 470 * to plugin |
| 473 *Then it does suspend, to cause request to be sent. | 471 *Then it does suspend, to cause request to be sent. |
| 474 */ | 472 */ |
| 475 inline void | 473 inline void |
| 476 VMS__send_sem_request( void *semReqData, VirtProcr *callingPr ) | 474 VMS_WL__send_sem_request( void *semReqData, SlaveVP *callingPr ) |
| 477 { VMSReqst req; | 475 { VMSReqst req; |
| 478 | 476 |
| 479 req.reqType = semantic; | 477 req.reqType = semantic; |
| 480 req.semReqData = semReqData; | 478 req.semReqData = semReqData; |
| 481 req.nextReqst = callingPr->requests; | 479 req.nextReqst = callingPr->requests; |
| 482 callingPr->requests = &req; | 480 callingPr->requests = &req; |
| 483 | 481 |
| 484 VMS__suspend_procr( callingPr ); | 482 VMS_int__suspend_procr( callingPr ); |
| 485 } | 483 } |
| 486 | 484 |
| 487 | 485 |
| 488 inline void | 486 inline void |
| 489 VMS__send_VMSSem_request( void *semReqData, VirtProcr *callingPr ) | 487 VMS_WL__send_VMSSem_request( void *semReqData, SlaveVP *callingPr ) |
| 490 { VMSReqst req; | 488 { VMSReqst req; |
| 491 | 489 |
| 492 req.reqType = VMSSemantic; | 490 req.reqType = VMSSemantic; |
| 493 req.semReqData = semReqData; | 491 req.semReqData = semReqData; |
| 494 req.nextReqst = callingPr->requests; //grab any other preceding | 492 req.nextReqst = callingPr->requests; //grab any other preceding |
| 495 callingPr->requests = &req; | 493 callingPr->requests = &req; |
| 496 | 494 |
| 497 VMS__suspend_procr( callingPr ); | 495 VMS_int__suspend_procr( callingPr ); |
| 498 } | 496 } |
| 499 | 497 |
| 500 | 498 |
| 501 /* | 499 /* |
| 502 */ | 500 */ |
| 503 VMSReqst * | 501 VMSReqst * |
| 504 VMS__take_next_request_out_of( VirtProcr *procrWithReq ) | 502 VMS_PI__take_next_request_out_of( SlaveVP *procrWithReq ) |
| 505 { VMSReqst *req; | 503 { VMSReqst *req; |
| 506 | 504 |
| 507 req = procrWithReq->requests; | 505 req = procrWithReq->requests; |
| 508 if( req == NULL ) return NULL; | 506 if( req == NULL ) return NULL; |
| 509 | 507 |
| 511 return req; | 509 return req; |
| 512 } | 510 } |
| 513 | 511 |
| 514 | 512 |
| 515 inline void * | 513 inline void * |
| 516 VMS__take_sem_reqst_from( VMSReqst *req ) | 514 VMS_PI__take_sem_reqst_from( VMSReqst *req ) |
| 517 { | 515 { |
| 518 return req->semReqData; | 516 return req->semReqData; |
| 519 } | 517 } |
| 520 | 518 |
| 521 | 519 |
| 533 * TODO: Later change this, to give probes their own separate plugin & have | 531 * TODO: Later change this, to give probes their own separate plugin & have |
| 534 * VMS-core steer the request to appropriate plugin | 532 * VMS-core steer the request to appropriate plugin |
| 535 * Do the same for OS calls -- look later at it.. | 533 * Do the same for OS calls -- look later at it.. |
| 536 */ | 534 */ |
| 537 void inline | 535 void inline |
| 538 VMS__handle_VMSSemReq( VMSReqst *req, VirtProcr *requestingPr, void *semEnv, | 536 VMS_PI__handle_VMSSemReq( VMSReqst *req, SlaveVP *requestingPr, void *semEnv, |
| 539 ResumePrFnPtr resumePrFnPtr ) | 537 ResumeVPFnPtr resumePrFnPtr ) |
| 540 { VMSSemReq *semReq; | 538 { VMSSemReq *semReq; |
| 541 IntervalProbe *newProbe; | 539 IntervalProbe *newProbe; |
| 542 | 540 |
| 543 semReq = req->semReqData; | 541 semReq = req->semReqData; |
| 544 | 542 |
| 545 newProbe = VMS__malloc( sizeof(IntervalProbe) ); | 543 newProbe = VMS_int__malloc( sizeof(IntervalProbe) ); |
| 546 newProbe->nameStr = VMS__strDup( semReq->nameStr ); | 544 newProbe->nameStr = VMS_int__strDup( semReq->nameStr ); |
| 547 newProbe->hist = NULL; | 545 newProbe->hist = NULL; |
| 548 newProbe->schedChoiceWasRecorded = FALSE; | 546 newProbe->schedChoiceWasRecorded = FALSE; |
| 549 | 547 |
| 550 //This runs in masterVP, so no race-condition worries | 548 //This runs in masterVP, so no race-condition worries |
| 551 newProbe->probeID = | 549 newProbe->probeID = |
| 574 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd | 572 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd |
| 575 * state, then that state gets freed (or sent to recycling) as a side-effect | 573 * state, then that state gets freed (or sent to recycling) as a side-effect |
| 576 * of dis-owning it. | 574 * of dis-owning it. |
| 577 */ | 575 */ |
| 578 void | 576 void |
| 579 VMS__dissipate_procr( VirtProcr *animatingPr ) | 577 VMS_int__dissipate_procr( SlaveVP *animatingPr ) |
| 580 { | 578 { |
| 581 //dis-own all locations owned by this processor, causing to be freed | 579 //dis-own all locations owned by this processor, causing to be freed |
| 582 // any locations that it is (was) sole owner of | 580 // any locations that it is (was) sole owner of |
| 583 //TODO: implement VMS__malloc system, including "give up ownership" | 581 //TODO: implement VMS__malloc system, including "give up ownership" |
| 584 | 582 |
| 587 // been alloc'd with VMS__malloc, or freed by the level above animPr. | 585 // been alloc'd with VMS__malloc, or freed by the level above animPr. |
| 588 //So, all that's left to free here is the stack and the VirtProcr struc | 586 //So, all that's left to free here is the stack and the VirtProcr struc |
| 589 // itself | 587 // itself |
| 590 //Note, should not stack-allocate initial data -- no guarantee, in | 588 //Note, should not stack-allocate initial data -- no guarantee, in |
| 591 // general that creating processor will outlive ones it creates. | 589 // general that creating processor will outlive ones it creates. |
| 592 VMS__free( animatingPr->startOfStack ); | 590 VMS_int__free( animatingPr->startOfStack ); |
| 593 VMS__free( animatingPr ); | 591 VMS_int__free( animatingPr ); |
| 594 } | 592 } |
| 595 | 593 |
| 596 | 594 |
| 597 //TODO: look at architecting cleanest separation between request handler | 595 //TODO: look at architecting cleanest separation between request handler |
| 598 // and master loop, for dissipate, create, shutdown, and other non-semantic | 596 // and master loop, for dissipate, create, shutdown, and other non-semantic |
| 625 * other words, this can be used as an abort, or else it should only be | 623 * other words, this can be used as an abort, or else it should only be |
| 626 * called when all AppVPs have finished dissipate requests -- only at that | 624 * called when all AppVPs have finished dissipate requests -- only at that |
| 627 * point is it sure that all results have completed. | 625 * point is it sure that all results have completed. |
| 628 */ | 626 */ |
| 629 void | 627 void |
| 630 VMS__shutdown() | 628 VMS_int__shutdown() |
| 631 { int coreIdx; | 629 { int coreIdx; |
| 632 VirtProcr *shutDownPr; | 630 SlaveVP *shutDownPr; |
| 633 | 631 |
| 634 //create the shutdown processors, one for each core loop -- put them | 632 //create the shutdown processors, one for each core loop -- put them |
| 635 // directly into the Q -- each core will die when gets one | 633 // directly into the Q -- each core will die when gets one |
| 636 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) | 634 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) |
| 637 { //Note, this is running in the master | 635 { //Note, this is running in the master |
| 638 shutDownPr = VMS__create_procr( &endOSThreadFn, NULL ); | 636 shutDownPr = VMS_int__create_procr( &endOSThreadFn, NULL ); |
| 639 writeVMSQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] ); | 637 writeVMSQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] ); |
| 640 } | 638 } |
| 641 | 639 |
| 642 } | 640 } |
| 643 | 641 |
| 654 * animator of the AppVP that is in turn animating this function over | 652 * animator of the AppVP that is in turn animating this function over |
| 655 * to core loop function -- note that this slices out a level of virtual | 653 * to core loop function -- note that this slices out a level of virtual |
| 656 * processors). | 654 * processors). |
| 657 */ | 655 */ |
| 658 void | 656 void |
| 659 endOSThreadFn( void *initData, VirtProcr *animatingPr ) | 657 endOSThreadFn( void *initData, SlaveVP *animatingPr ) |
| 660 { | 658 { |
| 661 #ifdef SEQUENTIAL | 659 #ifdef SEQUENTIAL |
| 662 asmTerminateCoreLoopSeq(animatingPr); | 660 asmTerminateCoreLoopSeq(animatingPr); |
| 663 #else | 661 #else |
| 664 asmTerminateCoreLoop(animatingPr); | 662 asmTerminateCoreLoop(animatingPr); |
| 667 | 665 |
| 668 | 666 |
| 669 /*This is called from the startup & shutdown | 667 /*This is called from the startup & shutdown |
| 670 */ | 668 */ |
| 671 void | 669 void |
| 672 VMS__cleanup_at_end_of_shutdown() | 670 VMS_int__cleanup_at_end_of_shutdown() |
| 673 { | 671 { |
| 674 //unused | 672 //unused |
| 675 //VMSQueueStruc **readyToAnimateQs; | 673 //VMSQueueStruc **readyToAnimateQs; |
| 676 //int coreIdx; | 674 //int coreIdx; |
| 677 //VirtProcr **masterVPs; | 675 //VirtProcr **masterVPs; |
| 705 printHist( _VMSMasterEnv->pluginTimeHist ); | 703 printHist( _VMSMasterEnv->pluginTimeHist ); |
| 706 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) | 704 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) |
| 707 { | 705 { |
| 708 freeVMSQ( readyToAnimateQs[ coreIdx ] ); | 706 freeVMSQ( readyToAnimateQs[ coreIdx ] ); |
| 709 //master VPs were created external to VMS, so use external free | 707 //master VPs were created external to VMS, so use external free |
| 710 VMS__dissipate_procr( masterVPs[ coreIdx ] ); | 708 VMS_int__dissipate_procr( masterVPs[ coreIdx ] ); |
| 711 | 709 |
| 712 freeSchedSlots( allSchedSlots[ coreIdx ] ); | 710 freeSchedSlots( allSchedSlots[ coreIdx ] ); |
| 713 } | 711 } |
| 714 #endif | 712 #endif |
| 715 #ifdef MEAS__TIME_STAMP_SUSP | 713 #ifdef MEAS__TIME_STAMP_SUSP |
| 716 printHist( _VMSMasterEnv->pluginTimeHist ); | 714 printHist( _VMSMasterEnv->pluginTimeHist ); |
| 717 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) | 715 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) |
| 718 { | 716 { |
| 719 freeVMSQ( readyToAnimateQs[ coreIdx ] ); | 717 freeVMSQ( readyToAnimateQs[ coreIdx ] ); |
| 720 //master VPs were created external to VMS, so use external free | 718 //master VPs were created external to VMS, so use external free |
| 721 VMS__dissipate_procr( masterVPs[ coreIdx ] ); | 719 VMS_int__dissipate_procr( masterVPs[ coreIdx ] ); |
| 722 | 720 |
| 723 freeSchedSlots( allSchedSlots[ coreIdx ] ); | 721 freeSchedSlots( allSchedSlots[ coreIdx ] ); |
| 724 } | 722 } |
| 725 #endif | 723 #endif |
| 726 | 724 |
| 761 | 759 |
| 762 /*Later, improve this -- for now, just exits the application after printing | 760 /*Later, improve this -- for now, just exits the application after printing |
| 763 * the error message. | 761 * the error message. |
| 764 */ | 762 */ |
| 765 void | 763 void |
| 766 VMS__throw_exception( char *msgStr, VirtProcr *reqstPr, VMSExcp *excpData ) | 764 VMS_PI__throw_exception( char *msgStr, SlaveVP *reqstPr, VMSExcp *excpData ) |
| 767 { | 765 { |
| 768 printf("%s",msgStr); | 766 printf("%s",msgStr); |
| 769 fflush(stdin); | 767 fflush(stdin); |
| 770 exit(1); | 768 exit(1); |
| 771 } | 769 } |
