Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VMS_impls > VMS__MC_shared_impl
comparison VMS.c @ 24:2b161e1a50ee
1st working version -- as far as can tell due to SEH bugs
| author | Me |
|---|---|
| date | Wed, 07 Jul 2010 13:15:54 -0700 |
| parents | 1dbc7f6e3e67 |
| children | c556193f7211 |
comparison
equal
deleted
inserted
replaced
| 6:641e4ced0df8 | 7:c6b65a94790f |
|---|---|
| 69 //Set slot 0 to be the master virt procr & set flags just in case | 69 //Set slot 0 to be the master virt procr & set flags just in case |
| 70 masterEnv->schedSlots[0]->needsProcrAssigned = FALSE; //says don't touch | 70 masterEnv->schedSlots[0]->needsProcrAssigned = FALSE; //says don't touch |
| 71 masterEnv->schedSlots[0]->workIsDone = FALSE; //says don't touch | 71 masterEnv->schedSlots[0]->workIsDone = FALSE; //says don't touch |
| 72 masterEnv->schedSlots[0]->procrAssignedToSlot = masterEnv->masterVirtPr; | 72 masterEnv->schedSlots[0]->procrAssignedToSlot = masterEnv->masterVirtPr; |
| 73 masterEnv->masterVirtPr->schedSlot = masterEnv->schedSlots[0]; | 73 masterEnv->masterVirtPr->schedSlot = masterEnv->schedSlots[0]; |
| 74 masterEnv->stillRunning = FALSE; | |
| 74 | 75 |
| 75 //First core loop to start up gets this, which will schedule seed Pr | 76 //First core loop to start up gets this, which will schedule seed Pr |
| 76 //TODO: debug: check address of masterVirtPr | 77 //TODO: debug: check address of masterVirtPr |
| 77 writeCASQ( masterEnv->masterVirtPr, workQ ); | 78 writeCASQ( masterEnv->masterVirtPr, workQ ); |
| 78 | 79 |
| 79 numProcrsCreated = 1; | 80 numProcrsCreated = 1; |
| 81 | |
| 82 //======================================================================== | |
| 83 // Create the Threads | |
| 84 int coreIdx; | |
| 85 | |
| 86 //Make params given to the win threads that animate the core loops | |
| 87 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) | |
| 88 { coreLoopThdParams[coreIdx] = malloc( sizeof(ThdParams) ); | |
| 89 coreLoopThdParams[coreIdx]->coreNum = coreIdx; | |
| 90 | |
| 91 //make the core loop threads, born in suspended state | |
| 92 coreLoopThdHandles[ coreIdx ] = | |
| 93 CreateThread ( NULL, // Security attributes | |
| 94 0, // Stack size | |
| 95 coreLoop, | |
| 96 coreLoopThdParams[coreIdx], | |
| 97 CREATE_SUSPENDED, | |
| 98 &(coreLoopThdIds[coreIdx]) | |
| 99 ); | |
| 100 } | |
| 101 | |
| 80 } | 102 } |
| 81 | 103 |
| 82 | 104 |
| 83 void | 105 void |
| 84 create_sched_slots( MasterEnv *masterEnv ) | 106 create_sched_slots( MasterEnv *masterEnv ) |
| 101 } | 123 } |
| 102 | 124 |
| 103 | 125 |
| 104 /*Semantic layer calls this when it wants the system to start running.. | 126 /*Semantic layer calls this when it wants the system to start running.. |
| 105 * | 127 * |
| 106 *This creates the core loops, pins them to physical cores, gives them the | 128 *This starts the core loops running then waits for them to exit. |
| 107 * pointer to the workQ, and starts them running. | 129 */ |
| 108 */ | 130 void |
| 109 void | 131 VMS__start_the_work_then_wait_until_done() |
| 110 VMS__start() | |
| 111 { int coreIdx; | 132 { int coreIdx; |
| 112 | 133 //Start the core loops running |
| 113 //TODO: Save "orig" stack pointer and frame ptr -- restore in VMS__end() | 134 //=========================================================================== |
| 114 //Create the win threads that animate the core loops | 135 LARGE_INTEGER stPerfCount, endPerfCount, countFreq; |
| 136 unsigned long long count = 0, freq = 0; | |
| 137 double runTime; | |
| 138 | |
| 139 QueryPerformanceCounter( &stPerfCount ); | |
| 140 | |
| 141 //start them running | |
| 142 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) | |
| 143 { //Create the threads | |
| 144 ResumeThread( coreLoopThdHandles[coreIdx] ); //starts thread | |
| 145 } | |
| 146 | |
| 147 //wait for all to complete | |
| 115 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) | 148 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) |
| 116 { | 149 { |
| 117 coreLoopThdParams[coreIdx] = (ThdParams *)malloc( sizeof(ThdParams) ); | 150 WaitForSingleObject(coreLoopThdHandles[coreIdx], INFINITE); |
| 118 coreLoopThdParams[coreIdx]->coreNum = coreIdx; | |
| 119 | |
| 120 coreLoopThdHandles[coreIdx] = | |
| 121 CreateThread ( NULL, // Security attributes | |
| 122 0, // Stack size | |
| 123 coreLoop, | |
| 124 coreLoopThdParams[coreIdx], | |
| 125 CREATE_SUSPENDED, | |
| 126 &(coreLoopThdIds[coreIdx]) | |
| 127 ); | |
| 128 ResumeThread( coreLoopThdHandles[coreIdx] ); //starts thread | |
| 129 } | 151 } |
| 152 | |
| 153 //NOTE: do not clean up VMS env here -- semantic layer has to have | |
| 154 // a chance to clean up its environment first, then do a call to free | |
| 155 // the Master env and rest of VMS locations | |
| 156 | |
| 157 QueryPerformanceCounter( &endPerfCount ); | |
| 158 count = endPerfCount.QuadPart - stPerfCount.QuadPart; | |
| 159 | |
| 160 QueryPerformanceFrequency( &countFreq ); | |
| 161 freq = countFreq.QuadPart; | |
| 162 runTime = (double)count / (double)freq; | |
| 163 | |
| 164 printf("\n Time startup to shutdown: %f\n", runTime); | |
| 165 fflush( stdin ); | |
| 130 } | 166 } |
| 131 | 167 |
| 132 | 168 |
| 133 | 169 |
| 134 /*Create stack, then create __cdecl structure on it and put initialData and | 170 /*Create stack, then create __cdecl structure on it and put initialData and |
| 281 | 317 |
| 282 | 318 |
| 283 /*This inserts the semantic-layer's request data into standard VMS carrier | 319 /*This inserts the semantic-layer's request data into standard VMS carrier |
| 284 */ | 320 */ |
| 285 inline void | 321 inline void |
| 286 VMS__send_sem_request( void *semReqData, VirtProcr *callingPr ) | 322 VMS__add_sem_request( void *semReqData, VirtProcr *callingPr ) |
| 287 { VMSReqst *req; | 323 { VMSReqst *req; |
| 288 | 324 |
| 289 req = malloc( sizeof(VMSReqst) ); | 325 req = malloc( sizeof(VMSReqst) ); |
| 290 // req->virtProcrFrom = callingPr; | 326 // req->virtProcrFrom = callingPr; |
| 291 req->reqType = semantic; | 327 req->reqType = semantic; |
| 293 req->nextReqst = callingPr->requests; | 329 req->nextReqst = callingPr->requests; |
| 294 callingPr->requests = req; | 330 callingPr->requests = req; |
| 295 } | 331 } |
| 296 | 332 |
| 297 | 333 |
| 298 /*This creates a request of type "dissipate" -- which will cause the virt | |
| 299 * processor's state and owned locations to be freed | |
| 300 */ | |
| 301 inline void | |
| 302 VMS__send_dissipate_request( VirtProcr *procrToDissipate ) | |
| 303 { VMSReqst *req; | |
| 304 | |
| 305 req = malloc( sizeof(VMSReqst) ); | |
| 306 // req->virtProcrFrom = callingPr; | |
| 307 req->reqType = dissipate; | |
| 308 req->nextReqst = procrToDissipate->requests; | |
| 309 procrToDissipate->requests = req; | |
| 310 } | |
| 311 | |
| 312 | 334 |
| 313 //TODO: add a semantic-layer supplied "freer" for the semantic-data portion | 335 //TODO: add a semantic-layer supplied "freer" for the semantic-data portion |
| 314 // of a request -- IE call with both a virt procr and a fn-ptr to request | 336 // of a request -- IE call with both a virt procr and a fn-ptr to request |
| 315 // freer (or maybe put request freer as a field in virt procr?) | 337 // freer (or maybe put request freer as a field in virt procr?) |
| 316 void | 338 void |
| 320 req = procrWithReq->requests; | 342 req = procrWithReq->requests; |
| 321 procrWithReq->requests = procrWithReq->requests->nextReqst; | 343 procrWithReq->requests = procrWithReq->requests->nextReqst; |
| 322 free( req ); | 344 free( req ); |
| 323 } | 345 } |
| 324 | 346 |
| 325 /*This must be called by the request handler plugin -- it cannot be called | 347 |
| 326 * from the semantic library "dissipate processor" function -- instead, the | 348 //TODO: add a semantic-layer supplied "freer" for the semantic-data portion |
| 327 * semantic layer has to generate a request for the plug-in to call this | 349 // of a request -- IE call with both a virt procr and a fn-ptr to request |
| 328 * function. | 350 // freer (also maybe put sem request freer as a field in virt procr?) |
| 329 *The reason is that this frees the virtual processor's stack -- which is | 351 void |
| 330 * still in use inside semantic library calls! | 352 VMS__free_request( VMSReqst *req ) |
| 331 * | 353 { |
| 332 *This frees or recycles all the state owned by and comprising the animating | 354 free( req ); |
| 333 * virtual procr. It frees any state that was malloc'd by the VMS system | 355 } |
| 334 * itself, and asks the VMS system to dis-own any VMS__malloc'd locations. | 356 |
| 335 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd | 357 VMSReqst * |
| 336 * state, then that state gets freed (or sent to recycling) as a side-effect | 358 VMS__take_top_request_from( VirtProcr *procrWithReq ) |
| 337 * of dis-owning it. | 359 { VMSReqst *req; |
| 338 */ | 360 |
| 339 void | 361 req = procrWithReq->requests; |
| 340 VMS__free_procr_locs( VirtProcr *animatingPr ) | 362 if( req == NULL ) return req; |
| 341 { | |
| 342 //dis-own all locations owned by this processor, causing to be freed | |
| 343 // any locations that it is (was) sole owner of | |
| 344 //TODO: implement VMS__malloc system, including "give up ownership" | |
| 345 | |
| 346 VMS__remove_and_free_top_request( animatingPr ); | |
| 347 free( animatingPr->startOfStack ); | |
| 348 | 363 |
| 349 //NOTE: animatingPr->semanticData should either have been allocated | 364 procrWithReq->requests = procrWithReq->requests->nextReqst; |
| 350 // with VMS__malloc, or else freed in the request handler plug-in. | 365 return req; |
| 351 //NOTE: initialData was given to the processor, so should either have | 366 } |
| 352 // been alloc'd with VMS__malloc, or freed by the level above animPr. | 367 |
| 353 //So, all that's left to free here is the VirtProcr struc itself | 368 inline int |
| 354 free( animatingPr ); | 369 VMS__isSemanticReqst( VMSReqst *req ) |
| 370 { | |
| 371 return ( req->reqType == semantic ); | |
| 372 } | |
| 373 | |
| 374 | |
| 375 inline void * | |
| 376 VMS__take_sem_reqst_from( VMSReqst *req ) | |
| 377 { | |
| 378 return req->semReqData; | |
| 379 } | |
| 380 | |
| 381 inline int | |
| 382 VMS__isDissipateReqst( VMSReqst *req ) | |
| 383 { | |
| 384 return ( req->reqType == dissipate ); | |
| 385 } | |
| 386 | |
| 387 inline int | |
| 388 VMS__isCreateReqst( VMSReqst *req ) | |
| 389 { | |
| 390 return ( req->reqType == regCreated ); | |
| 391 } | |
| 392 | |
| 393 void | |
| 394 VMS__send_register_new_procr_request(VirtProcr *newPr, VirtProcr *reqstingPr) | |
| 395 { VMSReqst *req; | |
| 396 | |
| 397 req = malloc( sizeof(VMSReqst) ); | |
| 398 req->reqType = regCreated; | |
| 399 req->semReqData = newPr; | |
| 400 req->nextReqst = reqstingPr->requests; | |
| 401 reqstingPr->requests = req; | |
| 402 | |
| 403 VMS__suspend_procr( reqstingPr ); | |
| 355 } | 404 } |
| 356 | 405 |
| 357 | 406 |
| 358 /*The semantic layer figures out when the work is done ( perhaps by a call | 407 /*The semantic layer figures out when the work is done ( perhaps by a call |
| 359 * in the application to "work all done", or perhaps all the virtual | 408 * in the application to "work all done", or perhaps all the virtual |
| 382 { | 431 { |
| 383 return VMS__create_procr( &shutdownFn, NULL ); | 432 return VMS__create_procr( &shutdownFn, NULL ); |
| 384 } | 433 } |
| 385 | 434 |
| 386 | 435 |
| 436 /*This must be called by the request handler plugin -- it cannot be called | |
| 437 * from the semantic library "dissipate processor" function -- instead, the | |
| 438 * semantic layer has to generate a request for the plug-in to call this | |
| 439 * function. | |
| 440 *The reason is that this frees the virtual processor's stack -- which is | |
| 441 * still in use inside semantic library calls! | |
| 442 * | |
| 443 *This frees or recycles all the state owned by and comprising the VMS | |
| 444 * portion of the animating virtual procr. The request handler must first | |
| 445 * free any semantic data created for the processor that didn't use the | |
| 446 * VMS_malloc mechanism. Then it calls this, which first asks the malloc | |
| 447 * system to disown any state that did use VMS_malloc, and then frees the | |
| 448 * stack and the processor-struct itself. |
| 449 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd | |
| 450 * state, then that state gets freed (or sent to recycling) as a side-effect | |
| 451 * of dis-owning it. | |
| 452 */ | |
| 453 void | |
| 454 VMS__free_procr_locs( VirtProcr *animatingPr ) | |
| 455 { | |
| 456 //dis-own all locations owned by this processor, causing to be freed | |
| 457 // any locations that it is (was) sole owner of | |
| 458 //TODO: implement VMS__malloc system, including "give up ownership" | |
| 459 | |
| 460 //The dissipate request might still be attached, so remove and free it | |
| 461 VMS__remove_and_free_top_request( animatingPr ); | |
| 462 free( animatingPr->startOfStack ); | |
| 463 | |
| 464 //NOTE: initialData was given to the processor, so should either have | |
| 465 // been alloc'd with VMS__malloc, or freed by the level above animPr. | |
| 466 //So, all that's left to free here is the stack and the VirtProcr struc | |
| 467 // itself | |
| 468 free( animatingPr->startOfStack ); | |
| 469 free( animatingPr ); | |
| 470 } | |
| 471 | |
| 472 | |
| 473 | |
| 387 /*This is the function run by the special "shut-down" processor | 474 /*This is the function run by the special "shut-down" processor |
| 388 * | 475 * |
| 389 *The _VMSMasterEnv is needed by this shut down function, so the "wait" | 476 *The _VMSMasterEnv is needed by this shut down function, so the "wait" |
| 390 * function run in the main loop has to free it, and the thread-related | 477 * function run in the main loop has to free it, and the thread-related |
| 391 * locations (coreLoopThdParams a.s.o.). | 478 * locations (coreLoopThdParams a.s.o.). |
| 420 | 507 |
| 421 //This is an issue: the animating processor of this function may not | 508 //This is an issue: the animating processor of this function may not |
| 422 // get its request handled before all the cores have shutdown. | 509 // get its request handled before all the cores have shutdown. |
| 423 //TODO: after all the threads stop, clean out the MasterEnv, the | 510 //TODO: after all the threads stop, clean out the MasterEnv, the |
| 424 // SemanticEnv, and the workQ before returning. | 511 // SemanticEnv, and the workQ before returning. |
| 425 VMS__send_dissipate_request( animatingPr ); | 512 VMS__dissipate_procr( animatingPr ); //will never come back from this |
| 426 VMS__suspend_procr( animatingPr ); //will never come back from this | 513 } |
| 427 } | 514 |
| 428 | 515 |
| 429 | 516 /*This has to free anything allocated during VMS_init, and any other alloc'd |
| 517 * locations that might be left over. | |
| 518 */ | |
| 519 void | |
| 520 VMS__shutdown() | |
| 521 { int i; | |
| 522 | |
| 523 free( _VMSWorkQ ); | |
| 524 free( _VMSMasterEnv->filledSlots ); | |
| 525 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) | |
| 526 { | |
| 527 free( _VMSMasterEnv->schedSlots[i] ); | |
| 528 } | |
| 529 | |
| 530 free( _VMSMasterEnv->schedSlots); | |
| 531 VMS__free_procr_locs( _VMSMasterEnv->masterVirtPr ); | |
| 532 | |
| 533 free( _VMSMasterEnv ); | |
| 534 } | |
| 535 | |
| 536 | |
| 537 //=========================================================================== | |
| 430 | 538 |
| 431 inline TSCount getTSCount() | 539 inline TSCount getTSCount() |
| 432 { unsigned int low, high; | 540 { unsigned int low, high; |
| 433 TSCount out; | 541 TSCount out; |
| 434 | 542 |
