Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VMS_impls > VMS__MC_shared_impl
comparison VMS.c @ 31:e69579a0e797
Works multi-core.. pinned VP to a core loop
| author | Me |
|---|---|
| date | Wed, 01 Sep 2010 08:23:39 -0700 |
| parents | c8823e0bb2b4 |
| children | 17d20e5cf924 |
comparison
equal
deleted
inserted
replaced
| 12:e65c232fa04b | 13:9b4e00ead10c |
|---|---|
| 16 | 16 |
| 17 //=========================================================================== | 17 //=========================================================================== |
| 18 void | 18 void |
| 19 shutdownFn( void *dummy, VirtProcr *dummy2 ); | 19 shutdownFn( void *dummy, VirtProcr *dummy2 ); |
| 20 | 20 |
| 21 void | 21 SchedSlot ** |
| 22 create_sched_slots( MasterEnv *masterEnv ); | 22 create_sched_slots(); |
| 23 | 23 |
| 24 void | 24 void |
| 25 create_masterEnv(); | 25 create_masterEnv(); |
| 26 | 26 |
| 27 void | 27 void |
| 46 *The semantic layer is isolated from the VMS internals by making the | 46 *The semantic layer is isolated from the VMS internals by making the |
| 47 * semantic layer do setup to a state that it's ready with its | 47 * semantic layer do setup to a state that it's ready with its |
| 48 * initial virt procrs, ready to schedule them to slots when the masterLoop | 48 * initial virt procrs, ready to schedule them to slots when the masterLoop |
| 49 * asks. Without this pattern, the semantic layer's setup would | 49 * asks. Without this pattern, the semantic layer's setup would |
| 50 * have to modify slots directly to assign the initial virt-procrs, and put | 50 * have to modify slots directly to assign the initial virt-procrs, and put |
| 51 * them into the workQ itself, breaking the isolation completely. | 51 * them into the readyToAnimateQ itself, breaking the isolation completely. |
| 52 * | 52 * |
| 53 * | 53 * |
| 54 *The semantic layer creates the initial virt procr(s), and adds its | 54 *The semantic layer creates the initial virt procr(s), and adds its |
| 55 * own environment to masterEnv, and fills in the pointers to | 55 * own environment to masterEnv, and fills in the pointers to |
| 56 * the requestHandler and slaveScheduler plug-in functions | 56 * the requestHandler and slaveScheduler plug-in functions |
| 75 create_masterEnv(); | 75 create_masterEnv(); |
| 76 } | 76 } |
| 77 | 77 |
| 78 void | 78 void |
| 79 create_masterEnv() | 79 create_masterEnv() |
| 80 { MasterEnv *masterEnv; | 80 { MasterEnv *masterEnv; |
| 81 VMSQueueStruc *workQ; | 81 SRSWQueueStruc **readyToAnimateQs; |
| 82 | 82 int coreIdx; |
| 83 //Make the central work-queue | 83 VirtProcr **masterVPs; |
| 84 _VMSWorkQ = makeVMSQ(); | 84 SchedSlot ***allSchedSlots; //ptr to array of ptrs |
| 85 workQ = _VMSWorkQ; | 85 |
| 86 | 86 //Make the master env, which holds everything else |
| 87 _VMSMasterEnv = malloc( sizeof(MasterEnv) ); | 87 _VMSMasterEnv = malloc( sizeof(MasterEnv) ); |
| 88 masterEnv = _VMSMasterEnv; | 88 masterEnv = _VMSMasterEnv; |
| 89 | 89 //Need to set start pt here 'cause used by seed procr, which is created |
| 90 //create the master virtual processor | 90 // before the first core loop starts up. -- not sure how yet.. |
| 91 masterEnv->masterVirtPr = VMS__create_procr( &masterLoop, masterEnv ); | 91 // masterEnv->coreLoopStartPt = ; |
| 92 | 92 // masterEnv->coreLoopEndPt = ; |
| 93 create_sched_slots( masterEnv ); | 93 |
| 94 | 94 //Make a readyToAnimateQ for each core loop |
| 95 masterEnv->stillRunning = FALSE; | 95 readyToAnimateQs = malloc( NUM_CORES * sizeof(SRSWQueueStruc *) ); |
| 96 masterEnv->numToPrecede = NUM_CORES; | 96 masterVPs = malloc( NUM_CORES * sizeof(VirtProcr *) ); |
| 97 | 97 |
| 98 //First core loop to start up gets this, which will schedule seed Pr | 98 //One array for each core, 3 in array, core's masterVP scheds all |
| 99 //TODO: debug: check address of masterVirtPr | 99 allSchedSlots = malloc( NUM_CORES * sizeof(SchedSlot *) ); |
| 100 writeVMSQ( masterEnv->masterVirtPr, workQ ); | 100 |
| 101 | 101 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) |
| 102 numProcrsCreated = 1; //global counter for debugging | 102 { |
| 103 readyToAnimateQs[ coreIdx ] = makeSRSWQ(); | |
| 104 | |
| 105 //Q: should give masterVP core-specific info as its init data? | |
| 106 masterVPs[ coreIdx ] = VMS__create_procr( &masterLoop, masterEnv ); | |
| 107 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx; | |
| 108 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core | |
| 109 } | |
| 110 _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs; | |
| 111 _VMSMasterEnv->masterVPs = masterVPs; | |
| 112 _VMSMasterEnv->allSchedSlots = allSchedSlots; | |
| 113 | |
| 114 | |
| 115 | |
| 116 //Aug 19, 2010: no longer need to place initial masterVP into queue | |
| 117 // because coreLoop now controls -- animates its masterVP when no work | |
| 118 | |
| 103 | 119 |
| 104 //==================== malloc substitute ======================== | 120 //==================== malloc substitute ======================== |
| 105 // | 121 // |
| 106 //Testing whether malloc is using thread-local storage and therefore | 122 //Testing whether malloc is using thread-local storage and therefore |
| 107 // causing unreliable behavior. | 123 // causing unreliable behavior. |
| 141 } | 157 } |
| 142 free( _VMSMasterEnv->freeList ); | 158 free( _VMSMasterEnv->freeList ); |
| 143 } | 159 } |
| 144 */ | 160 */ |
| 145 | 161 |
| 146 void | 162 SchedSlot ** |
| 147 create_sched_slots( MasterEnv *masterEnv ) | 163 create_sched_slots() |
| 148 { SchedSlot **schedSlots, **filledSlots; | 164 { SchedSlot **schedSlots; |
| 149 int i; | 165 int i; |
| 150 | 166 |
| 151 schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) ); | 167 schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) ); |
| 152 filledSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) ); | |
| 153 masterEnv->schedSlots = schedSlots; | |
| 154 masterEnv->filledSlots = filledSlots; | |
| 155 | 168 |
| 156 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) | 169 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) |
| 157 { | 170 { |
| 158 schedSlots[i] = malloc( sizeof(SchedSlot) ); | 171 schedSlots[i] = malloc( sizeof(SchedSlot) ); |
| 159 | 172 |
| 160 //Set state to mean "handling requests done, slot needs filling" | 173 //Set state to mean "handling requests done, slot needs filling" |
| 161 schedSlots[i]->workIsDone = FALSE; | 174 schedSlots[i]->workIsDone = FALSE; |
| 162 schedSlots[i]->needsProcrAssigned = TRUE; | 175 schedSlots[i]->needsProcrAssigned = TRUE; |
| 163 } | 176 } |
| 177 return schedSlots; | |
| 178 } | |
| 179 | |
| 180 | |
| 181 void | |
| 182 freeSchedSlots( SchedSlot **schedSlots ) | |
| 183 { int i; | |
| 184 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) | |
| 185 { | |
| 186 free( schedSlots[i] ); | |
| 187 } | |
| 188 free( schedSlots ); | |
| 164 } | 189 } |
| 165 | 190 |
| 166 | 191 |
| 167 void | 192 void |
| 168 create_the_coreLoop_OS_threads() | 193 create_the_coreLoop_OS_threads() |
| 265 | 290 |
| 266 newPr = malloc( sizeof(VirtProcr) ); | 291 newPr = malloc( sizeof(VirtProcr) ); |
| 267 newPr->procrID = numProcrsCreated++; | 292 newPr->procrID = numProcrsCreated++; |
| 268 newPr->nextInstrPt = fnPtr; | 293 newPr->nextInstrPt = fnPtr; |
| 269 newPr->initialData = initialData; | 294 newPr->initialData = initialData; |
| 295 newPr->requests = NULL; | |
| 296 // newPr->coreLoopStartPt = _VMSMasterEnv->coreLoopStartPt; | |
| 270 | 297 |
| 271 //fnPtr takes two params -- void *initData & void *animProcr | 298 //fnPtr takes two params -- void *initData & void *animProcr |
| 272 //alloc stack locations, make stackPtr be the highest addr minus room | 299 //alloc stack locations, make stackPtr be the highest addr minus room |
| 273 // for 2 params + return addr. Return addr (NULL) is in loc pointed to | 300 // for 2 params + return addr. Return addr (NULL) is in loc pointed to |
| 274 // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above | 301 // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above |
| 312 // coreIdx = callingPr->coreAnimatedBy; | 339 // coreIdx = callingPr->coreAnimatedBy; |
| 313 | 340 |
| 314 stackPtrAddr = &(callingPr->stackPtr); | 341 stackPtrAddr = &(callingPr->stackPtr); |
| 315 framePtrAddr = &(callingPr->framePtr); | 342 framePtrAddr = &(callingPr->framePtr); |
| 316 | 343 |
| 317 jmpPt = callingPr->coreLoopStartPt; | 344 jmpPt = _VMSMasterEnv->coreLoopStartPt; |
| 318 coreLoopFramePtr = callingPr->coreLoopFramePtr;//need this only | 345 coreLoopFramePtr = callingPr->coreLoopFramePtr;//need this only |
| 319 coreLoopStackPtr = callingPr->coreLoopStackPtr;//shouldn't need -- safety | 346 coreLoopStackPtr = callingPr->coreLoopStackPtr;//shouldn't need -- safety |
| 320 | 347 |
| 321 //Eclipse's compilation sequence complains -- so break into two | 348 //Eclipse's compilation sequence complains -- so break into two |
| 322 // separate in-line assembly pieces | 349 // separate in-line assembly pieces |
| 348 return; | 375 return; |
| 349 } | 376 } |
| 350 | 377 |
| 351 | 378 |
| 352 | 379 |
| 353 /*This is equivalent to "jump back to core loop" -- it's mainly only used | |
| 354 * just after adding dissipate request to a processor -- so the semantic | |
| 355 * layer is the only place it will be seen and/or used. | |
| 356 * | |
| 357 *It does almost the same thing as suspend, except don't need to save the | |
| 358 * stack nor set the nextInstrPt | |
| 359 * | |
| 360 *As of June 30, 2010 just implementing as a call to suspend -- just sugar | |
| 361 */ | |
| 362 void | |
| 363 VMS__return_from_fn( VirtProcr *animatingPr ) | |
| 364 { | |
| 365 VMS__suspend_procr( animatingPr ); | |
| 366 } | |
| 367 | |
| 368 | 380 |
| 369 /*Not sure yet the form going to put "dissipate" in, so this is the third | 381 /*Not sure yet the form going to put "dissipate" in, so this is the third |
| 370 * possibility -- the semantic layer can just make a macro that looks like | 382 * possibility -- the semantic layer can just make a macro that looks like |
| 371 * a call to its name, then expands to a call to this. | 383 * a call to its name, then expands to a call to this. |
| 372 * | 384 * |
| 437 | 449 |
| 438 | 450 |
| 439 //TODO: add a semantic-layer supplied "freer" for the semantic-data portion | 451 //TODO: add a semantic-layer supplied "freer" for the semantic-data portion |
| 440 // of a request -- IE call with both a virt procr and a fn-ptr to request | 452 // of a request -- IE call with both a virt procr and a fn-ptr to request |
| 441 // freer (also maybe put sem request freer as a field in virt procr?) | 453 // freer (also maybe put sem request freer as a field in virt procr?) |
| 442 //VMSHW relies right now on this only freeing VMS layer of request -- the | 454 //SSR relies right now on this only freeing VMS layer of request -- the |
| 443 // semantic portion of request is alloc'd and freed by request handler | 455 // semantic portion of request is alloc'd and freed by request handler |
| 444 void | 456 void |
| 445 VMS__free_request( VMSReqst *req ) | 457 VMS__free_request( VMSReqst *req ) |
| 446 { | 458 { |
| 447 free( req ); | 459 free( req ); |
| 451 VMS__take_top_request_from( VirtProcr *procrWithReq ) | 463 VMS__take_top_request_from( VirtProcr *procrWithReq ) |
| 452 { VMSReqst *req; | 464 { VMSReqst *req; |
| 453 | 465 |
| 454 req = procrWithReq->requests; | 466 req = procrWithReq->requests; |
| 455 if( req == NULL ) return req; | 467 if( req == NULL ) return req; |
| 456 | 468 |
| 457 procrWithReq->requests = procrWithReq->requests->nextReqst; | 469 procrWithReq->requests = procrWithReq->requests->nextReqst; |
| 458 return req; | 470 return req; |
| 471 } | |
| 472 | |
| 473 VMSReqst * | |
| 474 VMS__free_top_and_give_next_request_from( VirtProcr *procrWithReq ) | |
| 475 { VMSReqst *req; | |
| 476 | |
| 477 req = procrWithReq->requests; | |
| 478 if( req == NULL ) return req; | |
| 479 | |
| 480 procrWithReq->requests = procrWithReq->requests->nextReqst; | |
| 481 VMS__free_request( req ); | |
| 482 return procrWithReq->requests; | |
| 459 } | 483 } |
| 460 | 484 |
| 461 inline int | 485 inline int |
| 462 VMS__isSemanticReqst( VMSReqst *req ) | 486 VMS__isSemanticReqst( VMSReqst *req ) |
| 463 { | 487 { |
| 560 * locations it needs, and give ownership to masterVP. Then, they will be | 584 * locations it needs, and give ownership to masterVP. Then, they will be |
| 561 * automatically freed when the masterVP is dissipated. (This happens after | 585 * automatically freed when the masterVP is dissipated. (This happens after |
| 562 * the core loop threads have all exited) | 586 * the core loop threads have all exited) |
| 563 * | 587 * |
| 564 *In here,create one core-loop shut-down processor for each core loop and put | 588 *In here,create one core-loop shut-down processor for each core loop and put |
| 565 * them all directly into the workQ. | 589 * them all directly into the readyToAnimateQ. |
| 566 *Note, this function can ONLY be called after the semantic environment no | 590 *Note, this function can ONLY be called after the semantic environment no |
| 567 * longer cares if AppVPs get animated after the point this is called. In | 591 * longer cares if AppVPs get animated after the point this is called. In |
| 568 * other words, this can be used as an abort, or else it should only be | 592 * other words, this can be used as an abort, or else it should only be |
| 569 * called when all AppVPs have finished dissipate requests -- only at that | 593 * called when all AppVPs have finished dissipate requests -- only at that |
| 570 * point is it sure that all results have completed. | 594 * point is it sure that all results have completed. |
| 571 */ | 595 */ |
| 572 void | 596 void |
| 573 VMS__handle_shutdown_reqst( void *dummy, VirtProcr *animatingPr ) | 597 VMS__handle_shutdown_reqst( void *dummy, VirtProcr *animatingPr ) |
| 574 { int coreIdx; | 598 { int coreIdx; |
| 575 VirtProcr *shutDownPr; | 599 VirtProcr *shutDownPr; |
| 576 VMSQueueStruc *workQ = _VMSWorkQ; | |
| 577 | 600 |
| 578 //create the shutdown processors, one for each core loop -- put them | 601 //create the shutdown processors, one for each core loop -- put them |
| 579 // directly into _VMSWorkQ -- each core will die when it gets one, so | 602 // directly into the Q -- each core will die when it gets one |
| 580 // the system distributes them evenly itself. | |
| 581 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) | 603 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) |
| 582 { | 604 { |
| 583 shutDownPr = VMS__create_procr( &endOSThreadFn, NULL ); | 605 shutDownPr = VMS__create_procr( &endOSThreadFn, NULL ); |
| 584 writeVMSQ( shutDownPr, workQ ); | 606 writeSRSWQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] ); |
| 585 } | 607 } |
| 586 | 608 |
| 587 } | 609 } |
| 588 | 610 |
| 589 | 611 |
| 618 /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \ | 640 /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \ |
| 619 ); | 641 ); |
| 620 } | 642 } |
| 621 | 643 |
| 622 | 644 |
| 623 | 645 /*This is called after the threads have shut down and control has returned |
| 624 /*This is called after the threads have shut down and control as returned | |
| 625 * to the semantic layer, in the entry point function in the main thread. | 646 * to the semantic layer, in the entry point function in the main thread. |
| 626 * It has to free anything allocated during VMS_init, and any other alloc'd | 647 * It has to free anything allocated during VMS_init, and any other alloc'd |
| 627 * locations that might be left over. | 648 * locations that might be left over. |
| 628 */ | 649 */ |
| 629 void | 650 void |
| 630 VMS__cleanup_after_shutdown() | 651 VMS__cleanup_after_shutdown() |
| 631 { int i; | 652 { |
| 632 | 653 SRSWQueueStruc **readyToAnimateQs; |
| 633 free( _VMSWorkQ ); | 654 int coreIdx; |
| 634 free( _VMSMasterEnv->filledSlots ); | 655 VirtProcr **masterVPs; |
| 635 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) | 656 SchedSlot ***allSchedSlots; //ptr to array of ptrs |
| 657 | |
| 658 readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs; | |
| 659 masterVPs = _VMSMasterEnv->masterVPs; | |
| 660 allSchedSlots = _VMSMasterEnv->allSchedSlots; | |
| 661 | |
| 662 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) | |
| 636 { | 663 { |
| 637 free( _VMSMasterEnv->schedSlots[i] ); | 664 freeSRSWQ( readyToAnimateQs[ coreIdx ] ); |
| 638 } | 665 |
| 639 | 666 VMS__handle_dissipate_reqst( masterVPs[ coreIdx ] ); |
| 640 free( _VMSMasterEnv->schedSlots); | 667 |
| 641 VMS__handle_dissipate_reqst( _VMSMasterEnv->masterVirtPr ); | 668 freeSchedSlots( allSchedSlots[ coreIdx ] ); |
| 642 | 669 } |
| 670 | |
| 671 free( _VMSMasterEnv->readyToAnimateQs ); | |
| 672 free( _VMSMasterEnv->masterVPs ); | |
| 673 free( _VMSMasterEnv->allSchedSlots ); | |
| 674 | |
| 643 free( _VMSMasterEnv ); | 675 free( _VMSMasterEnv ); |
| 644 } | 676 } |
| 645 | 677 |
| 646 | 678 |
| 647 //=========================================================================== | 679 //=========================================================================== |
