Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VMS_impls > VMS__MC_shared_impl
comparison MasterLoop.c @ 5:6c518bda83fe
About to chg to win thds -- good except for that
| author | Me |
|---|---|
| date | Mon, 31 May 2010 19:15:14 -0700 |
| parents | a5fe730dfc2e |
| children | a87d02855dee |
comparison
equal
deleted
inserted
replaced
| 0:4c2da6146c19 | 1:ad6c7d2801cc |
|---|---|
| 35 * previous done putting second half of scheduled slaves in. This is the only | 35 * previous done putting second half of scheduled slaves in. This is the only |
| 36 * race condition. | 36 * race condition. |
| 37 * | 37 * |
| 38 */ | 38 */ |
| 39 | 39 |
| 40 void masterLoop( void *data ) | 40 /*May 29, 2010 -- birth a Master during init so that first core loop to |
| 41 * start running gets it and does all the stuff for a newly born | |
| 42 * from then on, will be doing continuation -- but do suspension self | |
| 43 * directly at end of master loop | |
| 44 *So VMS__init just births the master virtual processor same way it births | |
| 45 * all the others -- then does any extra setup needed and puts it into the | |
| 46 * work queue. | |
| 47 *However means have to make masterEnv a global static volatile the same way | |
| 48 * did with workQ in core loop. -- for performance, put the | |
| 49 * jump to core loop directly in here, and have it directly jump back. | |
| 50 */ | |
| 51 void masterLoop( void *initData, VirtProcr *masterPr ) | |
| 41 { bool8 success; | 52 { bool8 success; |
| 42 int slaveIdx, numScheduled, numInFirstHalf, schedSlaveIdx; | 53 int slotIdx, numScheduled, numInFirstChunk, filledSlotIdx; |
| 43 VMSProcr currSlave, *virtSlaves; | 54 SchedSlot *currSlot, **schedSlots, **filledSlots; |
| 44 MasterEnv *masterEnv; | 55 MasterEnv *masterEnv; |
| 56 QueueStruc *workQ; | |
| 57 // VirtProcr *masterPr; | |
| 58 void *jmpPt; | |
| 59 | |
| 45 SlaveScheduler slaveScheduler; | 60 SlaveScheduler slaveScheduler; |
| 46 RequestHandler requestHandler; | 61 RequestHandler requestHandler; |
| 47 | 62 |
| 63 //this will run as the first virt processor in workQ, and will be a | |
| 64 // new born -- so will do all the GCC-generated allocating space on | |
| 65 // the stack owned by master virt procr -- and will run this last bit | |
| 66 // of setup code. | |
| 67 masterPr->nextInstrPt = &&masterLoopStartPt; | |
| 48 | 68 |
| 49 masterEnv = (MasterEnv *)data; | 69 |
| 70 masterLoopStartPt: | |
| 50 | 71 |
| 72 //if another reference to same Master VirtProcr still going, busy-wait | |
| 73 //Could put this lower, but don't want to think about shared stack.. | |
| 74 while( masterEnv->stillRunning ) /*busy wait*/ ; | |
| 75 //TODO: want to do busy-wait as assembly, to be sure stack not touched? | |
| 76 | |
| 77 //this is the only master running now, set flag again | |
| 78 masterEnv->stillRunning = TRUE; | |
| 79 | |
| 80 //TODO: gdb -- check that a volatile _VMSMasterEnv and _VMSWorkQ means | |
| 81 // all these will be re-filled every time jump here.. | |
| 82 workQ = _VMSWorkQ; | |
| 83 masterEnv = _VMSMasterEnv; | |
| 51 requestHandler = masterEnv->requestHandler; | 84 requestHandler = masterEnv->requestHandler; |
| 52 slaveScheduler = masterEnv->slaveScheduler; | 85 slaveScheduler = masterEnv->slaveScheduler; |
| 53 virtSlaves = masterEnv->virtSlaves; | 86 schedSlots = masterEnv->schedSlots; |
| 87 filledSlots = masterEnv->filledSlots; | |
| 88 masterPr = masterEnv->masterVirtPr; | |
| 54 | 89 |
| 55 //if another continuation of Master still running, busy-wait | |
| 56 while( masterEnv->stillRunning ) /*busy wait*/ ; | |
| 57 | |
| 58 //this is the only master running now, set flag again | |
| 59 masterEnv->stillRunning = 1; | |
| 60 | 90 |
| 61 //prepare for scheduling | 91 //prepare for scheduling |
| 62 masterEnv->numScheduled = 0; | 92 masterEnv->numFilled = 0; |
| 63 | 93 |
| 64 //Poll each slave structure's Done flag | 94 //Poll each slot's Done flag -- slot 0 reserved for master, start at 1 |
| 65 for( slaveIdx = 0; slaveIdx < NUM_SLAVES; slaveIdx++) | 95 for( slotIdx = 1; slotIdx < NUM_SCHED_SLOTS; slotIdx++) |
| 66 { | 96 { |
| 67 currSlave = virtSlaves[ slaveIdx ]; | 97 currSlot = schedSlots[ slotIdx ]; |
| 68 | 98 |
| 69 if( currSlave->workIsDone ) | 99 if( currSlot->workIsDone ) |
| 70 { | 100 { |
| 71 currSlave->workIsDone = FALSE; | 101 currSlot->workIsDone = FALSE; |
| 72 currSlave->needsWorkAssigned = TRUE; | 102 currSlot->needsProcrAssigned = TRUE; |
| 73 | 103 |
| 74 //process requests from slave to master | 104 //process requests from slave to master |
| 75 (*requestHandler)( currSlave ); | 105 (*requestHandler)( currSlot->procrAssignedToSlot->requests ); |
| 76 } | 106 } |
| 77 if( currSlave->needsWorkAssigned ) | 107 if( currSlot->needsProcrAssigned ) |
| 78 { //give slave a new work-unit | 108 { //give slot a new virt procr |
| 79 success = | 109 success = |
| 80 (*slaveScheduler)( currSlave, masterEnv ); | 110 (*slaveScheduler)( currSlot, masterEnv->semanticEnv ); |
| 81 | 111 |
| 82 if( success ) | 112 if( success ) |
| 83 { addToVect( currSlave, &(masterEnv->scheduledSlaves), | 113 { int numFilled = masterEnv->numFilled; |
| 84 &(masterEnv->numScheduled) ); | 114 |
| 85 currSlave->needsWorkAssigned = FALSE; | 115 filledSlots[numFilled] = currSlot; |
| 116 masterEnv->numFilled += 1; | |
| 117 | |
| 118 currSlot->needsProcrAssigned = FALSE; | |
| 86 } | 119 } |
| 87 } | 120 } |
| 88 } | 121 } |
| 89 | 122 |
| 90 //put half scheduled slaves in, then continuation, then other half | 123 //put some scheduled slaves in, then continuation, then rest |
| 91 VMSProcr **scheduledSlaves; | 124 numInFirstChunk = masterEnv->numFilled / 2; //tweak this from experiments |
| 92 numInFirstHalf = masterEnv->numScheduled / 2; | 125 for( filledSlotIdx = 0; filledSlotIdx < numInFirstChunk; filledSlotIdx++) |
| 93 scheduledSlaves = masterEnv->scheduledSlaves; | |
| 94 for( schedSlaveIdx = 0; schedSlaveIdx < numInFirstHalf; schedSlaveIdx++) | |
| 95 { | 126 { |
| 96 writeQ( scheduledSlaves[ schedSlaveIdx ], workQ ); | 127 writeQ( filledSlots[ filledSlotIdx ]->procrAssignedToSlot, workQ ); |
| 97 } | 128 } |
| 98 | 129 |
| 99 //enqueue continuation of this loop | 130 //enqueue continuation of this loop |
| 100 // note that After this enqueue, continuation might sneak through | 131 // note that After this enqueue, continuation might sneak through |
| 101 writeQ( masterEnv->masterWorkUnit, workQ ); | 132 writeQ( schedSlots[0]->procrAssignedToSlot, workQ );//master always slot 0 |
| 102 for( schedSlaveIdx = numInFirstHalf; | 133 for( filledSlotIdx = numInFirstChunk; |
| 103 schedSlaveIdx < numScheduled; | 134 filledSlotIdx < numScheduled; |
| 104 schedSlaveIdx++) | 135 filledSlotIdx++) |
| 105 { | 136 { |
| 106 writeQ( scheduledSlaves[ schedSlaveIdx ]->workUnitToDo, workQ ); | 137 writeQ( filledSlots[ filledSlotIdx ]->procrAssignedToSlot, workQ ); |
| 107 } | 138 } |
| 108 | 139 |
| 109 //all done, so okay for continuation to proceed | 140 masterEnv->numFilled = 0; |
| 110 masterEnv->stillRunning = 0; | 141 |
| 142 //Don't want code above to try to look at requests in masterVirtPr, | |
| 143 // so leave workDone at FALSE, but do want it to schedule into | |
| 144 // the slot, so set needs procr assigned to TRUE. | |
| 145 masterPr->schedSlot->needsProcrAssigned = TRUE; | |
| 146 | |
| 147 //Save stack ptr and frame -- don't need to, take out later, but safe | |
| 148 // Also, wait to set stillRunning to FALSE until just before jump, to | |
| 149 // protect stack might need to jmp directly to asm busy-wait to be | |
| 150 // sure stack not touched | |
| 151 //TODO: gdb check that busy-wait doesn't touch stack, so this is safe | |
| 152 //don't need any regs to be valid when come back, so clobber list empty | |
| 153 //TODO: gdb the jmp -- make sure it jumps through register or mem | |
| 154 asm volatile("movl %%esp, %0; \ | |
| 155 movl %%ebp, %1; \ | |
| 156 movl $0x0, %2; \ | |
| 157 jmp %3 " | |
| 158 /* outputs */ : "=m" (masterPr->stackPtr), "=m" (masterPr->framePtr), | |
| 159 "=m" (masterEnv->stillRunning) | |
| 160 /* inputs */ : "r" (masterPr->coreLoopStartPt) | |
| 161 /* clobber */ | |
| 162 ); | |
| 111 } | 163 } |
| 112 | 164 |
| 113 | 165 |
