comparison MasterLoop.c @ 55:3bac84e4e56e

Works with correct matrix mult, Nov 4 -- switch animators macros, many updates. Changed all queues back to VMSQ variants; #defines correct, protected, work-stealing, with compiler switch in and out
author Me
date Thu, 04 Nov 2010 18:13:18 -0700
parents 42dd44df1bb0
children 984f7d78bfdf dd3e60aeae26
comparison of 15:e929fe72639c with 16:1c7bfb48cc44
@@ -8,10 +8,18 @@
 
 #include <stdio.h>
 #include <stddef.h>
 
 #include "VMS.h"
+
+
+//===========================================================================
+void inline
+stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
+               VirtProcr *masterPr );
+
+//===========================================================================
 
 
 
 /*This code is animated by the virtual Master processor.
  *
@@ -62,21 +70,21 @@
  * is case when other cores starved and one core's requests generate work
  * for them -- so keep max in queue to 3 or 4..
  */
 void masterLoop( void *initData, VirtProcr *animatingPr )
  {
-   int slotIdx;
+   int32 slotIdx, numSlotsFilled;
    VirtProcr *schedVirtPr;
    SchedSlot *currSlot, **schedSlots;
    MasterEnv *masterEnv;
    VMSQueueStruc *readyToAnimateQ;
 
    SlaveScheduler slaveScheduler;
    RequestHandler requestHandler;
    void *semanticEnv;
 
-   int thisCoresIdx;
+   int32 thisCoresIdx;
    VirtProcr *masterPr;
    volatile VirtProcr *volatileMasterPr;
 
    volatileMasterPr = animatingPr;
    masterPr = volatileMasterPr; //used to force re-define after jmp
@@ -106,11 +114,11 @@
 #endif
 //========================================================================
 
    masterEnv = _VMSMasterEnv;
 
-   //TODO: check that compiles so that always re-define from frame-storage
+   //GCC may optimize so doesn't always re-define from frame-storage
    masterPr = volatileMasterPr; //just to make sure after jmp
    thisCoresIdx = masterPr->coreAnimatedBy;
    readyToAnimateQ = masterEnv->readyToAnimateQs[thisCoresIdx];
    schedSlots = masterEnv->allSchedSlots[thisCoresIdx];
 
@@ -118,10 +126,11 @@
    slaveScheduler = masterEnv->slaveScheduler;
    semanticEnv = masterEnv->semanticEnv;
 
 
    //Poll each slot's Done flag
+   numSlotsFilled = 0;
    for( slotIdx = 0; slotIdx < NUM_SCHED_SLOTS; slotIdx++)
     {
       currSlot = schedSlots[ slotIdx ];
 
       if( currSlot->workIsDone )
@@ -139,12 +148,13 @@
 
          if( schedVirtPr != NULL )
           { currSlot->procrAssignedToSlot = schedVirtPr;
             schedVirtPr->schedSlot = currSlot;
             currSlot->needsProcrAssigned = FALSE;
+            numSlotsFilled += 1;
 
-            writeSRSWQ( schedVirtPr, readyToAnimateQ );
+            writeVMSQ( schedVirtPr, readyToAnimateQ );
           }
       }
    }
 
 
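
The queue write in the polling loop changes from writeSRSWQ to the generic writeVMSQ, matching the commit message: all queue calls now go through VMSQ names that a compiler switch binds to the single-reader-single-writer, protected, or work-stealing variants. The real #defines live in the VMS headers, not in this diff; a minimal sketch of that kind of switch, in which every variant name except writeSRSWQ is an assumption:

    /* Hypothetical VMSQ dispatch -- only writeSRSWQ appears in this diff;
     * the other variant names are invented for illustration.            */
    #ifdef USE_WORK_STEALING        //queues that tolerate a second reader
       #define writeVMSQ( pr, q )  writeWorkStealingQ( (pr), (q) )
       #define readVMSQ( q )       readWorkStealingQ( (q) )
       #define numInVMSQ( q )      numInWorkStealingQ( (q) )
    #elif defined USE_PROTECTED_Q   //lock-protected queues
       #define writeVMSQ( pr, q )  writeProtectedQ( (pr), (q) )
       #define readVMSQ( q )       readProtectedQ( (q) )
       #define numInVMSQ( q )      numInProtectedQ( (q) )
    #else                           //default: single-reader single-writer
       #define writeVMSQ( pr, q )  writeSRSWQ( (pr), (q) )
       #define readVMSQ( q )       readSRSWQ( (q) )
       #define numInVMSQ( q )      numInSRSWQ( (q) )
    #endif

If the switch works this way, the hunks in this file are behavior-neutral under the default branch and only change queue semantics when USE_WORK_STEALING is defined.
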
@@ -151,36 +161,16 @@
-   //Save stack ptr and frame, restore CoreLoop's stack and frame,
-   // and clear the MasterLock
-   //TODO: cafefully verify don't need to force saving anything to stack
-   // before jumping back to core loop.
-   void *stackPtrAddr, *framePtrAddr, *masterLockAddr;
-   void *jmpPt, *coreLoopFramePtr, *coreLoopStackPtr;
+#ifdef USE_WORK_STEALING
+   //If no slots filled, means no more work, look for work to steal.
+   if( numSlotsFilled == 0 )
+    { gateProtected_stealWorkInto( currSlot, readyToAnimateQ, masterPr );
+    }
+#endif
 
-   stackPtrAddr = &(masterPr->stackPtr);
-   framePtrAddr = &(masterPr->framePtr);
-   masterLockAddr = &(_VMSMasterEnv->masterLock);
-
-   jmpPt = _VMSMasterEnv->coreLoopStartPt;
-   coreLoopFramePtr = masterPr->coreLoopFramePtr;//need this only
-   coreLoopStackPtr = masterPr->coreLoopStackPtr;//shouldn't need -- safety
 
 #ifdef MEAS__TIME_MASTER
    saveLowTimeStampCountInto( masterPr->endMasterTSCLow );
 #endif
 
-   asm volatile("movl %0, %%eax; \
-                 movl %%esp, (%%eax); \
-                 movl %1, %%eax; \
-                 movl %%ebp, (%%eax); \
-                 movl %2, %%ebx; \
-                 movl %3, %%eax; \
-                 movl %4, %%esp; \
-                 movl %5, %%ebp; \
-                 movl $0x0, (%%ebx); \
-                 jmp %%eax;" \
-   /* outputs */ : "=g" (stackPtrAddr), "=g" (framePtrAddr), \
-                   "=g"(masterLockAddr) \
-   /* inputs */  : "g" (jmpPt), "g"(coreLoopStackPtr), "g"(coreLoopFramePtr)\
-   /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi" \
-   );//can probably make clobber list empty -- but safe for now
+
+   masterSwitchToCoreLoop( masterPr )
 }
 
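
The hand-coded context switch back to the CoreLoop is collapsed into the masterSwitchToCoreLoop macro, whose definition is not part of this diff. A plausible sketch, assuming the macro simply packages the asm block deleted above: save the Master's stack and frame pointers, clear the MasterLock, then jump to the CoreLoop's saved position. The call site carries no trailing semicolon, which suggests the macro expands to a full brace-enclosed block, as sketched here:

    /* Sketch only -- the real macro lives in a VMS header not shown here. */
    #define masterSwitchToCoreLoop( masterPr )                              \
     { void *stackPtrAddr     = &((masterPr)->stackPtr);                    \
       void *framePtrAddr     = &((masterPr)->framePtr);                    \
       void *masterLockAddr   = &(_VMSMasterEnv->masterLock);               \
       void *jmpPt            = _VMSMasterEnv->coreLoopStartPt;             \
       void *coreLoopFramePtr = (masterPr)->coreLoopFramePtr;               \
       void *coreLoopStackPtr = (masterPr)->coreLoopStackPtr;               \
       asm volatile("movl %0, %%eax;"      /*save Master's stack ptr  */    \
                    "movl %%esp, (%%eax);"                                  \
                    "movl %1, %%eax;"      /*save Master's frame ptr  */    \
                    "movl %%ebp, (%%eax);"                                  \
                    "movl %2, %%ebx;"      /*ebx = &masterLock        */    \
                    "movl %3, %%eax;"      /*eax = CoreLoop jump pt   */    \
                    "movl %4, %%esp;"      /*restore CoreLoop's stack */    \
                    "movl %5, %%ebp;"      /*and its frame            */    \
                    "movl $0x0, (%%ebx);"  /*clear the MasterLock     */    \
                    "jmp %%eax;"                                            \
        : "=g"(stackPtrAddr), "=g"(framePtrAddr), "=g"(masterLockAddr)      \
        : "g"(jmpPt), "g"(coreLoopStackPtr), "g"(coreLoopFramePtr)          \
        : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi" );       \
     }
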
@@ -186,0 +177,39 @@
+
+
+/*This has a race condition -- the coreloops are accessing their own queues
+ * at the same time that this work-stealer on a different core is trying to
+ * read from them. */
+void inline
+stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
+               VirtProcr *masterPr )
+ {
+   VirtProcr *stolenPr;
+   int32 coreIdx, i;
+   VMSQueueStruc *currQ;
+
+   stolenPr = NULL;
+   coreIdx = masterPr->coreAnimatedBy;
+   for( i = 0; i < NUM_CORES -1; i++ )
+    {
+      if( coreIdx >= NUM_CORES -1 )
+       { coreIdx = 0;
+       }
+      else
+       { coreIdx++;
+       }
+      currQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
+      if( numInVMSQ( currQ ) > 0 )
+       { stolenPr = readVMSQ( currQ );
+         break;
+       }
+    }
+
+   if( stolenPr != NULL )
+    { currSlot->procrAssignedToSlot = stolenPr;
+      stolenPr->schedSlot = currSlot;
+      currSlot->needsProcrAssigned = FALSE;
+
+      writeVMSQ( stolenPr, readyToAnimateQ );
+    }
+ }
+
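
The gate code below reads four fields of a per-core GateStruc, fetched from _VMSMasterEnv->workStealingGates. The struct definition is not in this diff; reconstructed from its uses, it presumably looks something like this (the field names come from the code, the comments are interpretation):

    typedef struct
     { volatile int32 gateClosed;       //stealer writes TRUE to shut the gate
       volatile int32 preGateProgress;  //victim updates just before the gate
       volatile int32 waitProgress;     //victim copies pre-gate value while
                                        // parked outside a closed gate
       volatile int32 exitProgress;     //victim copies pre-gate value at exit
     }
    GateStruc;
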
+/*This algorithm makes the common case fast. Make the coreloop passive,
+ * and show its progress. Make the stealer control a gate that coreloop
+ * has to pass.
+ *To avoid interference, only one stealer at a time. Use a global
+ * stealer-lock.
+ *
+ *The pattern is based on a gate -- stealer shuts the gate, then monitors
+ * to be sure any already past make it all the way out, before starting.
+ *So, have a "progress" measure just before the gate, then have two after it,
+ * one is in a "waiting room" outside the gate, the other is at the exit.
+ *Then, the stealer first shuts the gate, then checks the progress measure
+ * outside it, then looks to see if the progress measure at the exit is the
+ * same. If yes, it knows the protected area is empty 'cause no other way
+ * to get in and the last to get in also exited.
+ *If the progress measure at the exit is not the same, then the stealer goes
+ * into a loop checking both the waiting-area and the exit progress-measures
+ * until one of them shows the same as the measure outside the gate. Might
+ * as well re-read the measure outside the gate each go around, just to be
+ * sure. It is guaranteed that one of the two will eventually match the one
+ * outside the gate.
+ *
+ *Here's an informal proof of correctness:
+ *The gate can be closed at any point, and there are only four cases:
+ * 1) coreloop made it past the gate-closing but not yet past the exit
+ * 2) coreloop made it past the pre-gate progress update but not yet past
+ *    the gate,
+ * 3) coreloop is right before the pre-gate update
+ * 4) coreloop is past the exit and far from the pre-gate update.
+ *
+ * Covering the cases in reverse order,
+ * 4) is not a problem -- stealer will read pre-gate progress, see that it
+ *    matches exit progress, and the gate is closed, so stealer can proceed.
+ * 3) stealer will read pre-gate progress just after coreloop updates it,
+ *    so stealer goes into a loop until the coreloop causes wait-progress
+ *    to match pre-gate progress, and then stealer can proceed.
+ * 2) same as 3.
+ * 1) stealer reads pre-gate progress, sees that it's different from exit,
+ *    so goes into a loop until exit matches pre-gate; now it knows coreloop
+ *    is not in protected and cannot get back in, so can proceed.
+ *
+ *Implementation for the stealer:
+ *
+ *First, acquire the stealer lock -- only cores with no work to do will
+ * compete to steal, so not a big performance penalty having only one --
+ * will rarely have multiple stealers in a system with plenty of work -- and
+ * in a system with little work, it doesn't matter.
+ *
+ *Note, have single-reader, single-writer pattern for all variables used to
+ * communicate between stealer and victims.
+ *
+ *So, scan the queues of the core loops until one is found non-empty. Each
+ * core has its own list that it scans. The list goes in order from closest
+ * to furthest core, so it steals first from close cores. Later can add
+ * taking info from the app about overlapping footprints, and scan all the
+ * others then choose work with the most footprint overlap with the contents
+ * of this core's cache.
+ *
+ *Now, have a victim to take work from. So, shut the gate in that
+ * coreloop, by setting the "gate closed" var on its stack to TRUE.
+ *Then, read the core's pre-gate progress and compare to the core's exit
+ * progress.
+ *If same, can proceed to take work from the coreloop's queue. When done,
+ * write FALSE to the gate closed var.
+ *If different, then enter a loop that reads the pre-gate progress, then
+ * compares it to exit progress then to wait progress. When one of the two
+ * matches, proceed. Take work from the coreloop's queue. When done,
+ * write FALSE to the gate closed var.
+ *
+ */
+void inline
+gateProtected_stealWorkInto( SchedSlot *currSlot,
+                             VMSQueueStruc *myReadyToAnimateQ,
+                             VirtProcr *masterPr )
+ {
+   VirtProcr *stolenPr;
+   int32 coreIdx, i, haveAVictim, gotLock;
+   VMSQueueStruc *victimsQ;
+
+   volatile GateStruc *vicGate;
+   int32 coreMightBeInProtected;
+
+
+
+   //see if any other cores have work available to steal
+   haveAVictim = FALSE;
+   coreIdx = masterPr->coreAnimatedBy;
+   for( i = 0; i < NUM_CORES -1; i++ )
+    {
+      if( coreIdx >= NUM_CORES -1 )
+       { coreIdx = 0;
+       }
+      else
+       { coreIdx++;
+       }
+      victimsQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
+      if( numInVMSQ( victimsQ ) > 0 )
+       { haveAVictim = TRUE;
+         vicGate = _VMSMasterEnv->workStealingGates[ coreIdx ];
+         break;
+       }
+    }
+   if( !haveAVictim ) return; //no work to steal, exit
+
+   //have a victim core, now get the stealer-lock
+   gotLock = __sync_bool_compare_and_swap( &(_VMSMasterEnv->workStealingLock),
+                                           UNLOCKED, LOCKED );
+   if( !gotLock ) return; //go back to core loop, which will re-start master
+
+
+   //====== Start Gate-protection =======
+   vicGate->gateClosed = TRUE;
+   coreMightBeInProtected = vicGate->preGateProgress != vicGate->exitProgress;
+   while( coreMightBeInProtected )
+    { //wait until sure
+      if( vicGate->preGateProgress == vicGate->waitProgress )
+         coreMightBeInProtected = FALSE;
+      if( vicGate->preGateProgress == vicGate->exitProgress )
+         coreMightBeInProtected = FALSE;
+    }
+
+   stolenPr = readVMSQ( victimsQ );
+
+   vicGate->gateClosed = FALSE;
+   //======= End Gate-protection =======
+
+
+   if( stolenPr != NULL ) //victim could have been in protected and taken
+    { currSlot->procrAssignedToSlot = stolenPr;
+      stolenPr->schedSlot = currSlot;
+      currSlot->needsProcrAssigned = FALSE;
+
+      writeVMSQ( stolenPr, myReadyToAnimateQ );
+    }
+
+   //unlock the work stealing lock
+   _VMSMasterEnv->workStealingLock = UNLOCKED;
+ }
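
This changeset adds only the stealer's half of the gate. For the informal proof above to hold, the coreloop (victim) side must publish the three progress counters around its own queue access; that code is not in this diff. A sketch of what the comment's protocol implies the victim does -- coreLoopGatePass and protectedQueueAccess are hypothetical names, and myGate stands for this core's entry in workStealingGates:

    extern void protectedQueueAccess( void ); //stand-in for the coreloop's
                                              // own readyToAnimateQ reads

    void coreLoopGatePass( volatile GateStruc *myGate )
     {
       int32 myProgress;

       //update the pre-gate progress measure just before the gate
       myProgress = myGate->preGateProgress + 1;
       myGate->preGateProgress = myProgress;

       //the gate -- while shut, park in the "waiting room" and say so
       while( myGate->gateClosed )
        { myGate->waitProgress = myProgress;
        }

       //protected region -- stealer is guaranteed to stay out of here
       protectedQueueAccess();

       //the exit -- tells the stealer this core is all the way out
       myGate->exitProgress = myProgress;
     }

Note also that both stealWorkInto and gateProtected_stealWorkInto scan victims by simple round-robin from this core's index; the closest-to-furthest scan list the comment describes reads as a planned refinement rather than what this code does.
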