comparison AnimationMaster.c @ 232:421bde2a07d7

REMOVED work-stealing, and all references to gates
author Some Random Person <seanhalle@yahoo.com>
date Thu, 15 Mar 2012 20:47:54 -0700
parents 88fd85921d7f
children a0ac58d8201c
comparison
equal deleted inserted replaced
1:606fc80bca3b 2:816ba50f18a8
188 numSlotsFilled += 1; 188 numSlotsFilled += 1;
189 } 189 }
190 } 190 }
191 } 191 }
192 192
193
194 #ifdef SYS__TURN_ON_WORK_STEALING
195 /*If no slots filled, means no more work, look for work to steal. */
196 if( numSlotsFilled == 0 )
197 { gateProtected_stealWorkInto( currSlot, readyToAnimateQ, masterVP );
198 }
199 #endif
200
201 MEAS__Capture_Post_Master_Point; 193 MEAS__Capture_Post_Master_Point;
202 194
203 masterSwitchToCoreCtlr( masterVP ); 195 masterSwitchToCoreCtlr( masterVP );
204 flushRegisters(); 196 flushRegisters();
205 }//MasterLoop 197 }//while(1)
206
207
208 } 198 }
209 199
210
211 //=========================== Work Stealing ==============================
212
213 /*This is first of two work-stealing approaches. It's not used, but left
214 * in the code as a simple illustration of the principle. This version
215 * has a race condition -- the core controllers are accessing their own
216 * animation slots at the same time that this work-stealer on a different
217 * core is..
218 *Because the core controllers run outside the master lock, this interaction
219 * is not protected.
220 */
221 void inline
222 stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
223 SlaveVP *masterVP )
224 {
225 SlaveVP *stolenSlv;
226 int32 coreIdx, i;
227 VMSQueueStruc *currQ;
228
229 stolenSlv = NULL;
230 coreIdx = masterVP->coreAnimatedBy;
231 for( i = 0; i < NUM_CORES -1; i++ )
232 {
233 if( coreIdx >= NUM_CORES -1 )
234 { coreIdx = 0;
235 }
236 else
237 { coreIdx++;
238 }
239 //TODO: fix this for coreCtlr scans slots
240 // currQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
241 if( numInVMSQ( currQ ) > 0 )
242 { stolenSlv = readVMSQ (currQ );
243 break;
244 }
245 }
246
247 if( stolenSlv != NULL )
248 { currSlot->slaveAssignedToSlot = stolenSlv;
249 stolenSlv->schedSlot = currSlot;
250 currSlot->needsSlaveAssigned = FALSE;
251
252 writeVMSQ( stolenSlv, readyToAnimateQ );
253 }
254 }
255
256 /*This algorithm makes the common case fast. Make the coreloop passive,
257 * and show its progress. Make the stealer control a gate that coreloop
258 * has to pass.
259 *To avoid interference, only one stealer at a time. Use a global
260 * stealer-lock, so only the stealer is slowed.
261 *
262 *The pattern is based on a gate -- stealer shuts the gate, then monitors
263 * to be sure any already past make it all the way out, before starting.
264 *So, have a "progress" measure just before the gate, then have two after it,
265 * one is in a "waiting room" outside the gate, the other is at the exit.
266 *Then, the stealer first shuts the gate, then checks the progress measure
267 * outside it, then looks to see if the progress measure at the exit is the
268 * same. If yes, it knows the protected area is empty 'cause no other way
269 * to get in and the last to get in also exited.
270 *If the progress measure at the exit is not the same, then the stealer goes
271 * into a loop checking both the waiting-area and the exit progress-measures
272 * until one of them shows the same as the measure outside the gate. Might
273 * as well re-read the measure outside the gate each go around, just to be
274 * sure. It is guaranteed that one of the two will eventually match the one
275 * outside the gate.
276 *
277 *Here's an informal proof of correctness:
278 *The gate can be closed at any point, and have only four cases:
279 * 1) coreloop made it past the gate-closing but not yet past the exit
280 * 2) coreloop made it past the pre-gate progress update but not yet past
281 * the gate,
282 * 3) coreloop is right before the pre-gate update
283 * 4) coreloop is past the exit and far from the pre-gate update.
284 *
285 * Covering the cases in reverse order,
286 * 4) is not a problem -- stealer will read pre-gate progress, see that it
287 * matches exit progress, and the gate is closed, so stealer can proceed.
288 * 3) stealer will read pre-gate progress just after coreloop updates it..
289 * so stealer goes into a loop until the coreloop causes wait-progress
290 * to match pre-gate progress, so then stealer can proceed
291 * 2) same as 3..
292 * 1) stealer reads pre-gate progress, sees that it's different than exit,
293 * so goes into loop until exit matches pre-gate, now it knows coreloop
294 * is not in protected and cannot get back in, so can proceed.
295 *
296 *Implementation for the stealer:
297 *
298 *First, acquire the stealer lock -- only cores with no work to do will
299 * compete to steal, so not a big performance penalty having only one --
300 * will rarely have multiple stealers in a system with plenty of work -- and
301 * in a system with little work, it doesn't matter.
302 *
303 *Note, have single-reader, single-writer pattern for all variables used to
304 * communicate between stealer and victims
305 *
306 *So, scan the queues of the core controllers, until find non-empty. Each core
307 * has its own list that it scans. The list goes in order from closest to
308 * furthest core, so it steals first from close cores. Later can add
309 * taking info from the app about overlapping footprints, and scan all the
310 * others then choose work with the most footprint overlap with the contents
311 * of this core's cache.
312 *
313 *Now, have a victim to take work from.  So, shut the gate in that
314 * coreloop, by setting the "gate closed" var on its stack to TRUE.
315 *Then, read the core's pre-gate progress and compare to the core's exit
316 * progress.
317 *If same, can proceed to take work from the coreloop's queue. When done,
318 * write FALSE to gate closed var.
319 *If different, then enter a loop that reads the pre-gate progress, then
320 * compares to exit progress then to wait progress. When one of two
321 * matches, proceed. Take work from the coreloop's queue. When done,
322 * write FALSE to the gate closed var.
323 *
324 */
325 void inline
326 gateProtected_stealWorkInto( SchedSlot *currSlot,
327 VMSQueueStruc *myReadyToAnimateQ,
328 SlaveVP *masterVP )
329 {
330 SlaveVP *stolenSlv;
331 int32 coreIdx, i, haveAVictim, gotLock;
332 VMSQueueStruc *victimsQ;
333
334 volatile GateStruc *vicGate;
335 int32 coreMightBeInProtected;
336
337
338
339 //see if any other cores have work available to steal
340 haveAVictim = FALSE;
341 coreIdx = masterVP->coreAnimatedBy;
342 for( i = 0; i < NUM_CORES -1; i++ )
343 {
344 if( coreIdx >= NUM_CORES -1 )
345 { coreIdx = 0;
346 }
347 else
348 { coreIdx++;
349 }
350 //TODO: fix this for coreCtlr scans slots
351 // victimsQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
352 if( numInVMSQ( victimsQ ) > 0 )
353 { haveAVictim = TRUE;
354 vicGate = _VMSMasterEnv->workStealingGates[ coreIdx ];
355 break;
356 }
357 }
358 if( !haveAVictim ) return; //no work to steal, exit
359
360 //have a victim core, now get the stealer-lock
361 gotLock =__sync_bool_compare_and_swap( &(_VMSMasterEnv->workStealingLock),
362 UNLOCKED, LOCKED );
363 if( !gotLock ) return; //go back to core controller, which will re-start master
364
365
366 //====== Start Gate-protection =======
367 vicGate->gateClosed = TRUE;
368 coreMightBeInProtected= vicGate->preGateProgress != vicGate->exitProgress;
369 while( coreMightBeInProtected )
370 { //wait until sure
371 if( vicGate->preGateProgress == vicGate->waitProgress )
372 coreMightBeInProtected = FALSE;
373 if( vicGate->preGateProgress == vicGate->exitProgress )
374 coreMightBeInProtected = FALSE;
375 }
376
377 stolenSlv = readVMSQ ( victimsQ );
378
379 vicGate->gateClosed = FALSE;
380 //======= End Gate-protection =======
381
382
383 if( stolenSlv != NULL ) //victim could have been in protected and taken
384 { currSlot->slaveAssignedToSlot = stolenSlv;
385 stolenSlv->schedSlot = currSlot;
386 currSlot->needsSlaveAssigned = FALSE;
387
388 writeVMSQ( stolenSlv, myReadyToAnimateQ );
389 }
390
391 //unlock the work stealing lock
392 _VMSMasterEnv->workStealingLock = UNLOCKED;
393 }