view SchedulingMaster.c @ 228:125f0f90a0d3

integrating holistic model in common ancestor - not finished yet
author Nina Engelhardt <nengel@mailbox.tu-berlin.de>
date Thu, 15 Mar 2012 18:28:29 +0100
parents b0b93147adfb
children
line source
1 /*
2 * Copyright 2010 OpenSourceStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
9 #include <stdio.h>
10 #include <stddef.h>
12 #include "VMS.h"
 15 //===========================================================================
//Forward declaration of the UNGATED work-stealing helper defined near the
// bottom of this file.  (The gate-protected variant,
// gateProtected_stealWorkInto, is the one actually called from the master
// loop; this plain version has a known race -- see its header comment.)
//NOTE(review): "void inline" -- declaring a function `inline` without a
// visible definition has fragile semantics in C99/GNU C; left as-is to
// match the file's existing convention, but worth confirming link behavior.
 16 void inline
 17 stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
 18 SlaveVP *masterVP );
 20 //===========================================================================
24 /*This code is animated by the virtual Master processor.
25 *
26 *Polls each sched slot exactly once, hands any requests made by a newly
27 * done slave to the "request handler" plug-in function
28 *
29 *Any slots that need a Slv assigned are given to the "assign"
30 * plug-in function, which tries to assign a Slv (slave) to it.
31 *
32 *When all slots needing a processor have been given to the assign plug-in,
33 * a fraction of the slaves successfully assigned are put into the
34 * work queue, then a continuation of this function is put in, then the rest
35 * of the Slvs that were successfully assigned.
36 *
37 *The first thing the continuation does is busy-wait until the previous
 38 * animation completes. This is because an (unlikely) continuation may
 39 * sneak through the queue before the previous continuation is done putting
 40 * the second part of the assigned slaves in, which is the only race condition.
41 *
42 */
44 /*May 29, 2010 -- birth a Master during init so that first core controller to
45 * start running gets it and does all the stuff for a newly born --
46 * from then on, will be doing continuation, but do suspension self
47 * directly at end of master loop
48 *So VMS_WL__init just births the master virtual processor same way it births
49 * all the others -- then does any extra setup needed and puts it into the
50 * work queue.
51 *However means have to make masterEnv a global static volatile the same way
52 * did with readyToAnimateQ in core controller. -- for performance, put the
53 * jump to the core controller directly in here, and have it directly jump back.
54 *
55 *
56 *Aug 18, 2010 -- Going to a separate MasterVP for each core, to see if this
57 * avoids the suspected bug in the system stack that causes bizarre faults
58 * at random places in the system code.
59 *
60 *So, this function is coupled to each of the MasterVPs, -- meaning this
61 * function can't rely on a particular stack and frame -- each MasterVP that
62 * animates this function has a different one.
63 *
64 *At this point, the schedulingMaster does not write itself into the queue anymore,
65 * instead, the coreCtlr acquires the masterLock when it has nothing to
66 * animate, and then animates its own schedulingMaster. However, still try to put
67 * several AppSlvs into the queue to amortize the startup cost of switching
68 * to the MasterVP. Note, don't have to worry about latency of requests much
69 * because most requests generate work for same core -- only latency issue
70 * is case when other cores starved and one core's requests generate work
71 * for them -- so keep max in queue to 3 or 4..
72 */
//The per-core scheduling master loop, animated by a MasterVP.
// Each pass: (1) polls every sched slot of this core; (2) for slots whose
// slave finished, hands the slave's pending requests to the plugged-in
// requestHandler; (3) asks the plugged-in slaveAssigner to refill any empty
// slots; (4) optionally steals work from another core if nothing was
// assigned; (5) switches back to the core controller.
//Params: initData  -- unused here (part of the common VP-function signature).
//        animatingSlv -- the MasterVP animating this function; control
//          arrives by jmp, not a normal call (see comments below).
//Never returns -- exits each iteration via masterSwitchToCoreCtlr.
 73 void schedulingMaster( void *initData, SlaveVP *animatingSlv )
 74 {
 75 int32 slotIdx, numSlotsFilled;
 76 SlaveVP *schedSlaveVP;
 77 SchedSlot *currSlot, **schedSlots;
 78 MasterEnv *masterEnv;
 79 VMSQueueStruc *readyToAnimateQ; //NOTE(review): never assigned in this function -- see bug note at the steal call below
 81 SlaveAssigner slaveAssigner;
 82 RequestHandler requestHandler;
 83 void *semanticEnv;
 85 int32 thisCoresIdx;
 86 SlaveVP *masterVP;
 87 volatile SlaveVP *volatileMasterVP; //volatile copy defeats register caching across the jmp-based resume
 89 volatileMasterVP = animatingSlv;
 90 masterVP = (SlaveVP*)volatileMasterVP; //used to force re-define after jmp
 92 //First animation of each MasterVP will in turn animate this part
 93 // of setup code.. (Slv creator sets up the stack as if this function
 94 // was called normally, but actually get here by jmp)
 95 //So, setup values about stack ptr, jmp pt and all that
 96 //masterVP->resumeInstrPtr = &&schedulingMasterStartPt;
 99 //Note, got rid of writing the stack and frame ptr up here, because
 100 // only one
 101 // core can ever animate a given MasterVP, so don't need to communicate
 102 // new frame and stack ptr to the MasterVP storage before a second
 103 // version of that MasterVP can get animated on a different core.
 104 //Also got rid of the busy-wait.
 107 //schedulingMasterStartPt:
 108 while(1){
 110 MEAS__Capture_Pre_Master_Point
 112 masterEnv = (MasterEnv*)_VMSMasterEnv;
 114 //GCC may optimize so doesn't always re-define from frame-storage
 115 masterVP = (SlaveVP*)volatileMasterVP; //just to make sure after jmp
 116 thisCoresIdx = masterVP->coreAnimatedBy;
 117 schedSlots = masterEnv->allSchedSlots[thisCoresIdx]; //this core's private slot array
 119 requestHandler = masterEnv->requestHandler;
 120 slaveAssigner = masterEnv->slaveAssigner;
 121 semanticEnv = masterEnv->semanticEnv;
 124 //Poll each slot's Done flag
 125 numSlotsFilled = 0;
 126 for( slotIdx = 0; slotIdx < NUM_SCHED_SLOTS; slotIdx++)
 127 {
 128 currSlot = schedSlots[ slotIdx ];
 130 if( currSlot->workIsDone )
 131 {
 132 currSlot->workIsDone = FALSE; //consume the done-flag; slot now awaits a new slave
 133 currSlot->needsSlaveAssigned = TRUE;
 135 MEAS__startReqHdlr;
 137 //process the requests made by the slave (held inside slave struc)
 138 (*requestHandler)( currSlot->slaveAssignedToSlot, semanticEnv );
 140 MEAS__endReqHdlr;
 141 }
 142 if( currSlot->needsSlaveAssigned )
 143 { //give slot a new Slv
 144 schedSlaveVP =
 145 (*slaveAssigner)( semanticEnv, thisCoresIdx, currSlot );
 147 if( schedSlaveVP != NULL )
 148 { currSlot->slaveAssignedToSlot = schedSlaveVP; //link slot<->slave both ways
 149 schedSlaveVP->schedSlot = currSlot;
 150 currSlot->needsSlaveAssigned = FALSE;
 151 numSlotsFilled += 1;
 152 }
 153 }
 154 }
 157 #ifdef SYS__TURN_ON_WORK_STEALING
 158 //If no slots filled, means no more work, look for work to steal.
//BUG(review): readyToAnimateQ is an uninitialized local here, and currSlot
// holds whatever the last loop iteration left (indeterminate if
// NUM_SCHED_SLOTS were 0).  Both are passed to the stealer, which writes
// the stolen slave into them.  Consistent with the commit message
// ("not finished yet") -- the queue presumably needs to be fetched from
// masterEnv for this core before this call; confirm intended source.
 159 if( numSlotsFilled == 0 )
 160 { gateProtected_stealWorkInto( currSlot, readyToAnimateQ, masterVP );
 161 }
 162 #endif
 164 MEAS__Capture_Post_Master_Point;
 166 masterSwitchToCoreCtlr(animatingSlv); //hand control back; next animation resumes the loop
 167 flushRegisters();
 168 }//MasterLoop
 171 }
175 /*This has a race condition -- the coreloops are accessing their own queues
176 * at the same time that this work-stealer on a different core is trying to
177 */
//Unprotected work-stealing: scan the other cores' ready queues (starting
// from this core's neighbor, wrapping around), take the first available
// slave, install it in currSlot, and enqueue it on this core's own
// readyToAnimateQ.
//Known race (see comment above): victims' core loops access their own
// queues concurrently with this reader -- superseded by
// gateProtected_stealWorkInto below.
 178 void inline
 179 stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
 180 SlaveVP *masterVP )
 181 {
 182 SlaveVP *stolenSlv;
 183 int32 coreIdx, i;
 184 VMSQueueStruc *currQ;
 186 stolenSlv = NULL;
 187 coreIdx = masterVP->coreAnimatedBy;
 188 for( i = 0; i < NUM_CORES -1; i++ ) //visit every core except self, once
 189 {
 190 if( coreIdx >= NUM_CORES -1 ) //advance with wrap-around
 191 { coreIdx = 0;
 192 }
 193 else
 194 { coreIdx++;
 195 }
 196 //TODO: fix this for coreCtlr scans slots
 197 // currQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
//BUG(review): with the line above commented out, currQ is read while
// uninitialized (undefined behavior).  Function is non-functional until
// the TODO is resolved.
 198 if( numInVMSQ( currQ ) > 0 )
 199 { stolenSlv = readVMSQ (currQ );
 200 break;
 201 }
 202 }
 204 if( stolenSlv != NULL )
 205 { currSlot->slaveAssignedToSlot = stolenSlv; //link slot<->slave both ways
 206 stolenSlv->schedSlot = currSlot;
 207 currSlot->needsSlaveAssigned = FALSE;
//NOTE(review): the stolen slave is both installed in the slot AND written
// to this core's ready queue -- confirm the core controller expects this
// double bookkeeping rather than one or the other.
 209 writeVMSQ( stolenSlv, readyToAnimateQ );
 210 }
 211 }
213 /*This algorithm makes the common case fast. Make the coreloop passive,
214 * and show its progress. Make the stealer control a gate that coreloop
215 * has to pass.
216 *To avoid interference, only one stealer at a time. Use a global
217 * stealer-lock.
218 *
219 *The pattern is based on a gate -- stealer shuts the gate, then monitors
220 * to be sure any already past make it all the way out, before starting.
221 *So, have a "progress" measure just before the gate, then have two after it,
222 * one is in a "waiting room" outside the gate, the other is at the exit.
223 *Then, the stealer first shuts the gate, then checks the progress measure
224 * outside it, then looks to see if the progress measure at the exit is the
225 * same. If yes, it knows the protected area is empty 'cause no other way
226 * to get in and the last to get in also exited.
227 *If the progress measure at the exit is not the same, then the stealer goes
228 * into a loop checking both the waiting-area and the exit progress-measures
229 * until one of them shows the same as the measure outside the gate. Might
230 * as well re-read the measure outside the gate each go around, just to be
231 * sure. It is guaranteed that one of the two will eventually match the one
232 * outside the gate.
233 *
234 *Here's an informal proof of correctness:
235 *The gate can be closed at any point, and have only four cases:
236 * 1) coreloop made it past the gate-closing but not yet past the exit
237 * 2) coreloop made it past the pre-gate progress update but not yet past
238 * the gate,
239 * 3) coreloop is right before the pre-gate update
240 * 4) coreloop is past the exit and far from the pre-gate update.
241 *
242 * Covering the cases in reverse order,
243 * 4) is not a problem -- stealer will read pre-gate progress, see that it
244 * matches exit progress, and the gate is closed, so stealer can proceed.
245 * 3) stealer will read pre-gate progress just after coreloop updates it..
246 * so stealer goes into a loop until the coreloop causes wait-progress
247 * to match pre-gate progress, so then stealer can proceed
248 * 2) same as 3..
249 * 1) stealer reads pre-gate progress, sees that it's different than exit,
250 * so goes into loop until exit matches pre-gate, now it knows coreloop
251 * is not in protected and cannot get back in, so can proceed.
252 *
253 *Implementation for the stealer:
254 *
255 *First, acquire the stealer lock -- only cores with no work to do will
256 * compete to steal, so not a big performance penalty having only one --
257 * will rarely have multiple stealers in a system with plenty of work -- and
258 * in a system with little work, it doesn't matter.
259 *
260 *Note, have single-reader, single-writer pattern for all variables used to
261 * communicate between stealer and victims
262 *
263 *So, scan the queues of the core controllers, until find non-empty. Each core
264 * has its own list that it scans. The list goes in order from closest to
265 * furthest core, so it steals first from close cores. Later can add
266 * taking info from the app about overlapping footprints, and scan all the
267 * others then choose work with the most footprint overlap with the contents
268 * of this core's cache.
269 *
 270 *Now, have a victim we want to take work from. So, shut the gate in that
 271 * coreloop, by setting the "gate closed" var on its stack to TRUE.
272 *Then, read the core's pre-gate progress and compare to the core's exit
273 * progress.
274 *If same, can proceed to take work from the coreloop's queue. When done,
275 * write FALSE to gate closed var.
276 *If different, then enter a loop that reads the pre-gate progress, then
277 * compares to exit progress then to wait progress. When one of two
278 * matches, proceed. Take work from the coreloop's queue. When done,
279 * write FALSE to the gate closed var.
280 *
281 */
//Gate-protected work-stealing (algorithm described in the long comment
// above): find a victim core with a non-empty ready queue, take the global
// stealer lock, close the victim's gate, wait until its core loop is
// provably outside the protected region, then dequeue one slave and install
// it in currSlot / this core's own ready queue.
//Returns without stealing if no victim has work or the stealer lock is held
// by another core (the core controller will simply re-run the master).
 282 void inline
 283 gateProtected_stealWorkInto( SchedSlot *currSlot,
 284 VMSQueueStruc *myReadyToAnimateQ,
 285 SlaveVP *masterVP )
 286 {
 287 SlaveVP *stolenSlv;
 288 int32 coreIdx, i, haveAVictim, gotLock;
 289 VMSQueueStruc *victimsQ;
 291 volatile GateStruc *vicGate; //volatile: gate fields are written by the victim core concurrently
 292 int32 coreMightBeInProtected;
 296 //see if any other cores have work available to steal
 297 haveAVictim = FALSE;
 298 coreIdx = masterVP->coreAnimatedBy;
 299 for( i = 0; i < NUM_CORES -1; i++ ) //visit every core except self, once
 300 {
 301 if( coreIdx >= NUM_CORES -1 ) //advance with wrap-around
 302 { coreIdx = 0;
 303 }
 304 else
 305 { coreIdx++;
 306 }
 307 //TODO: fix this for coreCtlr scans slots
 308 // victimsQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
//BUG(review): with the line above commented out, victimsQ is read while
// uninitialized (undefined behavior).  Function is non-functional until
// the TODO is resolved.
 309 if( numInVMSQ( victimsQ ) > 0 )
 310 { haveAVictim = TRUE;
 311 vicGate = _VMSMasterEnv->workStealingGates[ coreIdx ];
 312 break;
 313 }
 314 }
 315 if( !haveAVictim ) return; //no work to steal, exit
 317 //have a victim core, now get the stealer-lock
 318 gotLock =__sync_bool_compare_and_swap( &(_VMSMasterEnv->workStealingLock),
 319 UNLOCKED, LOCKED );
 320 if( !gotLock ) return; //go back to core controller, which will re-start master
 323 //====== Start Gate-protection =======
 324 vicGate->gateClosed = TRUE; //shut the gate, then prove the victim is out
 325 coreMightBeInProtected= vicGate->preGateProgress != vicGate->exitProgress;
 326 while( coreMightBeInProtected )
 327 { //wait until sure
//busy-wait: victim is out once its pre-gate count matches either the
// waiting-room count (stopped at the closed gate) or the exit count
// (made it all the way through) -- see proof in the comment block above
 328 if( vicGate->preGateProgress == vicGate->waitProgress )
 329 coreMightBeInProtected = FALSE;
 330 if( vicGate->preGateProgress == vicGate->exitProgress )
 331 coreMightBeInProtected = FALSE;
 332 }
 334 stolenSlv = readVMSQ ( victimsQ );
 336 vicGate->gateClosed = FALSE;
 337 //======= End Gate-protection =======
 340 if( stolenSlv != NULL ) //victim could have been in protected and taken
 341 { currSlot->slaveAssignedToSlot = stolenSlv; //link slot<->slave both ways
 342 stolenSlv->schedSlot = currSlot;
 343 currSlot->needsSlaveAssigned = FALSE;
 345 writeVMSQ( stolenSlv, myReadyToAnimateQ );
 346 }
 348 //unlock the work stealing lock
//NOTE(review): plain store to release the lock -- relies on the CAS above
// plus x86-ish ordering; no release barrier is visible here.  Confirm this
// is sufficient on the target platforms.
 349 _VMSMasterEnv->workStealingLock = UNLOCKED;
 350 }