MasterLoop.c @ 220:fb75c4b52d32 -- "Fixed sequential version"
author:  Merten Sach <msach@mailbox.tu-berlin.de>
date:    Mon, 05 Mar 2012 13:59:02 +0100
parents: 91d0d2e06719
/*
 * Copyright 2010 OpenSourceStewardshipFoundation
 *
 * Licensed under BSD
 */

#include <stdio.h>
#include <stddef.h>

#include "VMS.h"
#include "ProcrContext.h"
//===========================================================================
inline void
stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
               VirtProcr *masterVP );

inline void //forward declaration was missing for the call inside masterLoop
gateProtected_stealWorkInto( SchedSlot *currSlot,
                             VMSQueueStruc *myReadyToAnimateQ,
                             VirtProcr *masterVP );
//===========================================================================
/*This code is animated by the virtual Master processor.
 *
 *It polls each sched slot exactly once, and hands any requests made by a
 * newly done slave to the "request handler" plug-in function.
 *
 *Any slots that need a virt procr assigned are given to the "schedule"
 * plug-in function, which tries to assign a virt procr (slave) to each.
 *
 *When all slots needing a processor have been given to the schedule plug-in,
 * a fraction of the procrs successfully scheduled are put into the
 * work queue, then a continuation of this function is put in, then the rest
 * of the virt procrs that were successfully scheduled (sketched below).
 *
 *The first thing the continuation does is busy-wait until the previous
 * animation completes. This is because an (unlikely) continuation may
 * sneak through the queue before the previous continuation has finished
 * putting the second part of the scheduled slaves in, which is the only
 * race condition.
 */
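
/*The scheme just described -- put in a fraction of the scheduled slaves,
 * then a master continuation, then the rest -- is not implemented in the
 * fixed sequential version below, so here is a minimal sketch of it. The
 * function name, the masterContinuationVP parameter, and the half-way split
 * are assumptions for illustration; writeVMSQ and the types are this file's.
 */
static inline void
sketch_enqueueAroundContinuation( VirtProcr **schedPrs, int32 numSched,
                                  VirtProcr *masterContinuationVP,
                                  VMSQueueStruc *readyToAnimateQ )
 { int32 i, firstPart;

   firstPart = numSched / 2; //"a fraction" -- half, purely for illustration

   for( i = 0; i < firstPart; i++ )    //first part of the scheduled slaves
    { writeVMSQ( schedPrs[i], readyToAnimateQ );
    }

   writeVMSQ( masterContinuationVP, readyToAnimateQ ); //then the continuation

   for( ; i < numSched; i++ )  //then the rest -- the continuation busy-waits
    { writeVMSQ( schedPrs[i], readyToAnimateQ ); // until this loop completes
    }
 }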
/*May 29, 2010 -- birth a Master during init so that the first core loop to
 * start running gets it and does all the stuff for a newly born procr --
 * from then on it will be doing continuations, but it does the suspension
 * of itself directly at the end of the master loop.
 *So VMS__init just births the master virtual processor the same way it
 * births all the others -- then it does any extra setup needed and puts the
 * master into the work queue.
 *However, this means masterEnv has to be made a global static volatile, the
 * same way readyToAnimateQ was in the core loop. -- for performance, put the
 * jump to the core loop directly in here, and have it directly jump back.
 *
 *
 *Aug 18, 2010 -- Going to a separate MasterVP for each core, to see if this
 * avoids the suspected bug in the system stack that causes bizarre faults
 * at random places in the system code.
 *
 *So, this function is coupled to each of the MasterVPs -- meaning this
 * function can't rely on a particular stack and frame -- each MasterVP that
 * animates this function has a different one.
 *
 *At this point, the masterLoop does not write itself into the queue anymore;
 * instead, the coreLoop acquires the masterLock when it has nothing to
 * animate, and then animates its own masterLoop (see the sketch after this
 * comment). However, it still tries to put several AppVPs into the queue to
 * amortize the startup cost of switching to the MasterVP. Note, we don't
 * have to worry much about the latency of requests, because most requests
 * generate work for the same core -- the only latency issue is the case
 * when other cores are starved and one core's requests generate work for
 * them -- so keep the max in the queue to 3 or 4.
 */
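
/*A minimal sketch of the core-loop side of the scheme just described. The
 * masterLock field on _VMSMasterEnv and the commented-out animate/switch
 * steps are assumptions for illustration; only the compare-and-swap on
 * UNLOCKED/LOCKED mirrors what this file really does (for the
 * work-stealing lock, further below).
 */
static inline void
sketch_coreLoopIdlePath( int32 coreIdx, VirtProcr *myMasterVP )
 { VirtProcr *nextPr;

   nextPr = readVMSQ( _VMSMasterEnv->readyToAnimateQs[coreIdx] );
   if( nextPr != NULL )
    { //animate nextPr here -- jump to the slave's context
      return;
    }
      //nothing to animate -- try for the master lock; losing the race just
      // means some other core's master is already generating work
   if( __sync_bool_compare_and_swap( &(_VMSMasterEnv->masterLock),
                                     UNLOCKED, LOCKED ) )
    { //switch to myMasterVP here, which animates masterLoop() below
      _VMSMasterEnv->masterLock = UNLOCKED;
    }
   (void)myMasterVP; //unused in this sketch -- real code would switch to it
 }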
void masterLoop( void *initData, VirtProcr *animatingPr )
 {
   int32          slotIdx, numSlotsFilled;
   VirtProcr     *schedVirtPr;
   SchedSlot     *currSlot, **schedSlots;
   MasterEnv     *masterEnv;
   VMSQueueStruc *readyToAnimateQ;

   SlaveScheduler slaveScheduler;
   RequestHandler requestHandler;
   void          *semanticEnv;

   int32      thisCoresIdx;
   VirtProcr *masterVP;
   volatile VirtProcr *volatilemasterVP;

   volatilemasterVP = animatingPr;
   masterVP = (VirtProcr*)volatilemasterVP; //used to force re-define after jmp

   //====================== Measurement =====================
   TSCountLowHigh endMaster;
   uint64         numCycles;
   //==========================================================

   masterEnv = (MasterEnv*)_VMSMasterEnv;

   masterVP = (VirtProcr*)volatilemasterVP; //just to make sure after jmp
   thisCoresIdx    = masterVP->coreAnimatedBy;
   readyToAnimateQ = masterEnv->readyToAnimateQs[thisCoresIdx];
   schedSlots      = masterEnv->allSchedSlots[thisCoresIdx];

   requestHandler = masterEnv->requestHandler;
   slaveScheduler = masterEnv->slaveScheduler;
   semanticEnv    = masterEnv->semanticEnv;
   while(1){ //switch to core_loop and back to here is at end of loop

      //============================= MEASUREMENT STUFF =======================
      #ifdef MEAS__TIME_MASTER
      //Total Master time includes one coreloop time -- just assume the core
      // loop time is same for Master as for AppVPs, even though it may be
      // smaller due to higher predictability of the fixed jmp.
      saveLowTimeStampCountInto( masterVP->startMasterTSCLow );
      #endif
      //=======================================================================
      //Poll each slot's Done flag
      numSlotsFilled = 0;
      /*
      Meas_startMasterLoop
      */
      for( slotIdx = 0; slotIdx < NUM_SCHED_SLOTS; slotIdx++ )
       {
         currSlot = schedSlots[ slotIdx ];

         if( currSlot->workIsDone )
          {
            currSlot->workIsDone         = FALSE;
            currSlot->needsProcrAssigned = TRUE;

            //process requests from slave to master
            /*
            Meas_startReqHdlr
            */
            (*requestHandler)( currSlot->procrAssignedToSlot, semanticEnv );
            /*
            Meas_endReqHdlr
            */
          }
         if( currSlot->needsProcrAssigned )
          { //give slot a new virt procr
            schedVirtPr = (*slaveScheduler)( semanticEnv, thisCoresIdx );

            if( schedVirtPr != NULL )
             { currSlot->procrAssignedToSlot = schedVirtPr;
               schedVirtPr->schedSlot        = currSlot;
               currSlot->needsProcrAssigned  = FALSE;
               numSlotsFilled += 1;

               writeVMSQ( schedVirtPr, readyToAnimateQ );
             }
          }
       }
      /*
      Meas_endMasterLoop
      */
      #ifdef USE_WORK_STEALING
      //If no slots were filled, there is no more local work -- look for
      // work to steal from another core's queue.
      if( numSlotsFilled == 0 )
       { gateProtected_stealWorkInto( currSlot, readyToAnimateQ, masterVP );
       }
      #endif
      //=================== Meas =======================
      #ifdef MEAS__TIME_MASTER
      saveLowTimeStampCountInto( masterVP->endMasterTSCLow );
      #endif
      #ifdef MEAS__TIME_2011_SYS
      //Take meas here, to get cycles since entering the Master
      saveTSCLowHigh( endMaster );
      numCycles = endMaster.longVal - _VMSMasterEnv->startMaster.longVal;

      if( numCycles < 200000 ) //sanity check against thread being swapped out
       { masterEnv->totalMasterCycles += numCycles;
         masterEnv->numMasterAnimations++;
       }
      #endif
      //==================================================
      masterSwitchToCoreLoop( animatingPr ); //"finishes" when switched back to Master
      flushRegisters();
    } //end while(1) of the master loop
 }
/*This has a race condition -- the coreloops are accessing their own queues
 * at the same time that this work-stealer on a different core is trying to
 * read from them.
 */
inline void
stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
               VirtProcr *masterVP )
 {
   VirtProcr     *stolenPr;
   int32          coreIdx, i;
   VMSQueueStruc *currQ;

   stolenPr = NULL;
   coreIdx  = masterVP->coreAnimatedBy;
   for( i = 0; i < NUM_CORES - 1; i++ )
    {
      if( coreIdx >= NUM_CORES - 1 )
       { coreIdx = 0;
       }
      else
       { coreIdx++;
       }
      currQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
      if( numInVMSQ( currQ ) > 0 )
       { stolenPr = readVMSQ( currQ );
         break;
       }
    }

   if( stolenPr != NULL )
    { currSlot->procrAssignedToSlot = stolenPr;
      stolenPr->schedSlot           = currSlot;
      currSlot->needsProcrAssigned  = FALSE;

      writeVMSQ( stolenPr, readyToAnimateQ );
    }
 }
/*This algorithm makes the common case fast. Make the coreloop passive,
 * and have it show its progress. Make the stealer control a gate that the
 * coreloop has to pass.
 *To avoid interference, only one stealer runs at a time. Use a global
 * stealer-lock.
 *
 *The pattern is based on a gate -- the stealer shuts the gate, then
 * monitors to be sure any coreloop already past it makes it all the way
 * out before starting.
 *So, have a "progress" measure just before the gate, then two after it:
 * one is in a "waiting room" outside the gate, the other is at the exit.
 *Then, the stealer first shuts the gate, then checks the progress measure
 * outside it, then looks to see whether the progress measure at the exit is
 * the same. If yes, it knows the protected area is empty, because there is
 * no other way to get in and the last one to get in also exited.
 *If the progress measure at the exit is not the same, then the stealer goes
 * into a loop checking both the waiting-area and the exit progress-measures
 * until one of them shows the same value as the measure outside the gate.
 * Might as well re-read the measure outside the gate each time around, just
 * to be sure. It is guaranteed that one of the two will eventually match
 * the one outside the gate.
 *
 *Here's an informal proof of correctness:
 *The gate can be closed at any point, and there are only four cases:
 * 1) the coreloop made it past the gate-closing but not yet past the exit
 * 2) the coreloop made it past the pre-gate progress update but not yet
 *    past the gate
 * 3) the coreloop is right before the pre-gate update
 * 4) the coreloop is past the exit and far from the pre-gate update.
 *
 * Covering the cases in reverse order:
 * 4) is not a problem -- the stealer will read pre-gate progress, see that
 *    it matches exit progress, and the gate is closed, so the stealer can
 *    proceed.
 * 3) the stealer will read pre-gate progress just after the coreloop
 *    updates it, so the stealer goes into a loop until the coreloop causes
 *    wait-progress to match pre-gate progress; then the stealer can proceed.
 * 2) same as 3.
 * 1) the stealer reads pre-gate progress, sees that it differs from exit,
 *    so it goes into a loop until exit matches pre-gate; now it knows the
 *    coreloop is not in the protected region and cannot get back in, so it
 *    can proceed.
 *
 *Implementation for the stealer:
 *
 *First, acquire the stealer lock -- only cores with no work to do will
 * compete to steal, so there is not a big performance penalty in having
 * only one -- there will rarely be multiple stealers in a system with
 * plenty of work, and in a system with little work, it doesn't matter.
 *
 *Note, we have a single-reader, single-writer pattern for all variables
 * used to communicate between the stealer and its victims.
 *
 *So, scan the queues of the core loops until a non-empty one is found.
 * Each core has its own list that it scans. The list goes in order from
 * the closest to the furthest core, so it steals first from close cores.
 * Later we can add taking info from the app about overlapping footprints,
 * and scan all the others, then choose the work with the most footprint
 * overlap with the contents of this core's cache.
 *
 *Now we have a victim to take work from. So, shut the gate in that
 * coreloop, by setting the "gate closed" var on its stack to TRUE.
 *Then, read the core's pre-gate progress and compare it to the core's exit
 * progress.
 *If they are the same, proceed to take work from the coreloop's queue.
 * When done, write FALSE to the gate-closed var.
 *If they are different, then enter a loop that reads the pre-gate progress,
 * then compares it to the exit progress, then to the wait progress. When
 * one of the two matches, proceed. Take work from the coreloop's queue.
 * When done, write FALSE to the gate-closed var. (A sketch of the
 * coreloop's side of this gate appears at the end of this file.)
 */
inline void
gateProtected_stealWorkInto( SchedSlot *currSlot,
                             VMSQueueStruc *myReadyToAnimateQ,
                             VirtProcr *masterVP )
 {
   VirtProcr     *stolenPr;
   int32          coreIdx, i, haveAVictim, gotLock;
   VMSQueueStruc *victimsQ;

   volatile GateStruc *vicGate;
   int32               coreMightBeInProtected;

   //see if any other cores have work available to steal
   haveAVictim = FALSE;
   coreIdx     = masterVP->coreAnimatedBy;
   for( i = 0; i < NUM_CORES - 1; i++ )
    {
      if( coreIdx >= NUM_CORES - 1 )
       { coreIdx = 0;
       }
      else
       { coreIdx++;
       }
      victimsQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
      if( numInVMSQ( victimsQ ) > 0 )
       { haveAVictim = TRUE;
         vicGate     = _VMSMasterEnv->workStealingGates[ coreIdx ];
         break;
       }
    }
   if( !haveAVictim ) return; //no work to steal, exit

   //have a victim core, now get the stealer-lock
   gotLock = __sync_bool_compare_and_swap( &(_VMSMasterEnv->workStealingLock),
                                           UNLOCKED, LOCKED );
   if( !gotLock ) return; //go back to core loop, which will re-start master

   //====== Start Gate-protection =======
   vicGate->gateClosed = TRUE;
   coreMightBeInProtected = (vicGate->preGateProgress != vicGate->exitProgress);
   while( coreMightBeInProtected )
    { //wait until sure
      if( vicGate->preGateProgress == vicGate->waitProgress )
         coreMightBeInProtected = FALSE;
      if( vicGate->preGateProgress == vicGate->exitProgress )
         coreMightBeInProtected = FALSE;
    }

   stolenPr = readVMSQ( victimsQ );

   vicGate->gateClosed = FALSE;
   //======= End Gate-protection =======

   if( stolenPr != NULL ) //victim could have been in protected and taken it
    { currSlot->procrAssignedToSlot = stolenPr;
      stolenPr->schedSlot           = currSlot;
      currSlot->needsProcrAssigned  = FALSE;

      writeVMSQ( stolenPr, myReadyToAnimateQ );
    }

   //unlock the work stealing lock
   _VMSMasterEnv->workStealingLock = UNLOCKED;
 }
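
/*The coreloop (victim) side of the gate is not in this file; below is a
 * minimal sketch of how a coreloop would pass it, using the same GateStruc
 * fields the stealer above reads. The function name is hypothetical, and
 * the direct readVMSQ call stands in for whatever the real coreloop does
 * with its own readyToAnimateQ inside the protected region.
 */
static inline VirtProcr *
sketch_coreLoopPassGate( volatile GateStruc *myGate, VMSQueueStruc *myQ )
 { VirtProcr *nextPr;

   myGate->preGateProgress += 1; //announce intent, just before the gate

   while( myGate->gateClosed )   //the gate the stealer controls
    { //show the stealer we are stuck in the waiting room, not inside
      myGate->waitProgress = myGate->preGateProgress;
    }

      //protected region -- only the owning coreloop, or the lone stealer
      // that closed the gate, touches this queue
   nextPr = readVMSQ( myQ );

   myGate->exitProgress = myGate->preGateProgress; //announce made it out
   return nextPr; //may be NULL if the stealer (or earlier work) emptied it
 }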