view MasterLoop.c @ 120:d4c881c7f03a

Added fn to send inter-master requests, and cleaned up code
author Me@portablequad
date Sat, 03 Sep 2011 20:41:51 -0700
parents ac11b50220bd
children 24466227d8bb
line source
/*
 * Copyright 2010 OpenSourceStewardshipFoundation
 *
 * Licensed under BSD
 */

#include <stdio.h>
#include <stddef.h>

#include "VMS.h"
#include "ProcrContext.h"
#include "Master_Request_Handlers.h"

//===========================================================================
//Forward declarations -- these fns are called before they're defined below
inline void
stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
               VirtProcr *masterPr );

inline void
gateProtected_stealWorkInto( SchedSlot *currSlot,
                             VMSQueueStruc *myReadyToAnimateQ,
                             VirtProcr *masterPr );

inline void
handleInterMasterReq( InterMasterReqst *currReq, void *_semEnv,
                      VirtProcr *masterPr );

inline void
handleInterVMSCoreReq( InterVMSCoreReqst *currReq, VirtProcr *masterPr );

//===========================================================================
/*This code is animated by the virtual Master processor.
 *
 *Polls each sched slot exactly once, and hands any requests made by a newly
 * done slave to the "request handler" plug-in function.
 *
 *Any slots that need a virt procr assigned are given to the "schedule"
 * plug-in function, which tries to assign a virt procr (slave) to each.
 *
 *When all slots needing a processor have been given to the schedule plug-in,
 * a fraction of the procrs successfully scheduled are put into the
 * work queue, then a continuation of this function is put in, then the rest
 * of the virt procrs that were successfully scheduled.
 *
 *The first thing the continuation does is busy-wait until the previous
 * animation completes. This is because an (unlikely) continuation may
 * sneak through the queue before the previous continuation is done putting
 * the second part of the scheduled slaves in, which is the only race
 * condition.
 */
/*May 29, 2010 -- birth a Master during init, so that the first core loop to
 * start running gets it and does all the stuff for a newly born --
 * from then on, it will be doing continuations, but does the self-suspension
 * directly at the end of the master loop.
 *So VMS__init just births the master virtual processor the same way it
 * births all the others -- then does any extra setup needed and puts it
 * into the work queue.
 *However, this means masterEnv has to be made a global static volatile.
 *
 *
 *Aug 18, 2010 -- Going to a separate MasterVP for each core, to see if this
 * avoids the suspected bug in the system stack that causes bizarre faults
 * at random places in the system code.
 *
 *So, this function is coupled to each of the MasterVPs -- meaning this
 * function can't rely on a particular stack and frame -- each MasterVP that
 * animates this function has a different stack.
 *
 *At this point, the masterLoop does not write itself into the queue anymore.
 * Instead, the coreLoop acquires the masterLock when it has nothing to
 * animate, and then animates its own masterLoop. However, still try to put
 * several AppVPs into the queue, to amortize the startup cost of switching
 * to the MasterVP. Note, don't have to worry about latency of requests
 * much, because most requests generate work for the same core -- the only
 * latency issue is the case when other cores are starved and one core's
 * requests generate work for them -- so keep the max in the queue to 3 or
 * 4. (A sketch of that coreLoop hand-off follows.)
 */
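//===========================================================================
/*Hypothetical sketch of the coreLoop hand-off just described -- NOT code
 * from VMS itself; the real version lives in the coreloop. The names
 * "masterLock" and "switchToMasterVP" are assumptions standing in for
 * whatever the comment above calls "the masterLock" and the assembly
 * routine that switches animation to this core's MasterVP.
 */
extern int32 masterLock;                        //assumed lock word
extern void  switchToMasterVP( int32 coreIdx ); //assumed asm switch routine

inline void
sketch_coreLoopIdlePath( int32 coreIdx )
 {
   //a coreloop whose queue has run dry competes for the master lock, then
   // animates its own core's MasterVP, which runs masterLoop below
   if( numInVMSQ( _VMSMasterEnv->readyToAnimateQs[coreIdx] ) == 0 )
    { if( __sync_bool_compare_and_swap( &masterLock, UNLOCKED, LOCKED ) )
       { switchToMasterVP( coreIdx ); //masterLoop switches back when done
         masterLock = UNLOCKED;       //assumed release point [not shown here]
       }
    }
 }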
void masterLoop( void *initData, VirtProcr *animatingPr )
 {
   int32 slotIdx, numSlotsFilled;
   VirtProcr *schedVirtPr;
   SchedSlot *currSlot, **schedSlots;
   MasterEnv *masterEnv;
   VMSQueueStruc *readyToAnimateQ;

   SlaveScheduler slaveScheduler;
   RequestHandler requestHandler;
   void *semanticEnv;

   InterMasterReqst *currReq; //was missing -- used in the request-drain loop

   int32 thisCoresIdx;
   VirtProcr *masterPr;
   volatile VirtProcr *volatileMasterPr;

   volatileMasterPr = animatingPr;
   masterPr = (VirtProcr*)volatileMasterPr; //used to force re-define after jmp

   //First animation of each MasterVP will in turn animate this part
   // of setup code.. (VP creator sets up the stack as if this function
   // was called normally, but actually get here by jmp)
   //So, setup values about stack ptr, jmp pt and all that
   //masterPr->nextInstrPt = &&masterLoopStartPt;

   //Sept 2011
   //Old code jumped directly to this point, but that doesn't work on x64.
   // So, just make this an endless loop, and do an assembly function at the
   // end that saves its own return addr, then jumps to core_loop.
 while(1)
  {
   //============================= MEASUREMENT STUFF ========================
   #ifdef MEAS__TIME_MASTER
   //Total Master time includes one coreloop time -- just assume the core
   // loop time is the same for the Master as for AppVPs, even though it may
   // be smaller due to higher predictability of the fixed jmp.
   saveLowTimeStampCountInto( masterPr->startMasterTSCLow );
   #endif
   //========================================================================

   masterEnv = (MasterEnv*)_VMSMasterEnv;

   //GCC may optimize so it doesn't always re-define from frame-storage
   masterPr = (VirtProcr*)volatileMasterPr; //on stack, reload after jmp
   thisCoresIdx = masterPr->coreAnimatedBy;
   readyToAnimateQ = masterEnv->readyToAnimateQs[thisCoresIdx];
   schedSlots = masterEnv->allSchedSlots[thisCoresIdx];

   requestHandler = masterEnv->requestHandler;
   slaveScheduler = masterEnv->slaveScheduler;
   semanticEnv = masterEnv->semanticEnv;

   //First, check for requests from other MasterVPs, and handle them
   if( (currReq = masterEnv->interMasterRequestsFor[thisCoresIdx]) != NULL )
    { do
       { handleInterMasterReq( currReq, semanticEnv, masterPr );
       }
      while( (currReq = currReq->nextReqst) != NULL );
    }

   //Now, take care of the SlaveVPs
   //Go through the slots -- if the Slave there is newly suspended, handle
   // its request, then, either way, ask the assigner to fill each slot
   numSlotsFilled = 0;
   for( slotIdx = 0; slotIdx < NUM_SCHED_SLOTS; slotIdx++ )
    {
      currSlot = schedSlots[ slotIdx ];

      if( currSlot->workIsDone )
       {
         currSlot->workIsDone = FALSE;
         currSlot->needsProcrAssigned = TRUE;

         //process requests from slave to master
         //====================== MEASUREMENT STUFF ===================
         #ifdef MEAS__TIME_PLUGIN
         int32 startStamp1, endStamp1;
         saveLowTimeStampCountInto( startStamp1 );
         #endif
         //============================================================
         (*requestHandler)( currSlot->procrAssignedToSlot, semanticEnv );
         //====================== MEASUREMENT STUFF ===================
         #ifdef MEAS__TIME_PLUGIN
         saveLowTimeStampCountInto( endStamp1 );
         addIntervalToHist( startStamp1, endStamp1,
                            _VMSMasterEnv->reqHdlrLowTimeHist );
         addIntervalToHist( startStamp1, endStamp1,
                            _VMSMasterEnv->reqHdlrHighTimeHist );
         #endif
         //============================================================
       }
      if( currSlot->needsProcrAssigned )
       { //give slot a new virt procr
         schedVirtPr = (*slaveScheduler)( semanticEnv, thisCoresIdx );

         if( schedVirtPr != NULL )
          { currSlot->procrAssignedToSlot = schedVirtPr;
            schedVirtPr->schedSlot = currSlot;
            currSlot->needsProcrAssigned = FALSE;
            numSlotsFilled += 1;

            writeVMSQ( schedVirtPr, readyToAnimateQ );
          }
       }
    }

   #ifdef USE_WORK_STEALING
   //If no slots were filled, means no more work, so look for work to steal.
   if( numSlotsFilled == 0 )
    { gateProtected_stealWorkInto( currSlot, readyToAnimateQ, masterPr );
    }
   #endif

   #ifdef MEAS__TIME_MASTER
   saveLowTimeStampCountInto( masterPr->endMasterTSCLow );
   #endif

   masterSwitchToCoreLoop( animatingPr );
   flushRegisters();
  }//while(1) MasterLoop
 }
/*This is for inter-master communication. Either the master itself or
 * the plugin sends one of these requests. Some are handled here, by the
 * master loop, others are handed off to the plugin.
 */
inline void
handleInterMasterReq( InterMasterReqst *currReq, void *_semEnv,
                      VirtProcr *masterPr )
 { switch( currReq->reqType )
    { case destVMSCore:
         handleInterVMSCoreReq( (InterVMSCoreReqst *)currReq, masterPr );
         break;
      case destPlugin:
         (*interPluginReqHdlr)( ((InterPluginReqst *)currReq)->pluginReq,
                                _semEnv );
         break;
      default:
         break;
    }
 }
inline void
handleInterVMSCoreReq( InterVMSCoreReqst *currReq, VirtProcr *masterPr )
 {
   switch( currReq->reqType )
    {
      case transfer_free_ptr: handleTransferFree( currReq, masterPr );
         break;
    }
 }
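//===========================================================================
/*Hypothetical sketch of the sending side -- the actual send fn added in
 * this changeset is not shown in this file [assumption]. Illustrates a
 * lock-free push onto the destination core's request list, using the same
 * interMasterRequestsFor[] head and nextReqst link that the drain loop in
 * masterLoop reads. Assumes the drain side detaches or clears the list.
 */
inline void
sketch_sendInterMasterReq( InterMasterReqst *req, int32 destCoreIdx )
 {
   InterMasterReqst **head, *oldHead;

   head = &(_VMSMasterEnv->interMasterRequestsFor[destCoreIdx]);
   do
    { oldHead = *head;
      req->nextReqst = oldHead; //link new request in front of current list
    }
   while( !__sync_bool_compare_and_swap( head, oldHead, req ) );
 }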
/*Work Stealing Alg -- racy one
 *This algorithm has a race condition -- the coreloops are accessing their
 * own queues at the same time that this work-stealer on a different core
 * is trying to.
 *The second stealing alg, below, protects against this.
 */
inline void
stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
               VirtProcr *masterPr )
 {
   VirtProcr *stolenPr;
   int32 coreIdx, i;
   VMSQueueStruc *currQ;

   stolenPr = NULL;
   coreIdx = masterPr->coreAnimatedBy;
   for( i = 0; i < NUM_CORES - 1; i++ )
    {
      if( coreIdx >= NUM_CORES - 1 )
       { coreIdx = 0;
       }
      else
       { coreIdx++;
       }
      currQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
      if( numInVMSQ( currQ ) > 0 )
       { stolenPr = readVMSQ( currQ );
         break;
       }
    }

   if( stolenPr != NULL )
    { currSlot->procrAssignedToSlot = stolenPr;
      stolenPr->schedSlot = currSlot;
      currSlot->needsProcrAssigned = FALSE;

      writeVMSQ( stolenPr, readyToAnimateQ );
    }
 }
/*Work Stealing alg -- protected one
 *This algorithm makes the common case fast. Make the coreloop passive,
 * and make it show its progress. Make the stealer control a gate that the
 * coreloop has to pass.
 *To avoid interference, only one stealer at a time. Use a global
 * stealer-lock.
 *
 *The pattern is based on a gate -- the stealer shuts the gate, then monitors
 * to be sure any coreloop already past it makes it all the way out, before
 * starting.
 *So, have a "progress" measure just before the gate, then have two after it:
 * one is in a "waiting room" outside the gate, the other is at the exit.
 *Then, the stealer first shuts the gate, then checks the progress measure
 * outside it, then looks to see if the progress measure at the exit is the
 * same. If yes, it knows the protected area is empty, 'cause there's no
 * other way to get in and the last to get in also exited.
 *If the progress measure at the exit is not the same, then the stealer goes
 * into a loop checking both the waiting-area and the exit progress-measures
 * until one of them shows the same as the measure outside the gate. Might
 * as well re-read the measure outside the gate each go around, just to be
 * sure. It is guaranteed that one of the two will eventually match the one
 * outside the gate.
 *
 *Here's an informal proof of correctness:
 *The gate can be closed at any point, and there are only four cases:
 * 1) coreloop made it past the gate-closing but not yet past the exit
 * 2) coreloop made it past the pre-gate progress update but not yet past
 *    the gate,
 * 3) coreloop is right before the pre-gate update
 * 4) coreloop is past the exit and far from the pre-gate update.
 *
 * Covering the cases in reverse order,
 * 4) is not a problem -- the stealer will read pre-gate progress, see that
 *    it matches exit progress, and the gate is closed, so the stealer can
 *    proceed.
 * 3) the stealer will read pre-gate progress just after the coreloop
 *    updates it.. so the stealer goes into a loop until the coreloop causes
 *    wait-progress to match pre-gate progress, so then the stealer can
 *    proceed
 * 2) same as 3..
 * 1) the stealer reads pre-gate progress, sees that it's different from
 *    exit, so goes into a loop until exit matches pre-gate; now it knows
 *    the coreloop is not in the protected region and cannot get back in,
 *    so it can proceed.
 *
 *Implementation for the stealer:
 *
 *First, acquire the stealer lock -- only cores with no work to do will
 * compete to steal, so not a big performance penalty having only one --
 * will rarely have multiple stealers in a system with plenty of work -- and
 * in a system with little work, it doesn't matter.
 *
 *Note, have a single-reader, single-writer pattern for all variables used
 * to communicate between stealer and victims.
 *
 *So, scan the queues of the core loops until a non-empty one is found. Each
 * core has its own list that it scans. The list goes in order from closest
 * to furthest core, so it steals first from close cores. Later, can add
 * taking info from the app about overlapping footprints, then scan all the
 * others and choose work with the most footprint overlap with the contents
 * of this core's cache.
 *
 *Now, have a victim to take work from. So, shut the gate in that coreloop,
 * by setting the "gate closed" var on its stack to TRUE.
 *Then, read the core's pre-gate progress and compare to the core's exit
 * progress.
 *If the same, can proceed to take work from the coreloop's queue. When
 * done, write FALSE to the gate-closed var.
 *If different, then enter a loop that reads the pre-gate progress, then
 * compares it to exit progress, then to wait progress. When one of the two
 * matches, proceed. Take work from the coreloop's queue. When done,
 * write FALSE to the gate-closed var.
 *
 *(A sketch of the victim-side sequence appears after the stealer, below.)
 */
inline void
gateProtected_stealWorkInto( SchedSlot *currSlot,
                             VMSQueueStruc *myReadyToAnimateQ,
                             VirtProcr *masterPr )
 {
   VirtProcr *stolenPr;
   int32 coreIdx, i, haveAVictim, gotLock;
   VMSQueueStruc *victimsQ;

   volatile GateStruc *vicGate;
   int32 coreMightBeInProtected;

   //see if any other cores have work available to steal
   haveAVictim = FALSE;
   coreIdx = masterPr->coreAnimatedBy;
   for( i = 0; i < NUM_CORES - 1; i++ )
    {
      if( coreIdx >= NUM_CORES - 1 )
       { coreIdx = 0;
       }
      else
       { coreIdx++;
       }
      victimsQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
      if( numInVMSQ( victimsQ ) > 0 )
       { haveAVictim = TRUE;
         vicGate = _VMSMasterEnv->workStealingGates[ coreIdx ];
         break;
       }
    }
   if( !haveAVictim ) return; //no work to steal, exit

   //have a victim core, now get the stealer-lock
   gotLock = __sync_bool_compare_and_swap( &(_VMSMasterEnv->workStealingLock),
                                           UNLOCKED, LOCKED );
   if( !gotLock ) return; //go back to core loop, which will re-start master

   //====== Start Gate-protection =======
   vicGate->gateClosed = TRUE;
   coreMightBeInProtected = vicGate->preGateProgress != vicGate->exitProgress;
   while( coreMightBeInProtected )
    { //wait until sure
      if( vicGate->preGateProgress == vicGate->waitProgress )
         coreMightBeInProtected = FALSE;
      if( vicGate->preGateProgress == vicGate->exitProgress )
         coreMightBeInProtected = FALSE;
    }

   stolenPr = readVMSQ( victimsQ );

   vicGate->gateClosed = FALSE;
   //======= End Gate-protection =======

   if( stolenPr != NULL ) //victim could have been in protected and taken it
    { currSlot->procrAssignedToSlot = stolenPr;
      stolenPr->schedSlot = currSlot;
      currSlot->needsProcrAssigned = FALSE;

      writeVMSQ( stolenPr, myReadyToAnimateQ );
    }

   //unlock the work stealing lock
   _VMSMasterEnv->workStealingLock = UNLOCKED;
 }
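//===========================================================================
/*Hypothetical sketch of the victim-side gate sequence described above --
 * NOT code from VMS itself; the real version lives in the coreloop. Shows
 * the three progress updates and the gate-closed spin, using the same
 * GateStruc fields the stealer reads. The pre-gate measure increments, so
 * a stale wait or exit value can never falsely match it.
 */
inline void
sketch_coreLoopGatePass( volatile GateStruc *myGate, VMSQueueStruc *myQ )
 {
   VirtProcr *nextPr;

   myGate->preGateProgress += 1;  //1) update pre-gate measure, outside gate
   while( myGate->gateClosed )
    { //2) stuck in the waiting room -- show progress so the stealer can tell
      myGate->waitProgress = myGate->preGateProgress;
    }

   //3) protected region -- a stealer that closes the gate now will spin
   // until the exit measure catches up (case 1 of the proof above)
   nextPr = readVMSQ( myQ );
   if( nextPr != NULL )
    { //animate nextPr..
    }

   myGate->exitProgress = myGate->preGateProgress; //4) update exit measure
 }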