view MasterLoop.c @ 167:981acd1db6af
Separate UCC recording from VMS core and put it into SSR plugin
| author | Nina Engelhardt |
|---|---|
| date | Mon, 05 Dec 2011 18:59:48 +0100 |
| parents | aefd87f9d12f |
| children | 3bd35fc83c61 |
/*
 * Copyright 2010 OpenSourceStewardshipFoundation
 *
 * Licensed under BSD
 */

#include <stdio.h>
#include <stddef.h>

#include "VMS.h"
#include "ProcrContext.h"
//===========================================================================
void inline
stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
               VirtProcr *masterPr );

void inline
gateProtected_stealWorkInto( SchedSlot *currSlot,
                             VMSQueueStruc *myReadyToAnimateQ,
                             VirtProcr *masterPr );

//===========================================================================
/*This code is animated by the virtual Master processor.
 *
 *Polls each sched slot exactly once, and hands any requests made by a newly
 * done slave to the "request handler" plug-in function.
 *
 *Any slots that need a virt procr assigned are given to the "schedule"
 * plug-in function, which tries to assign a virt procr (slave) to each.
 *
 *When all slots needing a processor have been given to the schedule plug-in,
 * a fraction of the procrs successfully scheduled are put into the
 * work queue, then a continuation of this function is put in, then the rest
 * of the virt procrs that were successfully scheduled.
 *
 *The first thing the continuation does is busy-wait until the previous
 * animation completes.  This is because an (unlikely) continuation may
 * sneak through the queue before the previous continuation is done putting
 * the second part of the scheduled slaves in, which is the only race
 * condition.
 */
/*May 29, 2010 -- birth a Master during init so that the first core loop to
 * start running gets it and does all the stuff for a newly born --
 * from then on, it will be doing continuations, but does its own suspension
 * directly at the end of the master loop.
 *So VMS__init just births the master virtual processor the same way it
 * births all the others -- then does any extra setup needed and puts it into
 * the work queue.
 *However, this means masterEnv has to be made a global static volatile, the
 * same way it was done with readyToAnimateQ in the core loop. -- for
 * performance, put the jump to the core loop directly in here, and have it
 * directly jump back.
 *
 *
 *Aug 18, 2010 -- Going to a separate MasterVP for each core, to see if this
 * avoids the suspected bug in the system stack that causes bizarre faults
 * at random places in the system code.
 *
 *So, this function is coupled to each of the MasterVPs -- meaning this
 * function can't rely on a particular stack and frame -- each MasterVP that
 * animates this function has a different one.
 *
 *At this point, the masterLoop does not write itself into the queue anymore;
 * instead, the coreLoop acquires the masterLock when it has nothing to
 * animate, and then animates its own masterLoop.  However, still try to put
 * several AppVPs into the queue to amortize the startup cost of switching
 * to the MasterVP.  Note, don't have to worry about latency of requests much
 * because most requests generate work for the same core -- the only latency
 * issue is the case where other cores are starved and one core's requests
 * generate work for them -- so keep the max in the queue to 3 or 4.
 */
void masterLoop( void *initData, VirtProcr *animatingPr )
 {
   int32           slotIdx, numSlotsFilled;
   VirtProcr      *schedVirtPr;
   SchedSlot      *currSlot, **schedSlots;
   MasterEnv      *masterEnv;
   VMSQueueStruc  *readyToAnimateQ;

   SlaveScheduler  slaveScheduler;
   RequestHandler  requestHandler;
   void           *semanticEnv;

   int32           thisCoresIdx;
   VirtProcr      *masterPr;
   volatile VirtProcr *volatileMasterPr;

   volatileMasterPr = animatingPr;
   masterPr = (VirtProcr*)volatileMasterPr; //used to force re-define after jmp

   #ifdef DETECT_LOOP_GRAPH
   bulb b = new_bulb();
   #endif
   numSlotsFilled = 1;

   //The first animation of each MasterVP will in turn animate this part
   // of the setup code (the VP creator sets up the stack as if this function
   // was called normally, but actually gets here by jmp).
   //So, set up values for the stack ptr, jmp pt and all that
   //masterPr->nextInstrPt = &&masterLoopStartPt;

   //Note, got rid of writing the stack and frame ptr up here, because only
   // one core can ever animate a given MasterVP, so don't need to communicate
   // the new frame and stack ptr to the MasterVP storage before a second
   // version of that MasterVP can get animated on a different core.
   //Also got rid of the busy-wait.

   //masterLoopStartPt:
   while(1){
      //============================= MEASUREMENT STUFF ========================
      #ifdef MEAS__TIME_MASTER
      //Total Master time includes one coreloop time -- just assume the core
      // loop time is same for Master as for AppVPs, even though it may be
      // smaller due to higher predictability of the fixed jmp.
      saveLowTimeStampCountInto( masterPr->startMasterTSCLow );
      #endif
      //========================================================================

      masterEnv = (MasterEnv*)_VMSMasterEnv;

      //GCC may optimize so doesn't always re-define from frame-storage
      masterPr        = (VirtProcr*)volatileMasterPr; //just to make sure after jmp
      thisCoresIdx    = masterPr->coreAnimatedBy;
      readyToAnimateQ = masterEnv->readyToAnimateQs[thisCoresIdx];
      schedSlots      = masterEnv->allSchedSlots[thisCoresIdx];

      requestHandler  = masterEnv->requestHandler;
      slaveScheduler  = masterEnv->slaveScheduler;
      semanticEnv     = masterEnv->semanticEnv;

      #ifdef DETECT_LOOP_GRAPH
      if( numSlotsFilled > 0 )
       {
         b = new_bulb();
         addToDynArray( (void*)b, masterEnv->loop_graph_array_info );
         set_bulb_core( b, thisCoresIdx );
         set_bulb_id( b, masterEnv->loop_counter[thisCoresIdx]++ );
       }
      #endif
      //Poll each slot's Done flag
      numSlotsFilled = 0;
      for( slotIdx = 0; slotIdx < NUM_SCHED_SLOTS; slotIdx++ )
       {
         currSlot = schedSlots[ slotIdx ];

         if( currSlot->workIsDone )
          {
            currSlot->workIsDone         = FALSE;
            currSlot->needsProcrAssigned = TRUE;

            //process requests from slave to master
            //====================== MEASUREMENT STUFF ===================
            #ifdef MEAS__TIME_PLUGIN
            int32 startStamp1, endStamp1;
            saveLowTimeStampCountInto( startStamp1 );
            #endif
            #ifdef MEAS__PERF_COUNTERS
            int lastRecordIdx =
               currSlot->procrAssignedToSlot->counter_history_array_info->numInArray - 1;
            CounterRecord *lastRecord =
               currSlot->procrAssignedToSlot->counter_history[lastRecordIdx];
            lastRecord->req_core = thisCoresIdx;
            saveCyclesAndInstrs( thisCoresIdx, lastRecord->next_task_req_cycles,
                                 lastRecord->next_task_req_instrs );
            //End of task, start of next task
            //print counters from last run
            addToDynArray( (void*)lastRecord, masterEnv->counter_history_array_info );
            print_record_csv_to_file( lastRecord, _VMSMasterEnv->counteroutput );
            //print_record_human_readable(lastRecord);
            //create new entry in record array
            CounterRecord *newRecord = VMS__malloc( sizeof(CounterRecord) );
            newRecord->req_core      = thisCoresIdx;
            newRecord->vp_id         = currSlot->procrAssignedToSlot->procrID;
            newRecord->task_position = lastRecord->task_position + 1;
            newRecord->req_cycles    = lastRecord->next_task_req_cycles;
            newRecord->req_instrs    = lastRecord->next_task_req_instrs;
            getReturnAddressBeforeLibraryCall( currSlot->procrAssignedToSlot,
                                               &(newRecord->addr_of_libcall_for_req) );
            addToDynArray( (void*)newRecord,
                           currSlot->procrAssignedToSlot->counter_history_array_info );
            lastRecord = newRecord;
            #endif
            //============================================================

            (*requestHandler)( currSlot->procrAssignedToSlot, semanticEnv );

            //====================== MEASUREMENT STUFF ===================
            #ifdef MEAS__TIME_PLUGIN
            saveLowTimeStampCountInto( endStamp1 );
            addIntervalToHist( startStamp1, endStamp1,
                               _VMSMasterEnv->reqHdlrLowTimeHist );
            addIntervalToHist( startStamp1, endStamp1,
                               _VMSMasterEnv->reqHdlrHighTimeHist );
            #endif
            #ifdef MEAS__PERF_COUNTERS
            //done with constraints check
            saveCyclesAndInstrs( thisCoresIdx, lastRecord->sc_done_cycles,
                                 lastRecord->sc_done_instrs );
            saveLowTimeStampCountInto( lastRecord->blocked_timestamp );
            #endif
            //============================================================
          }
         if( currSlot->needsProcrAssigned )
          { //give slot a new virt procr
            #ifdef MEAS__PERF_COUNTERS
            //start assigner
            uint64 tmp_cycles;
            uint64 tmp_instrs;
            saveCyclesAndInstrs( thisCoresIdx, tmp_cycles, tmp_instrs );
            #endif

            schedVirtPr = (*slaveScheduler)( semanticEnv, thisCoresIdx );

            if( schedVirtPr != NULL )
             { currSlot->procrAssignedToSlot = schedVirtPr;
               schedVirtPr->schedSlot        = currSlot;
               currSlot->needsProcrAssigned  = FALSE;
               numSlotsFilled += 1;
               schedVirtPr->numTimesScheduled++;
               #ifdef MEAS__PERF_COUNTERS
               //end assigner
               int lastRecordIdx =
                  currSlot->procrAssignedToSlot->counter_history_array_info->numInArray - 1;
               CounterRecord *lastRecord =
                  currSlot->procrAssignedToSlot->counter_history[lastRecordIdx];
               lastRecord->assigning_core      = thisCoresIdx;
               lastRecord->start_assign_cycles = tmp_cycles;
               lastRecord->start_assign_instrs = tmp_instrs;
               saveCyclesAndInstrs( thisCoresIdx, lastRecord->end_assign_cycles,
                                    lastRecord->end_assign_instrs );
               #endif

               writeVMSQ( schedVirtPr, readyToAnimateQ );
             }
          }
         #ifdef DETECT_LOOP_GRAPH
         if( !currSlot->needsProcrAssigned )
          {
            int lastRecordIdx =
               currSlot->procrAssignedToSlot->counter_history_array_info->numInArray - 1;
            CounterRecord *lastRecord =
               currSlot->procrAssignedToSlot->counter_history[lastRecordIdx];
            set_bulb_member( b, slotIdx, currSlot->procrAssignedToSlot->procrID,
                             lastRecord->task_position );
          }
         #endif
       }
      #ifdef USE_WORK_STEALING
      //If no slots were filled, there's no more local work, so look for work to steal.
      if( numSlotsFilled == 0 )
       { gateProtected_stealWorkInto( currSlot, readyToAnimateQ, masterPr );
       }
      #endif

      #ifdef MEAS__TIME_MASTER
      saveLowTimeStampCountInto( masterPr->endMasterTSCLow );
      #endif

      masterSwitchToCoreLoop( animatingPr );
      flushRegisters();
    } //end while(1)
 } //end masterLoop
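
/*ILLUSTRATIVE SKETCH -- not part of the original file.
 *The note above masterLoop says the coreLoop acquires the masterLock when it
 * has nothing left to animate and then animates its own MasterVP, which runs
 * this masterLoop.  A minimal sketch of that handoff might look like the
 * following.  The names masterLock, masterVPs[], and switchToMasterVP() are
 * assumptions for illustration only; the real coreLoop lives elsewhere in
 * the VMS sources.
 */
#if 0
void coreLoop_idleHandoff_sketch( int32 coreIdx )
 {
   //nothing left in this core's ready queue, so try to become the master
   if( numInVMSQ( _VMSMasterEnv->readyToAnimateQs[coreIdx] ) == 0 )
    {
      if( __sync_bool_compare_and_swap( &(_VMSMasterEnv->masterLock),
                                        UNLOCKED, LOCKED ) )
       { //animate this core's MasterVP -- masterLoop later jumps back to the
         // coreloop via masterSwitchToCoreLoop()
         switchToMasterVP( _VMSMasterEnv->masterVPs[coreIdx] );
         _VMSMasterEnv->masterLock = UNLOCKED;
       }
    }
 }
#endif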
/*This has a race condition -- the coreloops are accessing their own queues
 * at the same time that this work-stealer on a different core is trying to
 * read from them.
 */
void inline
stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
               VirtProcr *masterPr )
 {
   VirtProcr     *stolenPr;
   int32          coreIdx, i;
   VMSQueueStruc *currQ;

   stolenPr = NULL;
   coreIdx  = masterPr->coreAnimatedBy;
   for( i = 0; i < NUM_CORES - 1; i++ )
    {
      if( coreIdx >= NUM_CORES - 1 )
       { coreIdx = 0;
       }
      else
       { coreIdx++;
       }
      currQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
      if( numInVMSQ( currQ ) > 0 )
       { stolenPr = readVMSQ( currQ );
         break;
       }
    }

   if( stolenPr != NULL )
    { currSlot->procrAssignedToSlot = stolenPr;
      stolenPr->schedSlot           = currSlot;
      currSlot->needsProcrAssigned  = FALSE;

      writeVMSQ( stolenPr, readyToAnimateQ );
    }
 }
/*This algorithm makes the common case fast.  Make the coreloop passive,
 * and have it show its progress.  Make the stealer control a gate that the
 * coreloop has to pass.
 *To avoid interference, only one stealer at a time.  Use a global
 * stealer-lock.
 *
 *The pattern is based on a gate -- the stealer shuts the gate, then monitors
 * to be sure any coreloop already past it makes it all the way out, before
 * starting.
 *So, have a "progress" measure just before the gate, then have two after it:
 * one is in a "waiting room" outside the gate, the other is at the exit.
 *Then, the stealer first shuts the gate, then checks the progress measure
 * outside it, then looks to see if the progress measure at the exit is the
 * same.  If yes, it knows the protected area is empty, because there's no
 * other way to get in and the last one in also exited.
 *If the progress measure at the exit is not the same, then the stealer goes
 * into a loop checking both the waiting-area and the exit progress-measures
 * until one of them shows the same as the measure outside the gate.  Might
 * as well re-read the measure outside the gate each go around, just to be
 * sure.  It is guaranteed that one of the two will eventually match the one
 * outside the gate.
 *
 *Here's an informal proof of correctness:
 *The gate can be closed at any point, and there are only four cases:
 * 1) coreloop made it past the gate-closing but not yet past the exit
 * 2) coreloop made it past the pre-gate progress update but not yet past
 *    the gate,
 * 3) coreloop is right before the pre-gate update
 * 4) coreloop is past the exit and far from the pre-gate update.
 *
 * Covering the cases in reverse order,
 * 4) is not a problem -- the stealer will read pre-gate progress, see that
 *    it matches exit progress, and the gate is closed, so the stealer can
 *    proceed.
 * 3) the stealer will read pre-gate progress just after the coreloop updates
 *    it, so the stealer goes into a loop until the coreloop causes
 *    wait-progress to match pre-gate progress, and then the stealer can
 *    proceed.
 * 2) same as 3.
 * 1) the stealer reads pre-gate progress, sees that it's different than
 *    exit, so it goes into a loop until exit matches pre-gate; now it knows
 *    the coreloop is not in the protected region and cannot get back in, so
 *    it can proceed.
 *
 *Implementation for the stealer:
 *
 *First, acquire the stealer lock -- only cores with no work to do will
 * compete to steal, so there's not a big performance penalty in having only
 * one -- a system with plenty of work will rarely have multiple stealers,
 * and in a system with little work, it doesn't matter.
 *
 *Note, there is a single-reader, single-writer pattern for all variables
 * used to communicate between stealer and victims.
 *
 *So, scan the queues of the core loops until a non-empty one is found.  Each
 * core has its own list that it scans.  The list goes in order from closest
 * to furthest core, so it steals first from close cores.  Later, could add
 * taking info from the app about overlapping footprints, and scan all the
 * others then choose work with the most footprint overlap with the contents
 * of this core's cache.
 *
 *Now, have a victim to take work from.  So, shut the gate in that coreloop,
 * by setting the "gate closed" var on its stack to TRUE.
 *Then, read the core's pre-gate progress and compare to the core's exit
 * progress.
 *If the same, can proceed to take work from the coreloop's queue.  When
 * done, write FALSE to the gate closed var.
 *If different, then enter a loop that reads the pre-gate progress, then
 * compares it to exit progress then to wait progress.  When one of the two
 * matches, proceed.  Take work from the coreloop's queue.  When done,
 * write FALSE to the gate closed var.
 */
void inline
gateProtected_stealWorkInto( SchedSlot *currSlot,
                             VMSQueueStruc *myReadyToAnimateQ,
                             VirtProcr *masterPr )
 {
   VirtProcr     *stolenPr;
   int32          coreIdx, i, haveAVictim, gotLock;
   VMSQueueStruc *victimsQ;

   volatile GateStruc *vicGate;
   int32               coreMightBeInProtected;

   //see if any other cores have work available to steal
   haveAVictim = FALSE;
   coreIdx     = masterPr->coreAnimatedBy;
   for( i = 0; i < NUM_CORES - 1; i++ )
    {
      if( coreIdx >= NUM_CORES - 1 )
       { coreIdx = 0;
       }
      else
       { coreIdx++;
       }
      victimsQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
      if( numInVMSQ( victimsQ ) > 0 )
       { haveAVictim = TRUE;
         vicGate     = _VMSMasterEnv->workStealingGates[ coreIdx ];
         break;
       }
    }
   if( !haveAVictim ) return; //no work to steal, exit

   //have a victim core, now get the stealer-lock
   gotLock = __sync_bool_compare_and_swap( &(_VMSMasterEnv->workStealingLock),
                                           UNLOCKED, LOCKED );
   if( !gotLock ) return; //go back to core loop, which will re-start master

   //====== Start Gate-protection =======
   vicGate->gateClosed = TRUE;

   coreMightBeInProtected = vicGate->preGateProgress != vicGate->exitProgress;
   while( coreMightBeInProtected )
    { //wait until sure
      if( vicGate->preGateProgress == vicGate->waitProgress )
         coreMightBeInProtected = FALSE;
      if( vicGate->preGateProgress == vicGate->exitProgress )
         coreMightBeInProtected = FALSE;
    }

   stolenPr = readVMSQ( victimsQ );

   vicGate->gateClosed = FALSE;
   //======= End Gate-protection =======

   if( stolenPr != NULL ) //victim could have been in protected and taken
    { currSlot->procrAssignedToSlot = stolenPr;
      stolenPr->schedSlot           = currSlot;
      currSlot->needsProcrAssigned  = FALSE;

      writeVMSQ( stolenPr, myReadyToAnimateQ );
    }

   //unlock the work stealing lock
   _VMSMasterEnv->workStealingLock = UNLOCKED;
 }
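
/*ILLUSTRATIVE SKETCH -- not part of the original file.
 *The gate comment above describes both sides of the protocol, but only the
 * stealer's side is implemented in this file.  For clarity, here is a
 * minimal sketch of the victim coreloop's side: bump the pre-gate progress,
 * publish wait progress while the gate is closed, take from the queue inside
 * the protected region, then publish exit progress.  The pre-gate measure is
 * re-bumped on every retry of the gate, so a stale wait measure can never
 * falsely match it.  The GateStruc field names come from the stealer code
 * above; the function itself and its parameters are assumptions for
 * illustration only.
 */
#if 0
VirtProcr *
coreLoop_passGate_sketch( volatile GateStruc *myGate, VMSQueueStruc *myQ )
 {
   VirtProcr *procrToAnimate;

   for(;;)
    { myGate->preGateProgress += 1;    //progress measure just before the gate
      if( !myGate->gateClosed ) break; //gate open -- go through it
      myGate->waitProgress = myGate->preGateProgress; //show stuck in waiting room
      while( myGate->gateClosed );     //spin until the stealer re-opens the gate
    }

   //protected region -- the stealer stays out while pre-gate matches neither
   // the wait nor the exit measure
   procrToAnimate = readVMSQ( myQ );

   myGate->exitProgress = myGate->preGateProgress; //show made it all the way out
   return procrToAnimate;
 }
#endif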
