/*
 * Copyright 2010 OpenSourceStewardshipFoundation
 *
 * Licensed under BSD
 */


#include <stdio.h>
#include <stddef.h>

#include "VMS.h"
#include "ProcrContext.h"

//===========================================================================
inline void
stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
               VirtProcr *masterPr );

inline void
gateProtected_stealWorkInto( SchedSlot *currSlot,
                             VMSQueueStruc *myReadyToAnimateQ,
                             VirtProcr *masterPr );

//===========================================================================

/*This code is animated by the virtual Master processor.
 *
 *It polls each sched slot exactly once and hands any requests made by a
 * newly done slave to the "request handler" plug-in function.
 *
 *Any slots that need a virt procr assigned are given to the "schedule"
 * plug-in function, which tries to assign a virt procr (slave) to each.
 *
 *When all slots needing a processor have been given to the schedule plug-in,
 * a fraction of the procrs successfully scheduled are put into the
 * work queue, then a continuation of this function is put in, then the rest
 * of the virt procrs that were successfully scheduled.
 *
 *The first thing the continuation does is busy-wait until the previous
 * animation completes. This is because a continuation may (rarely) sneak
 * through the queue before the previous continuation has finished putting
 * the second batch of scheduled slaves in -- the only race condition here.
 */

/*May 29, 2010 -- birth a Master during init, so that the first core loop to
 * start running gets it and does all the stuff for a newly born VP --
 * from then on it will be running a continuation, but it performs its own
 * suspension directly at the end of the master loop.
 *So VMS__init just births the master virtual processor the same way it
 * births all the others -- then does any extra setup needed and puts it
 * into the work queue.
 *However, this means masterEnv has to be a global static volatile, the
 * same way readyToAnimateQ is in the core loop. -- for performance, put
 * the jump to the core loop directly in here, and have it jump directly
 * back.
 *
 *
 *Aug 18, 2010 -- Going to a separate MasterVP for each core, to see if this
 * avoids the suspected bug in the system stack that causes bizarre faults
 * at random places in the system code.
 *
 *So, this function is shared by all of the MasterVPs -- meaning it can't
 * rely on a particular stack and frame; each MasterVP that animates this
 * function has a different one.
 *
 *At this point, the masterLoop does not write itself into the queue anymore.
 * Instead, the coreLoop acquires the masterLock when it has nothing to
 * animate, and then animates its own masterLoop (a sketch of that hand-off
 * appears after masterLoop, below). However, still try to put several
 * AppVPs into the queue, to amortize the startup cost of switching to the
 * MasterVP. Note, don't have to worry much about the latency of requests,
 * because most requests generate work for the same core -- the only latency
 * issue is the case where the other cores are starved and one core's
 * requests generate work for them -- so keep the max in the queue to 3 or 4.
 */
void masterLoop( void *initData, VirtProcr *animatingPr )
{
   int32          slotIdx, numSlotsFilled;
   VirtProcr     *schedVirtPr;
   SchedSlot     *currSlot, **schedSlots;
   MasterEnv     *masterEnv;
   VMSQueueStruc *readyToAnimateQ;

   SlaveScheduler slaveScheduler;
   RequestHandler requestHandler;
   void          *semanticEnv;

   int32          thisCoresIdx;
   VirtProcr     *masterPr;
   volatile VirtProcr *volatileMasterPr;

   volatileMasterPr = animatingPr;
   masterPr = (VirtProcr*)volatileMasterPr; //used to force re-define after jmp

   //The first animation of each MasterVP will animate this part of the
   // setup code (the VP creator sets up the stack as if this function
   // were called normally, but control actually gets here by jmp).
   //So, set up values for the stack ptr, jmp pt, and all that.
   //masterPr->nextInstrPt = &&masterLoopStartPt;


   //Note, got rid of writing the stack and frame ptr up here, because
   // only one core can ever animate a given MasterVP, so there's no need
   // to communicate a new frame and stack ptr to the MasterVP storage
   // before a second animation of that MasterVP can start on a different
   // core.
   //Also got rid of the busy-wait.


 //masterLoopStartPt:
 while(1){

   //============================= MEASUREMENT STUFF ========================
   #ifdef MEAS__TIME_MASTER
   //Total Master time includes one coreloop time -- just assume the core
   // loop time is the same for the Master as for AppVPs, even though it may
   // be smaller due to the higher predictability of the fixed jmp.
   saveLowTimeStampCountInto( masterPr->startMasterTSCLow );
   #endif
   //========================================================================

   masterEnv = (MasterEnv*)_VMSMasterEnv;

   //GCC may optimize so that it doesn't always re-define from frame-storage
   masterPr        = (VirtProcr*)volatileMasterPr; //just to make sure after jmp
   thisCoresIdx    = masterPr->coreAnimatedBy;
   readyToAnimateQ = masterEnv->readyToAnimateQs[thisCoresIdx];
   schedSlots      = masterEnv->allSchedSlots[thisCoresIdx];

   requestHandler  = masterEnv->requestHandler;
   slaveScheduler  = masterEnv->slaveScheduler;
   semanticEnv     = masterEnv->semanticEnv;


   //Poll each slot's Done flag
   numSlotsFilled = 0;
   for( slotIdx = 0; slotIdx < NUM_SCHED_SLOTS; slotIdx++ )
    {
      currSlot = schedSlots[ slotIdx ];

      if( currSlot->workIsDone )
       {
         currSlot->workIsDone         = FALSE;
         currSlot->needsProcrAssigned = TRUE;

         //process requests from slave to master
         //====================== MEASUREMENT STUFF ===================
         #ifdef MEAS__TIME_PLUGIN
         int32 startStamp1, endStamp1;
         saveLowTimeStampCountInto( startStamp1 );
         #endif
         #ifdef MEAS__PERF_COUNTERS
         int lastRecordIdx =
            currSlot->procrAssignedToSlot->counter_history_array_info->numInArray - 1;
         CounterRecord *lastRecord =
            currSlot->procrAssignedToSlot->counter_history[lastRecordIdx];
         lastRecord->req_core = thisCoresIdx;
         saveCyclesAndInstrs( thisCoresIdx, lastRecord->next_task_req_cycles,
                              lastRecord->next_task_req_instrs );
         //End of task, start of next task
         //print counters from last run
         addToDynArray( (void*)lastRecord, masterEnv->counter_history_array_info );
         print_record_csv_to_file( lastRecord, _VMSMasterEnv->counteroutput );
         //print_record_human_readable( lastRecord );

         //create new entry in record array
         CounterRecord *newRecord = VMS__malloc( sizeof(CounterRecord) );
         newRecord->req_core      = thisCoresIdx;
         newRecord->vp_id         = currSlot->procrAssignedToSlot->procrID;
         newRecord->task_position = lastRecord->task_position + 1;
         newRecord->req_cycles    = lastRecord->next_task_req_cycles;
         newRecord->req_instrs    = lastRecord->next_task_req_instrs;
         getReturnAddressBeforeLibraryCall( currSlot->procrAssignedToSlot,
                                            &(newRecord->addr_of_libcall_for_req) );
         addToDynArray( (void*)newRecord,
                        currSlot->procrAssignedToSlot->counter_history_array_info );
         lastRecord = newRecord;
         #endif
         //============================================================
         (*requestHandler)( currSlot->procrAssignedToSlot, semanticEnv );
         //====================== MEASUREMENT STUFF ===================
         #ifdef MEAS__TIME_PLUGIN
         saveLowTimeStampCountInto( endStamp1 );
         addIntervalToHist( startStamp1, endStamp1,
                            _VMSMasterEnv->reqHdlrLowTimeHist );
         addIntervalToHist( startStamp1, endStamp1,
                            _VMSMasterEnv->reqHdlrHighTimeHist );
         #endif
         #ifdef MEAS__PERF_COUNTERS
         //done with constraints check
         saveCyclesAndInstrs( thisCoresIdx, lastRecord->sc_done_cycles,
                              lastRecord->sc_done_instrs );
         saveLowTimeStampCountInto( lastRecord->blocked_timestamp );
         #endif
         //============================================================
       }
      if( currSlot->needsProcrAssigned )
       { //give slot a new virt procr
         #ifdef MEAS__PERF_COUNTERS
         //start assigner
         uint64 tmp_cycles;
         uint64 tmp_instrs;
         saveCyclesAndInstrs( thisCoresIdx, tmp_cycles, tmp_instrs );
         #endif
         schedVirtPr =
            (*slaveScheduler)( semanticEnv, thisCoresIdx, slotIdx );

         if( schedVirtPr != NULL )
          { currSlot->procrAssignedToSlot = schedVirtPr;
            schedVirtPr->schedSlot        = currSlot;
            currSlot->needsProcrAssigned  = FALSE;
            numSlotsFilled += 1;

            #ifdef MEAS__PERF_COUNTERS
            //end assigner
            int lastRecordIdx =
               currSlot->procrAssignedToSlot->counter_history_array_info->numInArray - 1;
            CounterRecord *lastRecord =
               currSlot->procrAssignedToSlot->counter_history[lastRecordIdx];
            lastRecord->assigning_core      = thisCoresIdx;
            lastRecord->start_assign_cycles = tmp_cycles;
            lastRecord->start_assign_instrs = tmp_instrs;
            saveCyclesAndInstrs( thisCoresIdx, lastRecord->end_assign_cycles,
                                 lastRecord->end_assign_instrs );
            #endif

            writeVMSQ( schedVirtPr, readyToAnimateQ );
          }
       }
    }


   #ifdef USE_WORK_STEALING
   //If no slots were filled, there is no more local work, so look for
   // work to steal.
   if( numSlotsFilled == 0 )
    { gateProtected_stealWorkInto( currSlot, readyToAnimateQ, masterPr );
    }
   #endif


   #ifdef MEAS__TIME_MASTER
   saveLowTimeStampCountInto( masterPr->endMasterTSCLow );
   #endif

   masterSwitchToCoreLoop( animatingPr );
   flushRegisters();
 }//end while(1)

}//masterLoop
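

/*Illustrative sketch only, not part of the build: one shape the coreLoop
 * hand-off described in the Aug 18 note above could take.  The names
 * masterLock, masterVPs, and switchToVP are assumptions for illustration --
 * they are not the actual VMS fields or calls.
 */
#if 0
void
coreLoop_idleHandoff( int32 thisCoresIdx )
 {
   //Nothing in this core's readyToAnimateQ -- contend for the master lock,
   // then animate this core's own MasterVP, which runs masterLoop above.
   // masterLoop refills the queue and jumps back via masterSwitchToCoreLoop.
   if( __sync_bool_compare_and_swap( &(_VMSMasterEnv->masterLock),
                                     UNLOCKED, LOCKED ) )
    { switchToVP( _VMSMasterEnv->masterVPs[ thisCoresIdx ] ); //jmp, not call
      _VMSMasterEnv->masterLock = UNLOCKED;
    }
 }
#endif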


/*This has a race condition -- the coreloops are accessing their own queues
 * at the same time that this work-stealer, on a different core, is trying
 * to read from them. (The gate-protected version below closes this race.)
 */
inline void
stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
               VirtProcr *masterPr )
 {
   VirtProcr     *stolenPr;
   int32          coreIdx, i;
   VMSQueueStruc *currQ;

   stolenPr = NULL;
   coreIdx  = masterPr->coreAnimatedBy;
   for( i = 0; i < NUM_CORES - 1; i++ )
    {
      if( coreIdx >= NUM_CORES - 1 )
       { coreIdx = 0;
       }
      else
       { coreIdx++;
       }
      currQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
      if( numInVMSQ( currQ ) > 0 )
       { stolenPr = readVMSQ( currQ );
         break;
       }
    }

   if( stolenPr != NULL )
    { currSlot->procrAssignedToSlot = stolenPr;
      stolenPr->schedSlot           = currSlot;
      currSlot->needsProcrAssigned  = FALSE;

      writeVMSQ( stolenPr, readyToAnimateQ );
    }
 }

/*This algorithm makes the common case fast. Make the coreloop passive,
 * and have it show its progress. Make the stealer control a gate that the
 * coreloop has to pass.
 *To avoid interference, only one stealer runs at a time. Use a global
 * stealer-lock.
 *
 *The pattern is based on a gate -- the stealer shuts the gate, then
 * monitors to be sure any coreloop already past it makes it all the way
 * out before starting.
 *So, have a "progress" measure just before the gate, then two after it:
 * one in a "waiting room" outside the gate, the other at the exit.
 *The stealer first shuts the gate, then checks the progress measure
 * outside it, then looks to see whether the progress measure at the exit
 * is the same. If yes, it knows the protected area is empty, because there
 * is no other way in and the last coreloop to get in also exited.
 *If the progress measure at the exit is not the same, the stealer goes
 * into a loop checking both the waiting-area and the exit progress-measures
 * until one of them shows the same value as the measure outside the gate.
 * Might as well re-read the measure outside the gate each time around,
 * just to be sure. It is guaranteed that one of the two will eventually
 * match the one outside the gate.
 *
 *Here's an informal proof of correctness:
 *The gate can be closed at any point, and there are only four cases:
 * 1) coreloop made it past the gate-closing but not yet past the exit
 * 2) coreloop made it past the pre-gate progress update but not yet past
 *    the gate
 * 3) coreloop is right before the pre-gate update
 * 4) coreloop is past the exit and far from the pre-gate update.
 *
 * Covering the cases in reverse order:
 * 4) is not a problem -- the stealer reads the pre-gate progress, sees
 *    that it matches the exit progress, and the gate is closed, so the
 *    stealer can proceed.
 * 3) the stealer reads the pre-gate progress just after the coreloop
 *    updates it, so the stealer loops until the coreloop makes the
 *    wait-progress match the pre-gate progress, and then can proceed.
 * 2) same as 3.
 * 1) the stealer reads the pre-gate progress, sees that it differs from
 *    the exit progress, so it loops until the exit matches the pre-gate;
 *    now it knows the coreloop is not in the protected region and cannot
 *    get back in, so it can proceed.
 *
 *Implementation for the stealer:
 *
 *First, acquire the stealer lock -- only cores with no work to do will
 * compete to steal, so having only one stealer is not a big performance
 * penalty -- a system with plenty of work will rarely have multiple
 * stealers, and in a system with little work it doesn't matter.
 *
 *Note, all variables used to communicate between stealer and victims
 * follow a single-reader, single-writer pattern.
 *
 *So, scan the queues of the core loops until a non-empty one is found.
 * Each core has its own list that it scans. The list goes in order from
 * the closest core to the furthest, so it steals first from close cores.
 * Later, can add taking info from the app about overlapping footprints,
 * scanning all the others, then choosing the work whose footprint overlaps
 * most with the contents of this core's cache.
 *
 *Now, have a victim to take work from. So, shut the gate in that coreloop
 * by setting its "gate closed" var to TRUE.
 *Then, read the core's pre-gate progress and compare it to the core's exit
 * progress.
 *If the same, proceed to take work from the coreloop's queue. When done,
 * write FALSE to the gate-closed var.
 *If different, enter a loop that reads the pre-gate progress, then
 * compares it to the exit progress, then to the wait progress. When one
 * of the two matches, proceed: take work from the coreloop's queue, and
 * when done, write FALSE to the gate-closed var. (A sketch of the victim
 * side of this protocol appears at the end of this file.)
 */
inline void
gateProtected_stealWorkInto( SchedSlot     *currSlot,
                             VMSQueueStruc *myReadyToAnimateQ,
                             VirtProcr     *masterPr )
 {
   VirtProcr     *stolenPr;
   int32          coreIdx, i, haveAVictim, gotLock;
   VMSQueueStruc *victimsQ;

   volatile GateStruc *vicGate;
   int32               coreMightBeInProtected;


   //see if any other cores have work available to steal
   haveAVictim = FALSE;
   coreIdx     = masterPr->coreAnimatedBy;
   for( i = 0; i < NUM_CORES - 1; i++ )
    {
      if( coreIdx >= NUM_CORES - 1 )
       { coreIdx = 0;
       }
      else
       { coreIdx++;
       }
      victimsQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
      if( numInVMSQ( victimsQ ) > 0 )
       { haveAVictim = TRUE;
         vicGate     = _VMSMasterEnv->workStealingGates[ coreIdx ];
         break;
       }
    }
   if( !haveAVictim ) return; //no work to steal, exit

   //have a victim core, now get the stealer-lock
   gotLock = __sync_bool_compare_and_swap( &(_VMSMasterEnv->workStealingLock),
                                           UNLOCKED, LOCKED );
   if( !gotLock ) return; //go back to core loop, which will re-start master


   //====== Start Gate-protection =======
   vicGate->gateClosed = TRUE;
   coreMightBeInProtected = vicGate->preGateProgress != vicGate->exitProgress;
   while( coreMightBeInProtected )
    { //wait until sure
      if( vicGate->preGateProgress == vicGate->waitProgress )
         coreMightBeInProtected = FALSE;
      if( vicGate->preGateProgress == vicGate->exitProgress )
         coreMightBeInProtected = FALSE;
    }

   stolenPr = readVMSQ( victimsQ );

   vicGate->gateClosed = FALSE;
   //======= End Gate-protection =======


   if( stolenPr != NULL ) //victim could have been in protected and taken it
    { currSlot->procrAssignedToSlot = stolenPr;
      stolenPr->schedSlot           = currSlot;
      currSlot->needsProcrAssigned  = FALSE;

      writeVMSQ( stolenPr, myReadyToAnimateQ );
    }

   //unlock the work-stealing lock
   _VMSMasterEnv->workStealingLock = UNLOCKED;
 }
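

/*Illustrative sketch only, not part of the build: the victim's side of the
 * gate protocol described above.  The GateStruc field names match their use
 * in gateProtected_stealWorkInto, but the exact shape of GateStruc and the
 * coreloop code here are assumptions for illustration, not the actual
 * coreloop implementation.
 */
#if 0
typedef struct
 { volatile int32 gateClosed;      //written by the stealer, read by coreloop
   volatile int32 preGateProgress; //written by the coreloop, read by stealer
   volatile int32 waitProgress;    // "
   volatile int32 exitProgress;    // "
 }
GateStruc;

//Inside the coreloop, wrapped around its access to its own queue:
void
coreLoop_passGate( VMSQueueStruc *myQ, volatile GateStruc *myGate )
 {
   VirtProcr *prToAnimate;

   myGate->preGateProgress += 1; //announce intent to enter protected region
   while( myGate->gateClosed )   //a stealer may have shut the gate
    { //show the stealer this coreloop is stuck in the waiting room
      myGate->waitProgress = myGate->preGateProgress;
    }
   prToAnimate = readVMSQ( myQ ); //protected region: touch own queue
   // ...animate prToAnimate...
   myGate->exitProgress = myGate->preGateProgress; //show made it out
 }
#endif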