annotate MasterLoop.c @ 218:82f7defac851

Added backoff to core loop, cleaned up core loop code, cleaned comments
author Some Random Person <seanhalle@yahoo.com>
date Sat, 10 Mar 2012 21:48:53 -0800
parents a18539c0bc37
children
rev   line source
seanhalle@208 1 /*
seanhalle@208 2 * Copyright 2010 OpenSourceStewardshipFoundation
seanhalle@208 3 *
seanhalle@208 4 * Licensed under BSD
seanhalle@208 5 */
seanhalle@208 6
seanhalle@208 7
seanhalle@208 8
seanhalle@208 9 #include <stdio.h>
seanhalle@208 10 #include <stddef.h>
seanhalle@208 11
seanhalle@208 12 #include "VMS.h"
seanhalle@208 13
seanhalle@208 14
seanhalle@208 15 //===========================================================================
seanhalle@208 16 void inline
seanhalle@208 17 stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
seanhalle@209 18 SlaveVP *masterVP );
seanhalle@208 19
seanhalle@208 20 //===========================================================================
seanhalle@208 21
seanhalle@208 22
seanhalle@208 23
seanhalle@208 24 /*This code is animated by the virtual Master processor.
seanhalle@208 25 *
seanhalle@208 26 *Polls each sched slot exactly once, hands any requests made by a newly
seanhalle@208 27 * done slave to the "request handler" plug-in function
seanhalle@208 28 *
seanhalle@209 29 *Any slots that need a Slv assigned are given to the "schedule"
seanhalle@209 30 * plug-in function, which tries to assign a Slv (slave) to it.
seanhalle@208 31 *
seanhalle@208 32 *When all slots needing a processor have been given to the schedule plug-in,
seanhalle@209 33 * a fraction of the slaves successfully scheduled are put into the
seanhalle@208 34 * work queue, then a continuation of this function is put in, then the rest
seanhalle@209 35 * of the Slvs that were successfully scheduled.
seanhalle@208 36 *
seanhalle@208 37 *The first thing the continuation does is busy-wait until the previous
seanhalle@208 38 * animation completes. This is because an (unlikely) continuation may
seanhalle@208 39 * sneak through queue before previous continuation is done putting second
seanhalle@208 40 * part of scheduled slaves in, which is the only race condition.
seanhalle@208 41 *
seanhalle@208 42 */
seanhalle@208 43
seanhalle@216 44 /*May 29, 2010 -- birth a Master during init so that first core controller to
seanhalle@208 45 * start running gets it and does all the stuff for a newly born --
seanhalle@208 46 * from then on, will be doing continuation, but do suspension self
seanhalle@208 47 * directly at end of master loop
seanhalle@209 48 *So VMS_WL__init just births the master virtual processor same way it births
seanhalle@208 49 * all the others -- then does any extra setup needed and puts it into the
seanhalle@208 50 * work queue.
seanhalle@208 51 *However means have to make masterEnv a global static volatile the same way
seanhalle@216 52 * did with readyToAnimateQ in core controller. -- for performance, put the
seanhalle@216 53 * jump to the core controller directly in here, and have it directly jump back.
seanhalle@208 54 *
seanhalle@208 55 *
seanhalle@208 56 *Aug 18, 2010 -- Going to a separate MasterVP for each core, to see if this
seanhalle@208 57 * avoids the suspected bug in the system stack that causes bizarre faults
seanhalle@208 58 * at random places in the system code.
seanhalle@208 59 *
seanhalle@208 60 *So, this function is coupled to each of the MasterVPs, -- meaning this
seanhalle@208 61 * function can't rely on a particular stack and frame -- each MasterVP that
seanhalle@208 62 * animates this function has a different one.
seanhalle@208 63 *
seanhalle@208 64 *At this point, the masterLoop does not write itself into the queue anymore,
seanhalle@216 65 * instead, the coreCtlr acquires the masterLock when it has nothing to
seanhalle@208 66 * animate, and then animates its own masterLoop. However, still try to put
seanhalle@209 67 * several AppSlvs into the queue to amortize the startup cost of switching
seanhalle@208 68 * to the MasterVP. Note, don't have to worry about latency of requests much
seanhalle@208 69 * because most requests generate work for same core -- only latency issue
seanhalle@208 70 * is case when other cores starved and one core's requests generate work
seanhalle@208 71 * for them -- so keep the max in the queue to 3 or 4.
seanhalle@208 72 */
/*Master scheduling loop -- animated by each core's virtual MasterVP.
 *
 *Entry is by jmp: the slave creator sets up the stack as if this function
 * were called normally, so nothing from before the jmp can be trusted to
 * still be live in registers.  All loop state is re-derived from
 * _VMSMasterEnv and the (volatile-laundered) masterVP at the top of each
 * pass.
 *
 *Each pass polls every sched slot exactly once:
 *  - a slot whose slave just finished has its requests handed to the
 *    request-handler plug-in and is marked as needing a new slave;
 *  - every slot needing a slave is offered to the assigner plug-in.
 *If no slot received a slave, optionally try to steal work from another
 * core, then switch back to the core controller.
 *
 *Param initData:     unused here -- part of the common VP-function signature
 *Param animatingSlv: the MasterVP animating this function; exactly one
 *                    MasterVP exists per core, so its fields are only ever
 *                    touched by that core (no cross-core races on it)
 */
void masterLoop( void *initData, SlaveVP *animatingSlv )
 {
   int32           slotIdx, numSlotsFilled;
   SlaveVP        *schedSlaveVP;
   SchedSlot      *currSlot, **schedSlots;
   MasterEnv      *masterEnv;
   VMSQueueStruc  *readyToAnimateQ;

   Sched_Assigner  slaveAssigner;   //plug-in: picks next slave for a slot
   RequestHandler  requestHandler;  //plug-in: services a done slave's requests
   void           *semanticEnv;     //opaque state owned by the plug-ins

   int32     thisCoresIdx;
   SlaveVP  *masterVP;
   volatile SlaveVP *volatileMasterVP;

   //Launder the VP ptr through a volatile so it can be re-materialized
   // after each jmp back into this frame -- otherwise GCC may keep it in a
   // register that is stale after the jmp.
   volatileMasterVP = animatingSlv;
   masterVP = (SlaveVP*)volatileMasterVP; //used to force re-define after jmp

   //First animation of each MasterVP will in turn animate this part
   // of setup code.. (Slv creator sets up the stack as if this function
   // was called normally, but actually get here by jmp)
   //So, setup values about stack ptr, jmp pt and all that
   //masterVP->resumeInstrPtr = &&masterLoopStartPt;

   //Note: no writing of stack/frame ptr up here, and no busy-wait --
   // only one core can ever animate a given MasterVP, so there is no need
   // to publish a new frame/stack ptr before a second animation could
   // start on a different core.

   //masterLoopStartPt:
   while(1){

      MEAS__Capture_Pre_Master_Point

      masterEnv = (MasterEnv*)_VMSMasterEnv;

      //GCC may optimize so doesn't always re-define from frame-storage
      masterVP = (SlaveVP*)volatileMasterVP; //just to make sure after jmp
      thisCoresIdx    = masterVP->coreAnimatedBy;
      readyToAnimateQ = masterEnv->readyToAnimateQs[thisCoresIdx];
      schedSlots      = masterEnv->allSchedSlots[thisCoresIdx];

      requestHandler  = masterEnv->requestHandler;
      slaveAssigner   = masterEnv->slaveAssigner;
      semanticEnv     = masterEnv->semanticEnv;


      //Poll each slot's Done flag
      numSlotsFilled = 0;
      for( slotIdx = 0; slotIdx < NUM_SCHED_SLOTS; slotIdx++)
       {
         currSlot = schedSlots[ slotIdx ];

         if( currSlot->workIsDone )
          {
            currSlot->workIsDone         = FALSE;
            currSlot->needsSlaveAssigned = TRUE;

            MEAS__startReqHdlr;

            //process the requests made by the slave (held inside slave struc)
            (*requestHandler)( currSlot->slaveAssignedToSlot, semanticEnv );

            MEAS__endReqHdlr;
          }
         if( currSlot->needsSlaveAssigned )
          { //give slot a new Slv
            schedSlaveVP =
               (*slaveAssigner)( semanticEnv, thisCoresIdx );

            if( schedSlaveVP != NULL )
             { currSlot->slaveAssignedToSlot = schedSlaveVP;
               schedSlaveVP->schedSlot       = currSlot;
               currSlot->needsSlaveAssigned  = FALSE;
               numSlotsFilled += 1;
             }
          }
       }


      #ifdef SYS__TURN_ON_WORK_STEALING
      //If no slots filled, means no more work, look for work to steal.
      //NOTE(review): currSlot here is the LAST slot polled; when
      // numSlotsFilled == 0 it still needs a slave, so the stealer may
      // fill it -- relies on NUM_SCHED_SLOTS >= 1 (confirm).
      if( numSlotsFilled == 0 )
       { gateProtected_stealWorkInto( currSlot, readyToAnimateQ, masterVP );
       }
      #endif

      MEAS__Capture_Post_Master_Point;

      //Hand the core back to the core controller; execution resumes here
      // the next time the controller re-animates this MasterVP.
      masterSwitchToCoreCtlr(animatingSlv);
      //presumably forces callee-saved regs out to the stack so they survive
      // the jmp-based switch -- NOTE(review): confirm against impl
      flushRegisters();
    }//MasterLoop


 }
seanhalle@208 173
seanhalle@208 174
seanhalle@208 175
seanhalle@208 176 /*This has a race condition -- the coreloops are accessing their own queues
seanhalle@208 177 * at the same time that this work-stealer on a different core is trying to
seanhalle@208 178 */
seanhalle@208 179 void inline
seanhalle@208 180 stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
seanhalle@209 181 SlaveVP *masterVP )
seanhalle@208 182 {
seanhalle@209 183 SlaveVP *stolenSlv;
seanhalle@208 184 int32 coreIdx, i;
seanhalle@208 185 VMSQueueStruc *currQ;
seanhalle@208 186
seanhalle@209 187 stolenSlv = NULL;
seanhalle@209 188 coreIdx = masterVP->coreAnimatedBy;
seanhalle@208 189 for( i = 0; i < NUM_CORES -1; i++ )
seanhalle@208 190 {
seanhalle@208 191 if( coreIdx >= NUM_CORES -1 )
seanhalle@208 192 { coreIdx = 0;
seanhalle@208 193 }
seanhalle@208 194 else
seanhalle@208 195 { coreIdx++;
seanhalle@208 196 }
seanhalle@208 197 currQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
seanhalle@208 198 if( numInVMSQ( currQ ) > 0 )
seanhalle@209 199 { stolenSlv = readVMSQ (currQ );
seanhalle@208 200 break;
seanhalle@208 201 }
seanhalle@208 202 }
seanhalle@208 203
seanhalle@209 204 if( stolenSlv != NULL )
seanhalle@209 205 { currSlot->slaveAssignedToSlot = stolenSlv;
seanhalle@209 206 stolenSlv->schedSlot = currSlot;
seanhalle@209 207 currSlot->needsSlaveAssigned = FALSE;
seanhalle@208 208
seanhalle@209 209 writeVMSQ( stolenSlv, readyToAnimateQ );
seanhalle@208 210 }
seanhalle@208 211 }
seanhalle@208 212
seanhalle@208 213 /*This algorithm makes the common case fast. Make the coreloop passive,
seanhalle@208 214 * and show its progress. Make the stealer control a gate that coreloop
seanhalle@208 215 * has to pass.
seanhalle@208 216 *To avoid interference, only one stealer at a time. Use a global
seanhalle@208 217 * stealer-lock.
seanhalle@208 218 *
seanhalle@208 219 *The pattern is based on a gate -- stealer shuts the gate, then monitors
seanhalle@208 220 * to be sure any already past make it all the way out, before starting.
seanhalle@208 221 *So, have a "progress" measure just before the gate, then have two after it,
seanhalle@208 222 * one is in a "waiting room" outside the gate, the other is at the exit.
seanhalle@208 223 *Then, the stealer first shuts the gate, then checks the progress measure
seanhalle@208 224 * outside it, then looks to see if the progress measure at the exit is the
seanhalle@208 225 * same. If yes, it knows the protected area is empty 'cause no other way
seanhalle@208 226 * to get in and the last to get in also exited.
seanhalle@208 227 *If the progress measure at the exit is not the same, then the stealer goes
seanhalle@208 228 * into a loop checking both the waiting-area and the exit progress-measures
seanhalle@208 229 * until one of them shows the same as the measure outside the gate. Might
seanhalle@208 230 * as well re-read the measure outside the gate each go around, just to be
seanhalle@208 231 * sure. It is guaranteed that one of the two will eventually match the one
seanhalle@208 232 * outside the gate.
seanhalle@208 233 *
seanhalle@208 234 *Here's an informal proof of correctness:
seanhalle@208 235 *The gate can be closed at any point, and have only four cases:
seanhalle@208 236 * 1) coreloop made it past the gate-closing but not yet past the exit
seanhalle@208 237 * 2) coreloop made it past the pre-gate progress update but not yet past
seanhalle@208 238 * the gate,
seanhalle@208 239 * 3) coreloop is right before the pre-gate update
seanhalle@208 240 * 4) coreloop is past the exit and far from the pre-gate update.
seanhalle@208 241 *
seanhalle@208 242 * Covering the cases in reverse order,
seanhalle@208 243 * 4) is not a problem -- stealer will read pre-gate progress, see that it
seanhalle@208 244 * matches exit progress, and the gate is closed, so stealer can proceed.
seanhalle@208 245 * 3) stealer will read pre-gate progress just after coreloop updates it..
seanhalle@208 246 * so stealer goes into a loop until the coreloop causes wait-progress
seanhalle@208 247 * to match pre-gate progress, so then stealer can proceed
seanhalle@208 248 * 2) same as 3..
seanhalle@208 249 * 1) stealer reads pre-gate progress, sees that it's different than exit,
seanhalle@208 250 * so goes into loop until exit matches pre-gate, now it knows coreloop
seanhalle@208 251 * is not in protected and cannot get back in, so can proceed.
seanhalle@208 252 *
seanhalle@208 253 *Implementation for the stealer:
seanhalle@208 254 *
seanhalle@208 255 *First, acquire the stealer lock -- only cores with no work to do will
seanhalle@208 256 * compete to steal, so not a big performance penalty having only one --
seanhalle@208 257 * will rarely have multiple stealers in a system with plenty of work -- and
seanhalle@208 258 * in a system with little work, it doesn't matter.
seanhalle@208 259 *
seanhalle@208 260 *Note, have single-reader, single-writer pattern for all variables used to
seanhalle@208 261 * communicate between stealer and victims
seanhalle@208 262 *
seanhalle@216 263 *So, scan the queues of the core controllers, until find non-empty. Each core
seanhalle@208 264 * has its own list that it scans. The list goes in order from closest to
seanhalle@208 265 * furthest core, so it steals first from close cores. Later can add
seanhalle@208 266 * taking info from the app about overlapping footprints, and scan all the
seanhalle@208 267 * others then choose work with the most footprint overlap with the contents
seanhalle@208 268 * of this core's cache.
seanhalle@208 269 *
seanhalle@208 270 *Now, have a victim want to take work from. So, shut the gate in that
seanhalle@208 271 * coreloop, by setting the "gate closed" var on its stack to TRUE.
seanhalle@208 272 *Then, read the core's pre-gate progress and compare to the core's exit
seanhalle@208 273 * progress.
seanhalle@208 274 *If same, can proceed to take work from the coreloop's queue. When done,
seanhalle@208 275 * write FALSE to gate closed var.
seanhalle@208 276 *If different, then enter a loop that reads the pre-gate progress, then
seanhalle@208 277 * compares to exit progress then to wait progress. When one of two
seanhalle@208 278 * matches, proceed. Take work from the coreloop's queue. When done,
seanhalle@208 279 * write FALSE to the gate closed var.
seanhalle@208 280 *
seanhalle@208 281 */
seanhalle@208 282 void inline
seanhalle@208 283 gateProtected_stealWorkInto( SchedSlot *currSlot,
seanhalle@208 284 VMSQueueStruc *myReadyToAnimateQ,
seanhalle@209 285 SlaveVP *masterVP )
seanhalle@208 286 {
seanhalle@209 287 SlaveVP *stolenSlv;
seanhalle@208 288 int32 coreIdx, i, haveAVictim, gotLock;
seanhalle@208 289 VMSQueueStruc *victimsQ;
seanhalle@208 290
seanhalle@208 291 volatile GateStruc *vicGate;
seanhalle@208 292 int32 coreMightBeInProtected;
seanhalle@208 293
seanhalle@208 294
seanhalle@208 295
seanhalle@208 296 //see if any other cores have work available to steal
seanhalle@208 297 haveAVictim = FALSE;
seanhalle@209 298 coreIdx = masterVP->coreAnimatedBy;
seanhalle@208 299 for( i = 0; i < NUM_CORES -1; i++ )
seanhalle@208 300 {
seanhalle@208 301 if( coreIdx >= NUM_CORES -1 )
seanhalle@208 302 { coreIdx = 0;
seanhalle@208 303 }
seanhalle@208 304 else
seanhalle@208 305 { coreIdx++;
seanhalle@208 306 }
seanhalle@208 307 victimsQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
seanhalle@208 308 if( numInVMSQ( victimsQ ) > 0 )
seanhalle@208 309 { haveAVictim = TRUE;
seanhalle@208 310 vicGate = _VMSMasterEnv->workStealingGates[ coreIdx ];
seanhalle@208 311 break;
seanhalle@208 312 }
seanhalle@208 313 }
seanhalle@208 314 if( !haveAVictim ) return; //no work to steal, exit
seanhalle@208 315
seanhalle@208 316 //have a victim core, now get the stealer-lock
seanhalle@208 317 gotLock =__sync_bool_compare_and_swap( &(_VMSMasterEnv->workStealingLock),
seanhalle@208 318 UNLOCKED, LOCKED );
seanhalle@216 319 if( !gotLock ) return; //go back to core controller, which will re-start master
seanhalle@208 320
seanhalle@208 321
seanhalle@208 322 //====== Start Gate-protection =======
seanhalle@208 323 vicGate->gateClosed = TRUE;
seanhalle@208 324 coreMightBeInProtected= vicGate->preGateProgress != vicGate->exitProgress;
seanhalle@208 325 while( coreMightBeInProtected )
seanhalle@208 326 { //wait until sure
seanhalle@208 327 if( vicGate->preGateProgress == vicGate->waitProgress )
seanhalle@208 328 coreMightBeInProtected = FALSE;
seanhalle@208 329 if( vicGate->preGateProgress == vicGate->exitProgress )
seanhalle@208 330 coreMightBeInProtected = FALSE;
seanhalle@208 331 }
seanhalle@208 332
seanhalle@209 333 stolenSlv = readVMSQ ( victimsQ );
seanhalle@208 334
seanhalle@208 335 vicGate->gateClosed = FALSE;
seanhalle@208 336 //======= End Gate-protection =======
seanhalle@208 337
seanhalle@208 338
seanhalle@209 339 if( stolenSlv != NULL ) //victim could have been in protected and taken
seanhalle@209 340 { currSlot->slaveAssignedToSlot = stolenSlv;
seanhalle@209 341 stolenSlv->schedSlot = currSlot;
seanhalle@209 342 currSlot->needsSlaveAssigned = FALSE;
seanhalle@208 343
seanhalle@209 344 writeVMSQ( stolenSlv, myReadyToAnimateQ );
seanhalle@208 345 }
seanhalle@208 346
seanhalle@208 347 //unlock the work stealing lock
seanhalle@208 348 _VMSMasterEnv->workStealingLock = UNLOCKED;
seanhalle@208 349 }