annotate MasterLoop.c @ 209:0c83ea8adefc

Close to compilable version of common_ancestor -- still includes HW dep stuff
author Some Random Person <seanhalle@yahoo.com>
date Sun, 04 Mar 2012 14:26:35 -0800
parents eaf7e4c58c9e
children a18539c0bc37
rev   line source
seanhalle@208 1 /*
seanhalle@208 2 * Copyright 2010 OpenSourceStewardshipFoundation
seanhalle@208 3 *
seanhalle@208 4 * Licensed under BSD
seanhalle@208 5 */
seanhalle@208 6
seanhalle@208 7
seanhalle@208 8
seanhalle@208 9 #include <stdio.h>
seanhalle@208 10 #include <stddef.h>
seanhalle@208 11
seanhalle@208 12 #include "VMS.h"
seanhalle@208 13
seanhalle@208 14
seanhalle@208 15 //===========================================================================
seanhalle@208 16 void inline
seanhalle@208 17 stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
seanhalle@209 18 SlaveVP *masterVP );
seanhalle@208 19
seanhalle@208 20 //===========================================================================
seanhalle@208 21
seanhalle@208 22
seanhalle@208 23
seanhalle@208 24 /*This code is animated by the virtual Master processor.
seanhalle@208 25 *
seanhalle@208 26 *Polls each sched slot exactly once, hands any requests made by a newly
seanhalle@208 27 * done slave to the "request handler" plug-in function
seanhalle@208 28 *
seanhalle@209 29 *Any slots that need a Slv assigned are given to the "schedule"
seanhalle@209 30 * plug-in function, which tries to assign a Slv (slave) to it.
seanhalle@208 31 *
seanhalle@208 32 *When all slots needing a processor have been given to the schedule plug-in,
seanhalle@209 33 * a fraction of the slaves successfully scheduled are put into the
seanhalle@208 34 * work queue, then a continuation of this function is put in, then the rest
seanhalle@209 35 * of the Slvs that were successfully scheduled.
seanhalle@208 36 *
seanhalle@208 37 *The first thing the continuation does is busy-wait until the previous
seanhalle@208 38 * animation completes. This is because an (unlikely) continuation may
seanhalle@208 39 * sneak through queue before previous continuation is done putting second
seanhalle@208 40 * part of scheduled slaves in, which is the only race condition.
seanhalle@208 41 *
seanhalle@208 42 */
seanhalle@208 43
seanhalle@208 44 /*May 29, 2010 -- birth a Master during init so that first core loop to
seanhalle@208 45 * start running gets it and does all the stuff for a newly born --
seanhalle@208 46 * from then on, will be doing continuation, but do suspension self
seanhalle@208 47 * directly at end of master loop
seanhalle@209 48 *So VMS_WL__init just births the master virtual processor same way it births
seanhalle@208 49 * all the others -- then does any extra setup needed and puts it into the
seanhalle@208 50 * work queue.
seanhalle@208 51 *However means have to make masterEnv a global static volatile the same way
seanhalle@208 52 * did with readyToAnimateQ in core loop. -- for performance, put the
seanhalle@208 53 * jump to the core loop directly in here, and have it directly jump back.
seanhalle@208 54 *
seanhalle@208 55 *
seanhalle@208 56 *Aug 18, 2010 -- Going to a separate MasterVP for each core, to see if this
seanhalle@208 57 * avoids the suspected bug in the system stack that causes bizarre faults
seanhalle@208 58 * at random places in the system code.
seanhalle@208 59 *
seanhalle@208 60 *So, this function is coupled to each of the MasterVPs, -- meaning this
seanhalle@208 61 * function can't rely on a particular stack and frame -- each MasterVP that
seanhalle@208 62 * animates this function has a different one.
seanhalle@208 63 *
seanhalle@208 64 *At this point, the masterLoop does not write itself into the queue anymore,
seanhalle@208 65 * instead, the coreLoop acquires the masterLock when it has nothing to
seanhalle@208 66 * animate, and then animates its own masterLoop. However, still try to put
seanhalle@209 67 * several AppSlvs into the queue to amortize the startup cost of switching
seanhalle@208 68 * to the MasterVP. Note, don't have to worry about latency of requests much
seanhalle@208 69 * because most requests generate work for same core -- only latency issue
seanhalle@208 70 * is case when other cores starved and one core's requests generate work
seanhalle@208 71 * for them -- so keep max in queue to 3 or 4.
seanhalle@208 72 */
/* Master scheduling loop -- animated by each core's MasterVP.
 *
 * One pass: poll every scheduling slot on this core; for each slot whose
 * slave has finished, hand the request to the plug-in request handler,
 * then ask the plug-in slave-assigner for a new slave and enqueue it in
 * this core's readyToAnimate queue.  If no slot could be filled, optionally
 * steal work from another core, then switch back to the core loop.
 *
 * initData:     unused here -- part of the standard VP entry signature
 * animatingSlv: the MasterVP animating this function (one per core)
 */
void masterLoop( void *initData, SlaveVP *animatingSlv )
 {
   int32            slotIdx, numSlotsFilled;
   SlaveVP         *schedSlaveVP;
   SchedSlot       *currSlot, **schedSlots;
   MasterEnv       *masterEnv;
   VMSQueueStruc   *readyToAnimateQ;

   Sched_Assigner   slaveAssigner;   //plug-in: picks a slave for a free slot
   RequestHandler   requestHandler;  //plug-in: processes a done slave's requests
   void            *semanticEnv;

   int32            thisCoresIdx;
   SlaveVP         *masterVP;
   volatile SlaveVP *volatileMasterVP;

   //Hold the MasterVP ptr in a volatile so the re-read inside the loop
   // cannot be cached in a register across the jmp into/out of this function.
   volatileMasterVP = animatingSlv;
   masterVP = (SlaveVP*)volatileMasterVP; //used to force re-define after jmp

   //First animation of each MasterVP will in turn animate this part
   // of setup code.. (Slv creator sets up the stack as if this function
   // was called normally, but actually get here by jmp)
   //So, setup values about stack ptr, jmp pt and all that
   //masterVP->resumeInstrPtr = &&masterLoopStartPt;

   //Note, got rid of writing the stack and frame ptr up here, because
   // only one core can ever animate a given MasterVP, so don't need to
   // communicate new frame and stack ptr to the MasterVP storage before a
   // second version of that MasterVP can get animated on a different core.
   //Also got rid of the busy-wait.

   //masterLoopStartPt:
   while(1)
    {
      MEAS__Capture_Pre_Master_Point

      masterEnv = (MasterEnv*)_VMSMasterEnv;

      //GCC may optimize so doesn't always re-define from frame-storage
      masterVP        = (SlaveVP*)volatileMasterVP; //just to make sure after jmp
      thisCoresIdx    = masterVP->coreAnimatedBy;
      readyToAnimateQ = masterEnv->readyToAnimateQs[thisCoresIdx];
      schedSlots      = masterEnv->allSchedSlots[thisCoresIdx];

      requestHandler  = masterEnv->requestHandler;
      slaveAssigner   = masterEnv->slaveAssigner;
      semanticEnv     = masterEnv->semanticEnv;

      //Poll each slot's Done flag
      numSlotsFilled = 0;
      for( slotIdx = 0; slotIdx < NUM_SCHED_SLOTS; slotIdx++)
       {
         currSlot = schedSlots[ slotIdx ];

         if( currSlot->workIsDone )
          {
            //Slave in this slot finished -- clear the flag and mark the slot
            // as needing a replacement slave assigned to it.
            currSlot->workIsDone         = FALSE;
            currSlot->needsSlaveAssigned = TRUE;

            //process requests from slave to master
            //====================== MEASUREMENT STUFF ===================
            #ifdef MEAS__TURN_ON_PLUGIN_MEAS
            int32 startStamp1, endStamp1;
            saveLowTimeStampCountInto( startStamp1 );
            #endif
            //============================================================
            (*requestHandler)( currSlot->slaveAssignedToSlot, semanticEnv );
            //====================== MEASUREMENT STUFF ===================
            #ifdef MEAS__TURN_ON_PLUGIN_MEAS
            saveLowTimeStampCountInto( endStamp1 );
            addIntervalToHist( startStamp1, endStamp1,
                               _VMSMasterEnv->reqHdlrLowTimeHist );
            addIntervalToHist( startStamp1, endStamp1,
                               _VMSMasterEnv->reqHdlrHighTimeHist );
            #endif
            //============================================================
          }
         if( currSlot->needsSlaveAssigned )
          { //give slot a new Slv -- assigner may return NULL (no work ready)
            schedSlaveVP =
               (*slaveAssigner)( semanticEnv, thisCoresIdx );

            if( schedSlaveVP != NULL )
             { //link slot <-> slave both ways, then queue slave for animation
               currSlot->slaveAssignedToSlot = schedSlaveVP;
               schedSlaveVP->schedSlot       = currSlot;
               currSlot->needsSlaveAssigned  = FALSE;
               numSlotsFilled += 1;

               writeVMSQ( schedSlaveVP, readyToAnimateQ );
             }
          }
       }


      #ifdef USE_WORK_STEALING
      //If no slots filled, means no more work, look for work to steal.
      //NOTE(review): currSlot is left over from the poll loop above, so it
      // is the LAST slot scanned, not necessarily an empty one -- presumably
      // intended as the slot to receive stolen work; confirm.
      if( numSlotsFilled == 0 )
       { gateProtected_stealWorkInto( currSlot, readyToAnimateQ, masterVP );
       }
      #endif

      MEAS__Capture_Post_Master_Point;

      //Hand the core back to the core loop; execution resumes after this
      // call the next time the core loop re-animates this MasterVP.
      masterSwitchToCoreLoop(animatingSlv);
      flushRegisters();
    }//MasterLoop


 }
seanhalle@208 186
seanhalle@208 187
seanhalle@208 188
seanhalle@208 189 /*This has a race condition -- the coreloops are accessing their own queues
seanhalle@208 190 * at the same time that this work-stealer on a different core is trying to
seanhalle@208 191 */
seanhalle@208 192 void inline
seanhalle@208 193 stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
seanhalle@209 194 SlaveVP *masterVP )
seanhalle@208 195 {
seanhalle@209 196 SlaveVP *stolenSlv;
seanhalle@208 197 int32 coreIdx, i;
seanhalle@208 198 VMSQueueStruc *currQ;
seanhalle@208 199
seanhalle@209 200 stolenSlv = NULL;
seanhalle@209 201 coreIdx = masterVP->coreAnimatedBy;
seanhalle@208 202 for( i = 0; i < NUM_CORES -1; i++ )
seanhalle@208 203 {
seanhalle@208 204 if( coreIdx >= NUM_CORES -1 )
seanhalle@208 205 { coreIdx = 0;
seanhalle@208 206 }
seanhalle@208 207 else
seanhalle@208 208 { coreIdx++;
seanhalle@208 209 }
seanhalle@208 210 currQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
seanhalle@208 211 if( numInVMSQ( currQ ) > 0 )
seanhalle@209 212 { stolenSlv = readVMSQ (currQ );
seanhalle@208 213 break;
seanhalle@208 214 }
seanhalle@208 215 }
seanhalle@208 216
seanhalle@209 217 if( stolenSlv != NULL )
seanhalle@209 218 { currSlot->slaveAssignedToSlot = stolenSlv;
seanhalle@209 219 stolenSlv->schedSlot = currSlot;
seanhalle@209 220 currSlot->needsSlaveAssigned = FALSE;
seanhalle@208 221
seanhalle@209 222 writeVMSQ( stolenSlv, readyToAnimateQ );
seanhalle@208 223 }
seanhalle@208 224 }
seanhalle@208 225
seanhalle@208 226 /*This algorithm makes the common case fast. Make the coreloop passive,
seanhalle@208 227 * and show its progress. Make the stealer control a gate that coreloop
seanhalle@208 228 * has to pass.
seanhalle@208 229 *To avoid interference, only one stealer at a time. Use a global
seanhalle@208 230 * stealer-lock.
seanhalle@208 231 *
seanhalle@208 232 *The pattern is based on a gate -- stealer shuts the gate, then monitors
seanhalle@208 233 * to be sure any already past make it all the way out, before starting.
seanhalle@208 234 *So, have a "progress" measure just before the gate, then have two after it,
seanhalle@208 235 * one is in a "waiting room" outside the gate, the other is at the exit.
seanhalle@208 236 *Then, the stealer first shuts the gate, then checks the progress measure
seanhalle@208 237 * outside it, then looks to see if the progress measure at the exit is the
seanhalle@208 238 * same. If yes, it knows the protected area is empty 'cause no other way
seanhalle@208 239 * to get in and the last to get in also exited.
seanhalle@208 240 *If the progress measure at the exit is not the same, then the stealer goes
seanhalle@208 241 * into a loop checking both the waiting-area and the exit progress-measures
seanhalle@208 242 * until one of them shows the same as the measure outside the gate. Might
seanhalle@208 243 * as well re-read the measure outside the gate each go around, just to be
seanhalle@208 244 * sure. It is guaranteed that one of the two will eventually match the one
seanhalle@208 245 * outside the gate.
seanhalle@208 246 *
seanhalle@208 247 *Here's an informal proof of correctness:
seanhalle@208 248 *The gate can be closed at any point, and have only four cases:
seanhalle@208 249 * 1) coreloop made it past the gate-closing but not yet past the exit
seanhalle@208 250 * 2) coreloop made it past the pre-gate progress update but not yet past
seanhalle@208 251 * the gate,
seanhalle@208 252 * 3) coreloop is right before the pre-gate update
seanhalle@208 253 * 4) coreloop is past the exit and far from the pre-gate update.
seanhalle@208 254 *
seanhalle@208 255 * Covering the cases in reverse order,
seanhalle@208 256 * 4) is not a problem -- stealer will read pre-gate progress, see that it
seanhalle@208 257 * matches exit progress, and the gate is closed, so stealer can proceed.
seanhalle@208 258 * 3) stealer will read pre-gate progress just after coreloop updates it..
seanhalle@208 259 * so stealer goes into a loop until the coreloop causes wait-progress
seanhalle@208 260 * to match pre-gate progress, so then stealer can proceed
seanhalle@208 261 * 2) same as 3..
seanhalle@208 262 * 1) stealer reads pre-gate progress, sees that it's different than exit,
seanhalle@208 263 * so goes into loop until exit matches pre-gate, now it knows coreloop
seanhalle@208 264 * is not in protected and cannot get back in, so can proceed.
seanhalle@208 265 *
seanhalle@208 266 *Implementation for the stealer:
seanhalle@208 267 *
seanhalle@208 268 *First, acquire the stealer lock -- only cores with no work to do will
seanhalle@208 269 * compete to steal, so not a big performance penalty having only one --
seanhalle@208 270 * will rarely have multiple stealers in a system with plenty of work -- and
seanhalle@208 271 * in a system with little work, it doesn't matter.
seanhalle@208 272 *
seanhalle@208 273 *Note, have single-reader, single-writer pattern for all variables used to
seanhalle@208 274 * communicate between stealer and victims
seanhalle@208 275 *
seanhalle@208 276 *So, scan the queues of the core loops, until find non-empty. Each core
seanhalle@208 277 * has its own list that it scans. The list goes in order from closest to
seanhalle@208 278 * furthest core, so it steals first from close cores. Later can add
seanhalle@208 279 * taking info from the app about overlapping footprints, and scan all the
seanhalle@208 280 * others then choose work with the most footprint overlap with the contents
seanhalle@208 281 * of this core's cache.
seanhalle@208 282 *
seanhalle@208 283 *Now, have a victim want to take work from. So, shut the gate in that
seanhalle@208 284 * coreloop, by setting the "gate closed" var on its stack to TRUE.
seanhalle@208 285 *Then, read the core's pre-gate progress and compare to the core's exit
seanhalle@208 286 * progress.
seanhalle@208 287 *If same, can proceed to take work from the coreloop's queue. When done,
seanhalle@208 288 * write FALSE to gate closed var.
seanhalle@208 289 *If different, then enter a loop that reads the pre-gate progress, then
seanhalle@208 290 * compares to exit progress then to wait progress. When one of two
seanhalle@208 291 * matches, proceed. Take work from the coreloop's queue. When done,
seanhalle@208 292 * write FALSE to the gate closed var.
seanhalle@208 293 *
seanhalle@208 294 */
/* Work stealing protected by a per-victim "gate" (algorithm and proof in the
 * comment above).  A single system-wide stealer is enforced by
 * workStealingLock; the chosen victim core-loop is excluded from its own
 * ready queue by closing its gate, then busy-waiting until the victim's
 * progress counters prove it is outside the protected region.
 *
 * currSlot:          slot on this core to fill with stolen work
 * myReadyToAnimateQ: this core's ready queue -- stolen slave goes here
 * masterVP:          MasterVP animating this call (supplies this core's idx)
 */
void inline
gateProtected_stealWorkInto( SchedSlot *currSlot,
                             VMSQueueStruc *myReadyToAnimateQ,
                             SlaveVP *masterVP )
 {
   SlaveVP *stolenSlv;
   int32 coreIdx, i, haveAVictim, gotLock;
   VMSQueueStruc *victimsQ;

   //volatile: gate fields are written by the victim core (single-writer,
   // single-reader pattern -- see note above), so every read must hit memory
   volatile GateStruc *vicGate;
   int32 coreMightBeInProtected;



   //see if any other cores have work available to steal
   haveAVictim = FALSE;
   coreIdx = masterVP->coreAnimatedBy;
   for( i = 0; i < NUM_CORES -1; i++ )
    { //ring-walk the other cores, starting with the next one up
      if( coreIdx >= NUM_CORES -1 )
       { coreIdx = 0;
       }
      else
       { coreIdx++;
       }
      victimsQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
      if( numInVMSQ( victimsQ ) > 0 )
       { haveAVictim = TRUE;
         vicGate = _VMSMasterEnv->workStealingGates[ coreIdx ];
         break;
       }
    }
   if( !haveAVictim ) return; //no work to steal, exit

   //have a victim core, now get the stealer-lock -- only one stealer at a
   // time system-wide; if another holds it, just give up and return
   gotLock =__sync_bool_compare_and_swap( &(_VMSMasterEnv->workStealingLock),
                                          UNLOCKED, LOCKED );
   if( !gotLock ) return; //go back to core loop, which will re-start master


   //====== Start Gate-protection =======
   //Close the gate first, then wait until the victim's progress counters
   // show it cannot be inside the region that touches its queue (it is
   // either in the waiting room or past the exit -- see proof above).
   vicGate->gateClosed = TRUE;
   coreMightBeInProtected= vicGate->preGateProgress != vicGate->exitProgress;
   while( coreMightBeInProtected )
    { //wait until sure
      if( vicGate->preGateProgress == vicGate->waitProgress )
         coreMightBeInProtected = FALSE;
      if( vicGate->preGateProgress == vicGate->exitProgress )
         coreMightBeInProtected = FALSE;
    }

   stolenSlv = readVMSQ ( victimsQ );

   vicGate->gateClosed = FALSE;
   //======= End Gate-protection =======


   if( stolenSlv != NULL ) //victim could have been in protected and taken
    { //install the stolen slave into the slot and queue it on this core
      currSlot->slaveAssignedToSlot = stolenSlv;
      stolenSlv->schedSlot = currSlot;
      currSlot->needsSlaveAssigned = FALSE;

      writeVMSQ( stolenSlv, myReadyToAnimateQ );
    }

   //unlock the work stealing lock (plain store -- sole holder releases it)
   _VMSMasterEnv->workStealingLock = UNLOCKED;
 }