annotate MasterLoop.c @ 166:aefd87f9d12f

loop graph
author Nina Engelhardt
date Tue, 18 Oct 2011 15:53:04 +0200
parents 395f58384a5c
children 981acd1db6af
/*
 * Copyright 2010 OpenSourceStewardshipFoundation
 *
 * Licensed under BSD
 */



#include <stdio.h>
#include <stddef.h>

#include "VMS.h"
#include "ProcrContext.h"

//===========================================================================
inline void
stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
               VirtProcr *masterPr );

inline void
gateProtected_stealWorkInto( SchedSlot *currSlot,
                             VMSQueueStruc *myReadyToAnimateQ,
                             VirtProcr *masterPr );

//===========================================================================


/*This code is animated by the virtual Master processor.
 *
 *Polls each sched slot exactly once, and hands any requests made by a newly
 * done slave to the "request handler" plug-in function.
 *
 *Any slots that need a virt procr assigned are given to the "schedule"
 * plug-in function, which tries to assign a virt procr (slave) to each.
 *
 *When all slots needing a processor have been given to the schedule plug-in,
 * a fraction of the procrs successfully scheduled are put into the
 * work queue, then a continuation of this function is put in, then the rest
 * of the virt procrs that were successfully scheduled.
 *
 *The first thing the continuation does is busy-wait until the previous
 * animation completes. This is because a continuation may (in unlikely
 * cases) sneak through the queue before the previous continuation is done
 * putting the second part of the scheduled slaves in -- that is the only
 * race condition.
 *
 */

/*May 29, 2010 -- birth a Master during init, so that the first core loop to
 * start running gets it and does all the setup for a newly born VP --
 * from then on it will be running the continuation, but does the
 * suspension itself directly at the end of the master loop.
 *So VMS__init just births the master virtual processor the same way it
 * births all the others -- then does any extra setup needed and puts it
 * into the work queue.
 *However, this means masterEnv has to be made a global static volatile,
 * the same way readyToAnimateQ was in the core loop. -- for performance,
 * put the jump to the core loop directly in here, and have it directly
 * jump back.
 *
 *
 *Aug 18, 2010 -- Going to a separate MasterVP for each core, to see if this
 * avoids the suspected bug in the system stack that causes bizarre faults
 * at random places in the system code.
 *
 *So this function is coupled to each of the MasterVPs -- meaning this
 * function can't rely on a particular stack and frame -- each MasterVP that
 * animates this function has a different one.
 *
 *At this point, the masterLoop does not write itself into the queue
 * anymore; instead, the coreLoop acquires the masterLock when it has
 * nothing to animate, and then animates its own masterLoop. However, still
 * try to put several AppVPs into the queue to amortize the startup cost of
 * switching to the MasterVP. Note, don't have to worry much about the
 * latency of requests, because most requests generate work for the same
 * core -- the only latency issue is the case when other cores are starved
 * and one core's requests generate work for them -- so keep the max in the
 * queue to 3 or 4.
 */
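
/* Hedged sketch -- per the Aug 18 note above, the coreLoop (in another
 * file) acquires the masterLock and animates its own MasterVP when it has
 * nothing left to animate. Something like the following, where
 * switchToMasterVP and the lock's location in _VMSMasterEnv are
 * assumptions beyond the names seen in this file:
 */
 #if 0  //illustrative only
   if( numInVMSQ( myReadyToAnimateQ ) == 0 &&
       __sync_bool_compare_and_swap( &(_VMSMasterEnv->masterLock),
                                     UNLOCKED, LOCKED ) )
    { switchToMasterVP( myMasterVP ); //jmps into masterLoop below, which
                                      // jmps back via masterSwitchToCoreLoop
      _VMSMasterEnv->masterLock = UNLOCKED;
    }
 #endif
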
void masterLoop( void *initData, VirtProcr *animatingPr )
 {
   int32           slotIdx, numSlotsFilled;
   VirtProcr      *schedVirtPr;
   SchedSlot      *currSlot, **schedSlots;
   MasterEnv      *masterEnv;
   VMSQueueStruc  *readyToAnimateQ;

   SlaveScheduler  slaveScheduler;
   RequestHandler  requestHandler;
   void           *semanticEnv;

   int32           thisCoresIdx;
   VirtProcr      *masterPr;
   volatile VirtProcr *volatileMasterPr;

   volatileMasterPr = animatingPr;
   masterPr = (VirtProcr*)volatileMasterPr; //used to force re-define after jmp

 #ifdef DETECT_LOOP_GRAPH
   //loop-graph node for the current master iteration; numSlotsFilled is
   // seeded to 1 so the first pass of the while loop below starts a bulb
   bulb b = new_bulb();
   numSlotsFilled = 1;
 #endif
   //First animation of each MasterVP will in turn animate this part
   // of setup code.. (VP creator sets up the stack as if this function
   // was called normally, but actually get here by jmp)
   //So, setup values about stack ptr, jmp pt and all that
   //masterPr->nextInstrPt = &&masterLoopStartPt;


   //Note, got rid of writing the stack and frame ptr up here, because only
   // one core can ever animate a given MasterVP, so don't need to
   // communicate the new frame and stack ptr to the MasterVP storage before
   // a second version of that MasterVP can get animated on a different core.
   //Also got rid of the busy-wait.


   //masterLoopStartPt:
   while(1){

//============================= MEASUREMENT STUFF ========================
 #ifdef MEAS__TIME_MASTER
      //Total Master time includes one coreloop time -- just assume the core
      // loop time is the same for the Master as for AppVPs, even though it
      // may be smaller due to the higher predictability of the fixed jmp.
      saveLowTimeStampCountInto( masterPr->startMasterTSCLow );
 #endif
//========================================================================

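/* Hedged sketch -- saveLowTimeStampCountInto() is defined elsewhere in VMS
 * (presumably VMS.h); one plausible definition, assuming x86, GCC inline
 * asm, and a VMS uint32 typedef, reads the low half of the TSC via rdtsc:
 */
 #if 0  //illustrative only, not the project's actual macro
 #define saveLowTimeStampCountInto( var ) \
  { uint32 lo_, hi_; \
    __asm__ volatile( "rdtsc" : "=a" (lo_), "=d" (hi_) ); \
    (var) = lo_; \
  }
 #endif
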
      masterEnv = (MasterEnv*)_VMSMasterEnv;

      //GCC may optimize so it doesn't always re-define from frame-storage
      masterPr = (VirtProcr*)volatileMasterPr; //just to make sure after jmp
      thisCoresIdx    = masterPr->coreAnimatedBy;
      readyToAnimateQ = masterEnv->readyToAnimateQs[thisCoresIdx];
      schedSlots      = masterEnv->allSchedSlots[thisCoresIdx];

      requestHandler  = masterEnv->requestHandler;
      slaveScheduler  = masterEnv->slaveScheduler;
      semanticEnv     = masterEnv->semanticEnv;

 #ifdef DETECT_LOOP_GRAPH
      //If any slots were filled on the last pass, start a new bulb (a
      // loop-graph node) for this pass, tagged with this core and a
      // per-core sequence number
      if( numSlotsFilled > 0 )
       { b = new_bulb();
         addToDynArray( (void*)b, masterEnv->loop_graph_array_info );
         set_bulb_core( b, thisCoresIdx );
         set_bulb_id( b, masterEnv->loop_counter[thisCoresIdx]++ );
       }
 #endif

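/* Hedged sketch -- the bulb type and its setters are defined elsewhere
 * (presumably the loop-graph module); below is a minimal shape consistent
 * with the calls in this file. Every field name here is an assumption.
 */
 #if 0  //illustrative only
 typedef struct
  { int32 core;                      //core whose master iteration this is
    int32 id;                        //per-core iteration sequence number
    struct { int32 vpID; int32 taskPos; } members[NUM_SCHED_SLOTS];
  }
 BulbStruc;
 typedef BulbStruc *bulb;

 bulb new_bulb()                          { return VMS__malloc( sizeof(BulbStruc) ); }
 void set_bulb_core( bulb b, int32 core ) { b->core = core; }
 void set_bulb_id(   bulb b, int32 id   ) { b->id   = id;   }
 void set_bulb_member( bulb b, int32 slot, int32 vpID, int32 taskPos )
  { b->members[slot].vpID    = vpID;
    b->members[slot].taskPos = taskPos;
  }
 #endif
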
      //Poll each slot's Done flag
      numSlotsFilled = 0;
      for( slotIdx = 0; slotIdx < NUM_SCHED_SLOTS; slotIdx++ )
       {
         currSlot = schedSlots[ slotIdx ];

         if( currSlot->workIsDone )
          {
            currSlot->workIsDone         = FALSE;
            currSlot->needsProcrAssigned = TRUE;

            //process requests from slave to master
            //====================== MEASUREMENT STUFF ===================
 #ifdef MEAS__TIME_PLUGIN
            int32 startStamp1, endStamp1;
            saveLowTimeStampCountInto( startStamp1 );
 #endif
 #ifdef MEAS__PERF_COUNTERS
            int lastRecordIdx = currSlot->procrAssignedToSlot->
                            counter_history_array_info->numInArray - 1;
            CounterRecord* lastRecord = currSlot->procrAssignedToSlot->
                            counter_history[lastRecordIdx];
            lastRecord->req_core = thisCoresIdx;
            saveCyclesAndInstrs( thisCoresIdx,
                                 lastRecord->next_task_req_cycles,
                                 lastRecord->next_task_req_instrs );
            //End of task, start of next task
            //print counters from last run
            addToDynArray( (void*)lastRecord,
                           masterEnv->counter_history_array_info );
            print_record_csv_to_file( lastRecord,
                                      _VMSMasterEnv->counteroutput );

            Dependency* newd =
               new_dependency( currSlot->procrAssignedToSlot->procrID,
                               lastRecord->task_position,
                               currSlot->procrAssignedToSlot->procrID,
                               lastRecord->task_position + 1 );
            addToDynArray( (void*)newd, masterEnv->dependenciesInfo );

            //print_record_human_readable(lastRecord);
            //create new entry in record array
            CounterRecord* newRecord = VMS__malloc( sizeof(CounterRecord) );
            newRecord->req_core      = thisCoresIdx;
            newRecord->vp_id         = currSlot->procrAssignedToSlot->procrID;
            newRecord->task_position = lastRecord->task_position + 1;
            newRecord->req_cycles    = lastRecord->next_task_req_cycles;
            newRecord->req_instrs    = lastRecord->next_task_req_instrs;
            getReturnAddressBeforeLibraryCall( currSlot->procrAssignedToSlot,
                                    &(newRecord->addr_of_libcall_for_req) );
            addToDynArray( (void*)newRecord,
                   currSlot->procrAssignedToSlot->counter_history_array_info );
            lastRecord = newRecord;
 #endif
            //============================================================
            (*requestHandler)( currSlot->procrAssignedToSlot, semanticEnv );
            //====================== MEASUREMENT STUFF ===================
 #ifdef MEAS__TIME_PLUGIN
            saveLowTimeStampCountInto( endStamp1 );
            addIntervalToHist( startStamp1, endStamp1,
                               _VMSMasterEnv->reqHdlrLowTimeHist );
            addIntervalToHist( startStamp1, endStamp1,
                               _VMSMasterEnv->reqHdlrHighTimeHist );
 #endif
 #ifdef MEAS__PERF_COUNTERS
            //done with constraints check
            saveCyclesAndInstrs( thisCoresIdx, lastRecord->sc_done_cycles,
                                 lastRecord->sc_done_instrs );
            saveLowTimeStampCountInto( lastRecord->blocked_timestamp );
 #endif
            //============================================================
          }
         if( currSlot->needsProcrAssigned )
          { //give slot a new virt procr
 #ifdef MEAS__PERF_COUNTERS
            //start assigner
            uint64 tmp_cycles;
            uint64 tmp_instrs;
            saveCyclesAndInstrs( thisCoresIdx, tmp_cycles, tmp_instrs );
 #endif
            schedVirtPr = (*slaveScheduler)( semanticEnv, thisCoresIdx );

            if( schedVirtPr != NULL )
             { currSlot->procrAssignedToSlot = schedVirtPr;
               schedVirtPr->schedSlot        = currSlot;
               currSlot->needsProcrAssigned  = FALSE;
               numSlotsFilled += 1;
 #ifdef MEAS__PERF_COUNTERS
               //end assigner
               int lastRecordIdx = currSlot->procrAssignedToSlot->
                               counter_history_array_info->numInArray - 1;
               CounterRecord* lastRecord = currSlot->procrAssignedToSlot->
                               counter_history[lastRecordIdx];
               lastRecord->assigning_core      = thisCoresIdx;
               lastRecord->start_assign_cycles = tmp_cycles;
               lastRecord->start_assign_instrs = tmp_instrs;
               saveCyclesAndInstrs( thisCoresIdx,
                                    lastRecord->end_assign_cycles,
                                    lastRecord->end_assign_instrs );
 #endif
               writeVMSQ( schedVirtPr, readyToAnimateQ );
             }
          }
 #ifdef DETECT_LOOP_GRAPH
         //Record this slot's (VP, task) pair as a member of the current
         // bulb (relies on the MEAS__PERF_COUNTERS history records)
         if( !currSlot->needsProcrAssigned )
          { int lastRecordIdx = currSlot->procrAssignedToSlot->
                            counter_history_array_info->numInArray - 1;
            CounterRecord* lastRecord = currSlot->procrAssignedToSlot->
                            counter_history[lastRecordIdx];
            set_bulb_member( b, slotIdx,
                             currSlot->procrAssignedToSlot->procrID,
                             lastRecord->task_position );
          }
 #endif
       }


 #ifdef USE_WORK_STEALING
      //If no slots were filled, this core has run out of local work -- look
      // for work to steal from the other cores' queues
      if( numSlotsFilled == 0 )
       { gateProtected_stealWorkInto( currSlot, readyToAnimateQ, masterPr );
       }
 #endif


 #ifdef MEAS__TIME_MASTER
      saveLowTimeStampCountInto( masterPr->endMasterTSCLow );
 #endif

      masterSwitchToCoreLoop( animatingPr );
      flushRegisters();
    }//while(1)

 }//masterLoop


/*This has a race condition -- the coreloops are accessing their own queues
 * at the same time that this work-stealer on a different core is trying to
 * read from them. (The gate-protected version below closes that window.)
 */
inline void
stealWorkInto( SchedSlot *currSlot, VMSQueueStruc *readyToAnimateQ,
               VirtProcr *masterPr )
 {
   VirtProcr     *stolenPr;
   int32          coreIdx, i;
   VMSQueueStruc *currQ;

   //scan the other cores' queues round-robin, starting from the next core
   stolenPr = NULL;
   coreIdx  = masterPr->coreAnimatedBy;
   for( i = 0; i < NUM_CORES - 1; i++ )
    {
      if( coreIdx >= NUM_CORES - 1 )
       { coreIdx = 0;
       }
      else
       { coreIdx++;
       }
      currQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
      if( numInVMSQ( currQ ) > 0 )
       { stolenPr = readVMSQ( currQ );
         break;
       }
    }

   if( stolenPr != NULL )
    { currSlot->procrAssignedToSlot = stolenPr;
      stolenPr->schedSlot           = currSlot;
      currSlot->needsProcrAssigned  = FALSE;

      writeVMSQ( stolenPr, readyToAnimateQ );
    }
 }

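/* Hedged sketch -- VMSQueueStruc and its accessors live elsewhere in VMS;
 * here is a minimal single-reader/single-writer ring consistent with the
 * calls in this file (all field names and VMS_Q_SIZE are assumptions). It
 * shows why the un-gated stealer above races: readVMSQ's read-then-advance
 * of readIdx is not atomic, so a stealer and the queue's owner can both
 * grab the same VP.
 */
 #if 0  //illustrative only
 typedef struct
  { void  *slots[ VMS_Q_SIZE ];
    int32  readIdx;   //advanced only by the single reader
    int32  writeIdx;  //advanced only by the single writer
  }
 VMSQueueStruc;

 int32 numInVMSQ( VMSQueueStruc *q )
  { return ( q->writeIdx - q->readIdx + VMS_Q_SIZE ) % VMS_Q_SIZE;
  }

 void *readVMSQ( VMSQueueStruc *q )
  { void *item;
    if( q->readIdx == q->writeIdx ) return NULL;   //empty
    item = q->slots[ q->readIdx ];
    q->readIdx = ( q->readIdx + 1 ) % VMS_Q_SIZE;
    return item;
  }

 void writeVMSQ( void *item, VMSQueueStruc *q )
  { q->slots[ q->writeIdx ] = item;
    q->writeIdx = ( q->writeIdx + 1 ) % VMS_Q_SIZE;
  }
 #endif
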
/*This algorithm makes the common case fast. Make the coreloop passive,
 * and have it show its progress. Make the stealer control a gate that the
 * coreloop has to pass.
 *To avoid interference, only one stealer at a time. Use a global
 * stealer-lock.
 *
 *The pattern is based on a gate -- the stealer shuts the gate, then
 * monitors to be sure any coreloop already past it makes it all the way
 * out, before starting.
 *So, have a "progress" measure just before the gate, then have two after
 * it: one is in a "waiting room" outside the gate, the other is at the
 * exit.
 *Then, the stealer first shuts the gate, then checks the progress measure
 * outside it, then looks to see if the progress measure at the exit is the
 * same. If yes, it knows the protected area is empty, 'cause there is no
 * other way to get in and the last to get in has also exited.
 *If the progress measure at the exit is not the same, then the stealer goes
 * into a loop checking both the waiting-area and the exit progress-measures
 * until one of them shows the same as the measure outside the gate. Might
 * as well re-read the measure outside the gate each go around, just to be
 * sure. It is guaranteed that one of the two will eventually match the one
 * outside the gate.
 *
 *Here's an informal proof of correctness:
 *The gate can be closed at any point, and there are only four cases:
 * 1) coreloop made it past the gate-closing but not yet past the exit
 * 2) coreloop made it past the pre-gate progress update but not yet past
 *    the gate,
 * 3) coreloop is right before the pre-gate update
 * 4) coreloop is past the exit and far from the pre-gate update.
 *
 * Covering the cases in reverse order,
 * 4) is not a problem -- the stealer will read pre-gate progress, see that
 *    it matches exit progress, and the gate is closed, so the stealer can
 *    proceed.
 * 3) the stealer will read pre-gate progress just after the coreloop
 *    updates it, so the stealer goes into a loop until the coreloop causes
 *    wait-progress to match pre-gate progress, and then the stealer can
 *    proceed
 * 2) same as 3.
 * 1) the stealer reads pre-gate progress, sees that it's different than
 *    exit, so goes into a loop until exit matches pre-gate; now it knows
 *    the coreloop is not in the protected region and cannot get back in,
 *    so it can proceed.
 *
 *Implementation for the stealer:
 *
 *First, acquire the stealer lock -- only cores with no work to do will
 * compete to steal, so not a big performance penalty having only one --
 * will rarely have multiple stealers in a system with plenty of work -- and
 * in a system with little work, it doesn't matter.
 *
 *Note, have a single-reader, single-writer pattern for all variables used
 * to communicate between stealer and victims.
 *
 *So, scan the queues of the core loops, until a non-empty one is found.
 * Each core has its own list that it scans. The list goes in order from
 * the closest to the furthest core, so it steals first from close cores.
 * Later, can add taking info from the app about overlapping footprints,
 * then scan all the others and choose work with the most footprint overlap
 * with the contents of this core's cache.
 *
 *Now have a victim to take work from. So, shut the gate in that coreloop,
 * by setting the "gate closed" var on its stack to TRUE.
 *Then, read the core's pre-gate progress and compare to the core's exit
 * progress.
 *If the same, can proceed to take work from the coreloop's queue. When
 * done, write FALSE to the gate-closed var.
 *If different, then enter a loop that reads the pre-gate progress, then
 * compares it to the exit progress, then to the wait progress. When one of
 * the two matches, proceed. Take work from the coreloop's queue. When
 * done, write FALSE to the gate-closed var.
 *
 */
inline void
gateProtected_stealWorkInto( SchedSlot *currSlot,
                             VMSQueueStruc *myReadyToAnimateQ,
                             VirtProcr *masterPr )
 {
   VirtProcr     *stolenPr;
   int32          coreIdx, i, haveAVictim, gotLock;
   VMSQueueStruc *victimsQ;

   volatile GateStruc *vicGate;
   int32               coreMightBeInProtected;


   //see if any other cores have work available to steal
   haveAVictim = FALSE;
   coreIdx     = masterPr->coreAnimatedBy;
   for( i = 0; i < NUM_CORES - 1; i++ )
    {
      if( coreIdx >= NUM_CORES - 1 )
       { coreIdx = 0;
       }
      else
       { coreIdx++;
       }
      victimsQ = _VMSMasterEnv->readyToAnimateQs[coreIdx];
      if( numInVMSQ( victimsQ ) > 0 )
       { haveAVictim = TRUE;
         vicGate = _VMSMasterEnv->workStealingGates[ coreIdx ];
         break;
       }
    }
   if( !haveAVictim ) return; //no work to steal, exit

   //have a victim core, now get the stealer-lock
   gotLock = __sync_bool_compare_and_swap( &(_VMSMasterEnv->workStealingLock),
                                           UNLOCKED, LOCKED );
   if( !gotLock ) return; //go back to core loop, which will re-start master


   //====== Start Gate-protection =======
   vicGate->gateClosed = TRUE;
   coreMightBeInProtected = vicGate->preGateProgress != vicGate->exitProgress;
   while( coreMightBeInProtected )
    { //wait until sure
      if( vicGate->preGateProgress == vicGate->waitProgress )
         coreMightBeInProtected = FALSE;
      if( vicGate->preGateProgress == vicGate->exitProgress )
         coreMightBeInProtected = FALSE;
    }

   stolenPr = readVMSQ( victimsQ );

   vicGate->gateClosed = FALSE;
   //======= End Gate-protection =======


   if( stolenPr != NULL ) //victim could have been in protected and taken it
    { currSlot->procrAssignedToSlot = stolenPr;
      stolenPr->schedSlot           = currSlot;
      currSlot->needsProcrAssigned  = FALSE;

      writeVMSQ( stolenPr, myReadyToAnimateQ );
    }

   //unlock the work stealing lock
   _VMSMasterEnv->workStealingLock = UNLOCKED;
 }
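
/* Hedged sketch -- the victim (coreloop) side of the gate protocol
 * described above lives in a different file; given the GateStruc fields
 * the stealer uses, its shape would presumably be as below. myCoreIdx,
 * myGate, and nextPr are stand-ins for the coreloop's own context.
 */
 #if 0  //illustrative only
   volatile GateStruc *myGate = _VMSMasterEnv->workStealingGates[ myCoreIdx ];

   myGate->preGateProgress += 1;            //announce intent to enter
   while( myGate->gateClosed )              //gate shut? wait outside it
    { myGate->waitProgress = myGate->preGateProgress; //show stealer we're
    }                                                 // in the waiting room

   //protected region -- the only place the coreloop touches its own queue
   nextPr = readVMSQ( _VMSMasterEnv->readyToAnimateQs[ myCoreIdx ] );

   myGate->exitProgress = myGate->preGateProgress;    //show we're back out
 #endif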