annotate MasterLoop.c @ 21:a0af8d4fca35

Full VMS test -- works
author Me
date Wed, 30 Jun 2010 13:10:34 -0700
parents e2de204909bf
children 668278fa7a63
rev   line source
Me@0 1 /*
Me@0 2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
Me@0 3 *
Me@0 4 * Licensed under BSD
Me@0 5 */
Me@0 6
Me@0 7
Me@0 8
Me@9 9 #include <windows.h>
Me@0 10 #include <stdio.h>
Me@0 11 #include <malloc.h>
Me@9 12 #include <stddef.h>
Me@0 13
Me@0 14 #include "VMS.h"
Me@0 15
Me@0 16
Me@0 17
Me@0 18 /*This code is animated by the virtual Master processor.
Me@0 19 *
Me@11 20 *Polls each sched slot exactly once, hands any requests made by a newly
Me@11 21 * done slave to the "request handler" plug-in function
Me@0 22 *
Me@11 23 *Any slots that need a virt procr assigned are given to the "schedule"
Me@11 24 * plug-in function, which tries to assign a virt procr (slave) to it.
Me@0 25 *
Me@11 26 *When all slots needing a processor have been given to the schedule plug-in,
Me@11 27 * a fraction of the procrs successfully scheduled are put into the
Me@11 28 * work queue, then a continuation of this function is put in, then the rest
Me@11 29 * of the virt procrs that were successfully scheduled.
Me@0 30 *
Me@11 31 *The first thing the continuation does is busy-wait until the previous
Me@11 32 * animation completes. This is because an (unlikely) continuation may
Me@11 33 * sneak through queue before previous continuation is done putting second
Me@11 34 * part of scheduled slaves in, which is the only race condition.
Me@0 35 *
Me@0 36 */
Me@0 37
Me@4 38 /*May 29, 2010 -- birth a Master during init so that first core loop to
Me@11 39 * start running gets it and does all the stuff for a newly born --
Me@11 40 * from then on, will be doing continuation, but do suspension self
Me@4 41 * directly at end of master loop
Me@4 42 *So VMS__init just births the master virtual processor same way it births
Me@4 43 * all the others -- then does any extra setup needed and puts it into the
Me@4 44 * work queue.
Me@4 45 *However means have to make masterEnv a global static volatile the same way
Me@4 46 * did with workQ in core loop. -- for performance, put the
Me@11 47 * jump to the core loop directly in here, and have it directly jump back.
Me@4 48 */
/* masterLoop -- the code animated by the virtual Master processor.
 *
 * Params:
 *  initData -- birth-time setup data (not referenced in this body --
 *              NOTE(review): currently unused, confirm intentional)
 *  masterPr -- the Master's own virtual processor; re-loaded from
 *              masterEnv after every jump back here, because locals are
 *              clobbered across the jump to/from the core loop
 *
 * One animation of this loop:
 *  1) busy-wait until any previous animation of the Master has finished
 *     (the stillRunning handshake -- the only race condition, see the
 *     file-header comment)
 *  2) poll every sched slot exactly once; a slot whose slave has newly
 *     finished gets its requests handed to the "request handler" plug-in
 *  3) each slot needing a virt procr is offered to the "schedule"
 *     plug-in, which may assign a slave to it
 *  4) the successfully scheduled slaves are enqueued into the work
 *     queue, with a continuation of this very function placed so that
 *     at most NUM_CORES - 1 slaves follow it
 *  5) save our stack/frame pointers, clear stillRunning, and jump (via
 *     inline asm) back into the core loop that animated us
 */
void masterLoop( void *initData, VirtProcr *masterPr )
{
   //NOTE(review): numInFirstChunk is declared but never used below
   int slotIdx, numFilled, numInFirstChunk, filledSlotIdx;
   VirtProcr *schedVirtPr;
   SchedSlot *currSlot, **schedSlots, **filledSlots;
   MasterEnv *masterEnv;
   CASQueueStruc *workQ;
   void *jmpPt, *stackPtrAddr, *framePtrAddr, *stillRunningAddr;
   void *coreLoopFramePtr, *coreLoopStackPtr, *semanticEnv;

   SlaveScheduler slaveScheduler;
   RequestHandler requestHandler;

   //This runs once, when the Master is animated as a new-born (the
   // first virt processor put into workQ by VMS__init).  Record the
   // resume point (GCC labels-as-values) so every later animation of
   // the Master jumps straight to masterLoopStartPt.
   masterPr->nextInstrPt = &&masterLoopStartPt;


 masterLoopStartPt:

   //If another reference to the same Master VirtProcr is still going,
   // busy-wait.  Could be placed lower, but kept first so this
   // animation doesn't touch the shared stack while the previous
   // animation may still be using it.
   while( _VMSMasterEnv->stillRunning ) /*busy wait*/ ;
   //TODO: want to do busy-wait as assembly, to be sure stack not touched?

   //this is the only master running now, set flag again
   _VMSMasterEnv->stillRunning = TRUE;
   masterEnv = _VMSMasterEnv;

   //Re-load all cached environment values every time we arrive here --
   // locals may hold stale/clobbered values from a previous animation.
   //TODO: gdb -- check that a volatile _VMSMasterEnv and _VMSWorkQ means
   // all these will be re-filled every time jump here..
   workQ = _VMSWorkQ;
   requestHandler = masterEnv->requestHandler;
   slaveScheduler = masterEnv->slaveScheduler;
   schedSlots = masterEnv->schedSlots;
   filledSlots = masterEnv->filledSlots;
   masterPr = masterEnv->masterVirtPr; //post-jmp clobbered, re-load
   semanticEnv = masterEnv->semanticEnv;

   //prepare for scheduling
   masterEnv->numFilled = 0;

   //Poll each slot's Done flag -- slot 0 reserved for master, start at 1
   for( slotIdx = 1; slotIdx < NUM_SCHED_SLOTS; slotIdx++)
    {
      currSlot = schedSlots[ slotIdx ];

      if( currSlot->workIsDone )
       {  //slave newly finished: clear the flag and mark the slot free
         currSlot->workIsDone = FALSE;
         currSlot->needsProcrAssigned = TRUE;

         //process requests from slave to master
         (*requestHandler)( currSlot->procrAssignedToSlot, semanticEnv );
       }
      if( currSlot->needsProcrAssigned )
       { //give slot a new virt procr -- scheduler plug-in may return
         // NULL, in which case the slot stays marked as needing one
         schedVirtPr =
            (*slaveScheduler)( semanticEnv );

         if( schedVirtPr != NULL )
          { currSlot->procrAssignedToSlot = schedVirtPr;
            schedVirtPr->schedSlot = currSlot;

            //remember the slot so its slave gets enqueued below
            filledSlots[ masterEnv->numFilled ] = currSlot;
            masterEnv->numFilled += 1;

            currSlot->needsProcrAssigned = FALSE;
          }
       }
    }

   //put some scheduled slaves in, then Master continuation, then rest
   //Adjust position of master such that it maintains close to a fixed
   // ratio --> make NUM_CORES - 1 slots or fewer come after the master
   numFilled = masterEnv->numFilled;

   int numPrecede = numFilled;
   int numFollow = NUM_CORES - 1;

   if( numFilled < numFollow )
    { //fewer slaves than cores-1: they all go after the continuation
      numFollow = numFilled;
      numPrecede = 0;
    }
   else
    { //exactly NUM_CORES - 1 slaves will follow the continuation
      numPrecede -= numFollow;
    }

   //enqueue the first chunk of scheduled slaves
   for( filledSlotIdx = 0; filledSlotIdx < numPrecede; filledSlotIdx++)
    {
      writeCASQ( filledSlots[ filledSlotIdx ]->procrAssignedToSlot, workQ );
    }

   //enqueue continuation of this loop
   // note that After this enqueue, continuation might sneak through
   // the queue and start on another core before we finish below -- the
   // stillRunning busy-wait at masterLoopStartPt handles that
   writeCASQ( masterEnv->masterVirtPr, workQ );

   //enqueue the remaining scheduled slaves (at most NUM_CORES - 1)
   for( filledSlotIdx = numPrecede;
        filledSlotIdx < numFilled;
        filledSlotIdx++)
    {
      writeCASQ( filledSlots[ filledSlotIdx ]->procrAssignedToSlot, workQ );
    }

   masterEnv->numFilled = 0;


   //Save stack ptr and frame -- don't need to, take out later, but safe
   // Also, wait to set stillRunning to FALSE until just before jump, to
   // be safe -- although the two simultaneously animated MasterLoops
   // are on different cores, so have different stacks, so no worries
   // there.
   //Restore CoreLoop's stack frame (and stack pointer, to be safe)
   //TODO: carefully verify don't need to force saving anything to stack
   // before jumping back to core loop.
   stackPtrAddr = &(masterPr->stackPtr);
   framePtrAddr = &(masterPr->framePtr);
   stillRunningAddr = &(_VMSMasterEnv->stillRunning); //when race condition
   //arises, stillRunning is shared between the two cores both animating
   // MasterLoop -- but those two cores have different esp & ebp, so safe
   // to change stack and frame pointer here, without one messing up other
   // one

   jmpPt = masterPr->coreLoopStartPt;
   coreLoopFramePtr = masterPr->coreLoopFramePtr;//need this only
   coreLoopStackPtr = masterPr->coreLoopStackPtr;//shouldn't need -- safety

   //The asm: save current esp/ebp through the two saved addresses,
   // switch esp/ebp to the core loop's saved values, clear stillRunning
   // (the $0x0 store -- done last, after we are off this stack), then
   // jump to the core loop's resume point held in eax.
   //NOTE(review): the three address operands are declared as outputs
   // ("=g") but the asm only READS them (%0-%2) -- presumably works
   // because "g" can resolve to memory, but input or "+g" constraints
   // look more correct; confirm against GCC extended-asm rules.
   //NOTE(review): `jmp %%eax` is an indirect jump -- AT&T syntax
   // normally spells this `jmp *%%eax`; confirm the assembler accepts
   // the starless form.
   asm volatile("movl %0, %%eax; \
                 movl %%esp, (%%eax); \
                 movl %1, %%eax; \
                 movl %%ebp, (%%eax); \
                 movl %2, %%ebx; \
                 movl %3, %%eax; \
                 movl %4, %%esp; \
                 movl %5, %%ebp; \
                 movl $0x0, (%%ebx); \
                 jmp %%eax " \
    /* outputs */ : "=g" (stackPtrAddr), "=g" (framePtrAddr), \
                    "=g"(stillRunningAddr) \
    /* inputs */ : "g" (jmpPt), "g"(coreLoopStackPtr), "g"(coreLoopFramePtr)\
    /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi" \
    );//can probably make clobber list empty -- but safe for now
}
Me@0 194
Me@0 195