view MasterLoop.c @ 37:d6367cd40e21

Change in a comment from VMSHW to SSR
author Me
date Wed, 01 Sep 2010 09:18:40 -0700
parents 0e008278fe3c
children e69579a0e797
line source
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
9 #include <stdio.h>
10 #include <malloc.h>
11 #include <stddef.h>
13 #include "VMS.h"
17 /*This code is animated by the virtual Master processor.
18 *
19 *Polls each sched slot exactly once, hands any requests made by a newly
20 * done slave to the "request handler" plug-in function
21 *
22 *Any slots that need a virt procr assigned are given to the "schedule"
23 * plug-in function, which tries to assign a virt procr (slave) to it.
24 *
25 *When all slots needing a processor have been given to the schedule plug-in,
26 * a fraction of the procrs successfully scheduled are put into the
27 * work queue, then a continuation of this function is put in, then the rest
28 * of the virt procrs that were successfully scheduled.
29 *
30 *The first thing the continuation does is busy-wait until the previous
31 * animation completes. This is because an (unlikely) continuation may
32 * sneak through the queue before the previous continuation is done putting the second
33 * part of scheduled slaves in, which is the only race condition.
34 *
35 */
37 /*May 29, 2010 -- birth a Master during init so that first core loop to
38 * start running gets it and does all the stuff for a newly born --
39 * from then on, will be doing continuation, but do suspension self
40 * directly at end of master loop
41 *So VMS__init just births the master virtual processor same way it births
42 * all the others -- then does any extra setup needed and puts it into the
43 * work queue.
44 *However means have to make masterEnv a global static volatile the same way
45 * did with workQ in core loop. -- for performance, put the
46 * jump to the core loop directly in here, and have it directly jump back.
47 */
48 void masterLoop( void *initData, VirtProcr *masterPr )
49 {
50 int slotIdx, numFilled, filledSlotIdx, masterHasBeenQueued;
51 VirtProcr *schedVirtPr;
52 SchedSlot *currSlot, **schedSlots, **filledSlots;
53 MasterEnv *masterEnv;
54 VMSQueueStruc *workQ;
55 void *jmpPt, *stackPtrAddr, *framePtrAddr, *stillRunningAddr;
56 void *coreLoopFramePtr, *coreLoopStackPtr, *semanticEnv;
58 SlaveScheduler slaveScheduler;
59 RequestHandler requestHandler;
61 //this will run as the first virt processor in workQ, and will be a
62 // new born -- so will do all the GCC-generated allocating space on
63 // the stack owned by master virt procr -- and will run this last bit
64 // of setup code..
65 masterPr->nextInstrPt = &&masterLoopStartPt;
67 //The second time MasterVP comes out of queue, the first animation of
68 // it hasn't written the stackPtr and framePtr yet -- but the second
69 // animation has already had its stackPtr and framePtr set to the old
70 // value by the coreLoop. Fix this by writing the correct stack and
71 // frame pointers here, at which point they're correct in the first
72 // animation of MasterVP.
73 //TODO: remove writing stackPtr and framePtr at the bottom, for eff
74 stackPtrAddr = &(masterPr->stackPtr);
75 framePtrAddr = &(masterPr->framePtr);
77 asm volatile("movl %0, %%eax; \
78 movl %%esp, (%%eax); \
79 movl %1, %%eax; \
80 movl %%ebp, (%%eax); "
81 /* outputs */ : "=g" (stackPtrAddr), "=g" (framePtrAddr) \
82 /* inputs */ : \
83 /* clobber */ : "memory", "%eax", "%ebx" \
84 );
87 masterLoopStartPt:
89 //if another reference to same Master VirtProcr still going, busy-wait
90 //Could put this lower, but don't want to think about shared stack..
91 while( _VMSMasterEnv->stillRunning ) /*busy wait*/ ;
92 //TODO: want to do busy-wait as assembly, to be sure stack not touched?
94 //this is the only master running now, set flag again
95 _VMSMasterEnv->stillRunning = TRUE;
96 masterEnv = _VMSMasterEnv;
98 //TODO: gdb -- check that a volatile _VMSMasterEnv and _VMSWorkQ means
99 // all these will be re-filled every time jump here..
100 workQ = _VMSWorkQ;
101 requestHandler = masterEnv->requestHandler;
102 slaveScheduler = masterEnv->slaveScheduler;
103 schedSlots = masterEnv->schedSlots;
104 filledSlots = masterEnv->filledSlots;
105 masterPr = masterEnv->masterVirtPr; //post-jmp clobbered, re-load
106 semanticEnv = masterEnv->semanticEnv;
108 //prepare for scheduling
109 numFilled = 0;
110 masterHasBeenQueued = FALSE;
112 //Poll each slot's Done flag -- slot 0 reserved for master, start at 1
113 for( slotIdx = 0; slotIdx < NUM_SCHED_SLOTS; slotIdx++)
114 {
115 currSlot = schedSlots[ slotIdx ];
117 if( currSlot->workIsDone )
118 {
119 currSlot->workIsDone = FALSE;
120 currSlot->needsProcrAssigned = TRUE;
122 //process requests from slave to master
123 (*requestHandler)( currSlot->procrAssignedToSlot, semanticEnv );
124 }
125 if( currSlot->needsProcrAssigned )
126 { //give slot a new virt procr
127 schedVirtPr =
128 (*slaveScheduler)( semanticEnv );
130 if( schedVirtPr != NULL )
131 { currSlot->procrAssignedToSlot = schedVirtPr;
132 schedVirtPr->schedSlot = currSlot;
133 currSlot->needsProcrAssigned = FALSE;
135 filledSlots[ numFilled ] = currSlot;
137 writeVMSQ( schedVirtPr, workQ );
138 numFilled += 1;
140 if( numFilled == masterEnv->numToPrecede )
141 {
142 writeVMSQ( masterEnv->masterVirtPr, workQ );
143 masterHasBeenQueued = TRUE;
144 }
146 }
147 }
148 }
150 if( !masterHasBeenQueued )
151 {
152 writeVMSQ( masterEnv->masterVirtPr, workQ );
153 }
155 //Adjust the number to precede, for next round -- assume rate of
156 // finishing work is stable -- which is a bad assumption! But, just
157 // want something working for the moment, look at dynamic behavior
158 // later
159 //TODO: look at dynamic behavior -- time-average numToPrecede or something
160 if( numFilled < NUM_CORES - 1 )
161 {
162 masterEnv->numToPrecede = 1;
163 }
164 else
165 { masterEnv->numToPrecede = numFilled - NUM_CORES + 1;
166 }
168 //Save stack ptr and frame -- don't need to, take out later, but safe
169 // Also, wait to set stillRunning to FALSE until just before jump, to
170 // be safe -- although the two simulatneously animated MasterLoops
171 // are on different cores, so have different stacks, so no worries
172 // there.
173 //Restore CoreLoop's stack frame (and stack pointer, to be safe)
174 //TODO: cafefully verify don't need to force saving anything to stack
175 // before jumping back to core loop.
176 stackPtrAddr = &(masterPr->stackPtr);
177 framePtrAddr = &(masterPr->framePtr);
178 stillRunningAddr = &(_VMSMasterEnv->stillRunning); //when race condition
179 //arises, stillRunning is shared between the two cores both animating
180 // MasterLoop -- but those two cores have different esp & ebp, so safe
181 // to change stack and frame pointer here, without one messing up other
182 // one
184 jmpPt = masterPr->coreLoopStartPt;
185 coreLoopFramePtr = masterPr->coreLoopFramePtr;//need this only
186 coreLoopStackPtr = masterPr->coreLoopStackPtr;//shouldn't need -- safety
188 asm volatile("movl %0, %%eax; \
189 movl %%esp, (%%eax); \
190 movl %1, %%eax; \
191 movl %%ebp, (%%eax); \
192 movl %2, %%ebx; \
193 movl %3, %%eax; \
194 movl %4, %%esp; \
195 movl %5, %%ebp; \
196 movl $0x0, (%%ebx); \
197 jmp %%eax;" \
198 /* outputs */ : "=g" (stackPtrAddr), "=g" (framePtrAddr), \
199 "=g"(stillRunningAddr) \
200 /* inputs */ : "g" (jmpPt), "g"(coreLoopStackPtr), "g"(coreLoopFramePtr)\
201 /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi" \
202 );//can probably make clobber list empty -- but safe for now
203 }