Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VMS_impls > VMS__MC_shared_impl
view VMS.c @ 13:4b58b9a2527b
Middle of testing core loop
| author | Me |
|---|---|
| date | Sat, 19 Jun 2010 19:26:49 -0700 |
| parents | 9a1b7de19e39 |
| children | 65c8fb2821ee |
line source
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <malloc.h>
11 #include "VMS.h"
12 #include "Queue_impl/BlockingQueue.h"
15 /*Setup has two phases:
16 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
17 * the master virt procr into the work-queue, ready for first "call"
18 * 2) Semantic layer then does its own init, which creates the seed virt
19 * procr inside the semantic layer, ready to schedule it when
20 * asked by the first run of the masterLoop.
21 *
22 *This part is a bit weird because VMS really wants to be "always there", and
23 * have applications attach and detach.. for now, this VMS is part of
24 * the app, so the VMS system starts up as part of running the app.
25 *
26 *The semantic layer is isolated from the VMS internals by making the
27 * semantic layer do setup to a state that it's ready with its
28 * initial virt procrs, ready to schedule them to slots when the masterLoop
29 * asks. Without this pattern, the semantic layer's setup would
30 * have to modify slots directly to assign the initial virt-procrs, and put
31 * them into the workQ itself, breaking the isolation completely.
32 *
33 *
34 *The semantic layer creates the initial virt procr(s), and adds its
35 * own environment to masterEnv, and fills in the pointers to
36 * the requestHandler and slaveScheduler plug-in functions
37 */
39 void
40 create_sched_slots( MasterEnv *masterEnv );
43 /*This allocates VMS data structures, populates the master VMSProc,
44 * and master environment, and returns the master environment to the semantic
45 * layer.
46 */
47 void
48 VMS__init()
49 { MasterEnv *masterEnv;
50 CASQueueStruc *workQ;
52 //Make the central work-queue
53 _VMSWorkQ = makeCASQ();
54 workQ = _VMSWorkQ;
56 _VMSMasterEnv = malloc( sizeof(MasterEnv) );
57 masterEnv = _VMSMasterEnv;
59 //create the master virtual processor
60 masterEnv->masterVirtPr = VMS__create_procr( &masterLoop, masterEnv );
62 create_sched_slots( masterEnv );
64 //Set slot 0 to be the master virt procr & set flags just in case
65 masterEnv->schedSlots[0]->needsProcrAssigned = FALSE; //says don't touch
66 masterEnv->schedSlots[0]->workIsDone = FALSE; //says don't touch
67 masterEnv->schedSlots[0]->procrAssignedToSlot = masterEnv->masterVirtPr;
69 //First core loop to start up gets this, which will schedule seed Pr
70 //TODO: debug: check address of masterVirtPr
71 //TODO: commented out for debugging -- put it back in!!
72 // writeCASQ( masterEnv->masterVirtPr, workQ );
74 numProcrsCreated = 1;
75 }
78 void
79 create_sched_slots( MasterEnv *masterEnv )
80 { SchedSlot **schedSlots, **filledSlots;
81 int i;
83 schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
84 filledSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
85 masterEnv->schedSlots = schedSlots;
86 masterEnv->filledSlots = filledSlots;
88 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
89 {
90 schedSlots[i] = malloc( sizeof(SchedSlot) );
92 //Set state to mean "handling requests done, slot needs filling"
93 schedSlots[i]->workIsDone = FALSE;
94 schedSlots[i]->needsProcrAssigned = TRUE;
95 }
96 }
99 /*Semantic layer calls this when it wants the system to start running.
100 *
101 *This creates the core loops, pins them to physical cores, gives them the
102 * pointer to the workQ, and starts them running.
103 */
104 void
105 VMS__start()
106 { int coreIdx;
108 //Create the win threads that animate the core loops
109 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
110 {
111 coreLoopThdParams[coreIdx] = (ThdParams *)malloc( sizeof(ThdParams) );
112 coreLoopThdParams[coreIdx]->coreNum = coreIdx;
114 coreLoopThdHandles[coreIdx] =
115 CreateThread ( NULL, // Security attributes
116 0, // Stack size
117 coreLoop,
118 coreLoopThdParams[coreIdx],
119 CREATE_SUSPENDED,
120 &(coreLoopThdIds[coreIdx])
121 );
122 ResumeThread( coreLoopThdHandles[coreIdx] ); //starts thread
123 }
124 }
128 /*Create stack, then create __cdecl structure on it and put initialData and
129 * pointer to the new structure instance into the parameter positions on
130 * the stack
131 *Then put function pointer into nextInstrPt -- the stack is setup in std
132 * call structure, so jumping to function ptr is same as a GCC generated
133 * function call
134 *No need to save registers on old stack frame, because there's no old
135 * animator state to return to --
136 *
137 */
138 VirtProcr *
139 VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
140 { VirtProcr *newPr;
141 char *stackLocs, *stackPtr;
143 newPr = malloc( sizeof(VirtProcr) );
144 newPr->procrID = numProcrsCreated++;
145 newPr->nextInstrPt = fnPtr;
146 newPr->initialData = initialData;
148 //alloc stack locations, make stackPtr be the highest addr minus room
149 // for 2 params. Put initData at stackPtr, animatingPr just above
150 stackLocs = malloc( 0x100000 ); //1 meg stack -- default Win thread's size
151 stackPtr = ( (char *)stackLocs + 0x100000 - 0x8 );
152 //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp
153 *( (int *)stackPtr + 1) = (int) newPr; //rightmost param -- 32bit pointer
154 *( (int *)stackPtr ) = (int) initialData; //next param to left
155 newPr->stackPtr = stackPtr; //core loop will switch to this, then
156 newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr
158 return newPr;
159 }
162 /*This inserts the semantic-layer's data into the standard VMS carrier
163 */
164 inline void
165 VMS__send_sem_request( void *semReqData, VirtProcr *callingPr )
166 { SlaveReqst *req;
168 req = malloc( sizeof(SlaveReqst) );
169 req->slaveFrom = callingPr;
170 req->semReqData = semReqData;
171 req->nextRequest = callingPr->requests;
172 callingPr->requests = req;
173 }
/*there is a label inside this function -- save the addr of this label in
 * the callingPr struc, as the pick-up point from which to start the next
 * work-unit for that procr. If turns out have to save registers, then
 * save them in the procr struc too. Then do assembly jump to the CoreLoop's
 * "done with work-unit" label. The procr struc is in the request in the
 * slave that animated the just-ended work-unit, so all the state is saved
 * there, and will get passed along, inside the request handler, to the
 * next work-unit for that procr.
 */
void
VMS__suspend_processor( VirtProcr *callingPr )
 { void *jmpPt, *stackPtr, *framePtr;

      //GCC "labels as values" extension: record where to resume this procr
   callingPr->nextInstrPt = &&ResumePt;

      //return ownership of the virt procr and sched slot to Master virt pr
   callingPr->schedSlot->workIsDone = TRUE;

   jmpPt = callingPr->coreLoopStartPt;
   stackPtr = &(callingPr->stackPtr);
   framePtr = &(callingPr->framePtr);

      //put all regs in the clobber list to make sure GCC has saved all
      // so safe to jump to core loop, where they *will* get clobbered
      //NOTE(review): the "=g" output constraints write %esp/%ebp into the
      // LOCAL variables stackPtr and framePtr -- NOT through them into
      // callingPr->stackPtr / callingPr->framePtr -- so the saved values
      // look like they are lost when this frame is abandoned by the jmp.
      // Confirm whether the core loop saves them instead, or whether this
      // should store via "=m" through the struct fields.
   asm volatile("movl %%esp, %0; \
                 movl %%ebp, %1; \
                 jmp %2 "
       /* outputs */ : "=g" (stackPtr), "=g" (framePtr)
       /* inputs */  : "g" (jmpPt)
       /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi"
               ); //list everything as clobbered to force GCC to save all
                  // live vars that are in regs on stack before this
                  // assembly, so that stack pointer is correct, before jmp

ResumePt:
   return;
 }
/*Presumably tears down / recycles the virt procr whose work-unit just
 * ended -- TODO confirm intended semantics.
 *NOTE(review): empty stub, not yet implemented.
 */
void
VMS__dissipate_animating_processor( VirtProcr *animatingPr )
 {

 }
219 /*This runs in main thread -- so can only signal to the core loop to shut
220 * itself down --
221 *
222 *Want the master to decide when to shut down -- when semantic layer tells it
223 * to -- say, when all the application-virtual processors have dissipated.
224 *
225 *Maybe return a special code from scheduling plug-in.. master checks and
226 * when sees, it shuts down the core loops -- does this by scheduling a
227 * special virt processor whose next instr pt is the core-end label.
228 */
/*Shuts down the core-loop threads -- see the design discussion in the
 * block comment above.
 *NOTE(review): not yet implemented -- the loop body is empty, and the
 * original in-function comment was a copy-paste from VMS__start.
 */
void
VMS__shutdown()
 { int coreIdx;

      //TODO: signal each core loop to shut itself down (body empty for now)
   for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
    {

    }
 }
242 inline TSCount getTSCount()
243 { unsigned int low, high;
244 TSCount out;
246 saveTimeStampCountInto( low, high );
247 out = high;
248 out = (out << 32) + low;
249 return out;
250 }
