Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VMS_impls > VMS__MC_shared_impl
view VMS.c @ 8:9a1b7de19e39
Compiles -- with win thds
| author | Me |
|---|---|
| date | Tue, 01 Jun 2010 05:33:01 -0700 |
| parents | cf5007e51b96 |
| children | d801fe740275 |
line source
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <malloc.h>
11 #include "VMS.h"
12 #include "Queue_impl/BlockingQueue.h"
15 /*Setup has two phases:
16 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
17 * the master virt procr into the work-queue, ready for first "call"
18 * 2) Semantic layer then does its own init, which creates the seed virt
19 * procr inside the semantic layer, ready to schedule it when
20 * asked by the first run of the masterLoop.
21 *
 *This part is a bit weird because VMS really wants to be "always there", and
23 * have applications attach and detach.. for now, this VMS is part of
24 * the app, so the VMS system starts up as part of running the app.
25 *
26 *The semantic layer is isolated from the VMS internals by making the
27 * semantic layer do setup to a state that it's ready with its
28 * initial virt procrs, ready to schedule them to slots when the masterLoop
29 * asks. Without this pattern, the semantic layer's setup would
30 * have to modify slots directly to assign the initial virt-procrs, and put
31 * them into the workQ itself, breaking the isolation completely.
32 *
33 *
34 *The semantic layer creates the initial virt procr(s), and adds its
35 * own environment to masterEnv, and fills in the pointers to
36 * the requestHandler and slaveScheduler plug-in functions
37 */
//Forward declaration -- defined below; allocates the scheduling-slot
// arrays and initializes each slot to the empty state.
void
create_sched_slots( MasterEnv *masterEnv );
43 /*This allocates VMS data structures, populates the master VMSProc,
44 * and master environment, and returns the master environment to the semantic
45 * layer.
46 */
47 void
48 VMS__init()
49 { MasterEnv *masterEnv;
50 QueueStruc *workQ;
52 //Make the central work-queue
53 _VMSWorkQ = makeQ();
54 workQ = _VMSWorkQ;
56 _VMSMasterEnv = malloc( sizeof(MasterEnv) );
57 masterEnv = _VMSMasterEnv;
59 //create the master virtual processor
60 masterEnv->masterVirtPr = VMS__create_procr( &masterLoop, masterEnv );
62 create_sched_slots( masterEnv );
64 //Set slot 0 to be the master virt procr & set flags just in case
65 masterEnv->schedSlots[0]->needsProcrAssigned = FALSE; //says don't touch
66 masterEnv->schedSlots[0]->workIsDone = FALSE; //says don't touch
67 masterEnv->schedSlots[0]->procrAssignedToSlot = masterEnv->masterVirtPr;
69 //First core loop to start up gets this, which will schedule seed Pr
70 //TODO: debug: check address of masterVirtPr
71 writeQ( masterEnv->masterVirtPr, workQ );
72 }
75 void
76 create_sched_slots( MasterEnv *masterEnv )
77 { SchedSlot **schedSlots, **filledSlots;
78 int i;
80 schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
81 filledSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
82 masterEnv->schedSlots = schedSlots;
83 masterEnv->filledSlots = filledSlots;
85 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
86 {
87 schedSlots[i] = malloc( sizeof(SchedSlot) );
89 //Set state to mean "handling requests done, slot needs filling"
90 schedSlots[i]->workIsDone = FALSE;
91 schedSlots[i]->needsProcrAssigned = TRUE;
92 }
93 }
/*Semantic layer calls this when it wants the system to start running..
97 *
98 *This creates the core loops, pins them to physical cores, gives them the
99 * pointer to the workQ, and starts them running.
100 */
101 void
102 VMS__start()
103 { int retCode, coreIdx;
105 //Create the win threads that animate the core loops
106 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
107 {
108 thdParams[coreIdx] = (ThdParams *)malloc( sizeof(ThdParams) );
109 thdParams[coreIdx]->coreNum = coreIdx;
111 coreLoopThds[coreIdx] =
112 CreateThread ( NULL, // Security attributes
113 0, // Stack size
114 coreLoop,
115 thdParams[coreIdx],
116 CREATE_SUSPENDED,
117 &(thdIds[coreIdx])
118 );
119 ResumeThread( coreLoopThds[coreIdx] ); //starts thread
120 }
121 }
125 /*Create stack, then create __cdecl structure on it and put initialData and
126 * pointer to the new structure instance into the parameter positions on
127 * the stack
128 *Then put function pointer into nextInstrPt -- the stack is setup in std
129 * call structure, so jumping to function ptr is same as a GCC generated
130 * function call
131 *No need to save registers on old stack frame, because there's no old
132 * animator state to return to --
133 *
134 */
135 VirtProcr *
136 VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
137 { VirtProcr *newPr;
138 char *stackLocs, *stackPtr;
140 newPr = malloc( sizeof(VirtProcr) );
141 newPr->nextInstrPt = fnPtr;
142 newPr->initialData = initialData;
144 //alloc stack locations, make stackPtr be the highest addr minus room
145 // for 2 params. Put initData at stackPtr, animatingPr just above
146 stackLocs = malloc( 0x100000 ); //1 meg stack -- default Win thread's size
147 stackPtr = ( (char *)stackLocs + 0x100000 - 0x8 );
148 //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp
149 *(stackPtr + 4) = newPr; //rightmost param
150 *stackPtr = initialData; //next param to left
151 newPr->stackPtr = stackPtr; //core loop will switch to this, then
152 newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr
154 return newPr;
155 }
158 /*This inserts the semantic-layer's data into the standard VMS carrier
159 */
160 inline void
161 VMS__send_sem_request( void *semReqData, VirtProcr *callingPr )
162 { SlaveReqst *req;
164 req = malloc( sizeof(SlaveReqst) );
165 req->slaveFrom = callingPr;
166 req->semReqData = semReqData;
167 req->nextRequest = callingPr->requests;
168 callingPr->requests = req;
169 }
171 /*there is a label inside this function -- save the addr of this label in
172 * the callingPr struc, as the pick-up point from which to start the next
173 * work-unit for that procr. If turns out have to save registers, then
174 * save them in the procr struc too. Then do assembly jump to the CoreLoop's
175 * "done with work-unit" label. The procr struc is in the request in the
176 * slave that animated the just-ended work-unit, so all the state is saved
177 * there, and will get passed along, inside the request handler, to the
178 * next work-unit for that procr.
179 */
/*Suspends the calling virtual processor: records its resume point (a GCC
 * computed-goto label inside this function), marks its sched slot's work
 * as done so the master reclaims it, then jumps (via inline asm) to the
 * core loop's "done with work-unit" label.  Execution for this procr
 * resumes at ResumePt when it is next scheduled.
 */
void
VMS__suspend_processor( VirtProcr *callingPr )
 { void *jmpPt, *stackPtr, *framePtr;

   //GCC labels-as-values: the next work-unit restarts at ResumePt below
   callingPr->nextInstrPt = &&ResumePt;

   //return ownership of the virt procr and sched slot to Master virt pr
   callingPr->schedSlot->workIsDone = TRUE;

   jmpPt = callingPr->coreLoopStartPt;
   stackPtr = &(callingPr->stackPtr);
   framePtr = &(callingPr->framePtr);

   //put all regs in the clobber list to make sure GCC has saved all
   // so safe to jump to core loop, where they *will* get clobbered
   //NOTE(review): the "=g" outputs write esp/ebp into the LOCALS stackPtr
   // and framePtr, overwriting the struct-field addresses loaded just
   // above -- so the saved stack/frame pointers apparently never reach
   // callingPr.  Looks like indirect stores ("movl %%esp, (%0)" with the
   // addresses as inputs) were intended -- confirm before relying on
   // resume working.
   asm volatile("movl %%esp, %0; \
                 movl %%ebp, %1; \
                 jmp %2 "
          /* outputs */ : "=g" (stackPtr), "=g" (framePtr)
          /* inputs */  : "g" (jmpPt)
          /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi"
               );

 ResumePt:
   return;
 }
/*Tears down a virtual processor when it finishes for good.  Currently an
 * empty stub -- presumably it should free the procr's 1 MB stack and its
 * descriptor; TODO confirm intended reclamation before implementing.
 */
void
VMS__dissipate_animating_processor( VirtProcr *animatingPr )
 {
 }
213 /*This runs in main thread -- so can only signal to the core loop to shut
214 * itself down --
215 *
216 *Want the master to decide when to shut down -- when semantic layer tells it
217 * to -- say, when all the application-virtual processors have dissipated.
218 *
219 *Maybe return a special code from scheduling plug-in.. master checks and
220 * when sees, it shuts down the core loops -- does this by scheduling a
221 * special virt processor whose next instr pt is the core-end label.
222 */
/*Signals the core loops to shut themselves down.  Runs in the main thread,
 * so per the design note above it can only signal, not stop them directly.
 * The loop body is still empty -- TODO: schedule the special shutdown virt
 * processor per core (next-instr-pt = core-end label), then join the
 * threads.  (The previous comment here was a copy-paste from VMS__start.)
 */
void
VMS__shutdown()
 { int coreIdx;

   //TODO: signal each core loop to exit -- intentionally empty so far
   for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
    {

    }
 }
