| rev |
line source |
|
Me@0
|
1 /*
|
|
Me@0
|
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
|
|
Me@0
|
3 *
|
|
Me@0
|
4 * Licensed under BSD
|
|
Me@0
|
5 */
|
|
Me@0
|
6
|
|
Me@0
|
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>   //non-standard header; malloc/free already come from
                      // stdlib.h -- presumably kept for an old Win toolchain.
                      // NOTE(review): likely removable -- TODO confirm

#include "VMS.h"
#include "Queue_impl/BlockingQueue.h"
|
|
Me@0
|
13
|
|
Me@0
|
14
|
|
Me@0
|
15 /*Setup has two phases:
|
|
Me@0
|
16 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
|
|
Me@8
|
17 * the master virt procr into the work-queue, ready for first "call"
|
|
Me@8
|
18 * 2) Semantic layer then does its own init, which creates the seed virt
|
|
Me@8
|
19 * procr inside the semantic layer, ready to schedule it when
|
|
Me@0
|
20 * asked by the first run of the masterLoop.
|
|
Me@0
|
21 *
|
|
Me@0
|
22 *This part is a bit weird because VMS really wants to be "always there", and
|
|
Me@0
|
23 * have applications attach and detach.. for now, this VMS is part of
|
|
Me@0
|
24 * the app, so the VMS system starts up as part of running the app.
|
|
Me@0
|
25 *
|
|
Me@8
|
26 *The semantic layer is isolated from the VMS internals by making the
|
|
Me@8
|
27 * semantic layer do setup to a state that it's ready with its
|
|
Me@8
|
28 * initial virt procrs, ready to schedule them to slots when the masterLoop
|
|
Me@0
|
29 * asks. Without this pattern, the semantic layer's setup would
|
|
Me@8
|
30 * have to modify slots directly to assign the initial virt-procrs, and put
|
|
Me@0
|
31 * them into the workQ itself, breaking the isolation completely.
|
|
Me@0
|
32 *
|
|
Me@0
|
33 *
|
|
Me@8
|
34 *The semantic layer creates the initial virt procr(s), and adds its
|
|
Me@8
|
35 * own environment to masterEnv, and fills in the pointers to
|
|
Me@0
|
36 * the requestHandler and slaveScheduler plug-in functions
|
|
Me@8
|
37 */
|
|
Me@8
|
38
|
|
Me@8
|
//Forward declaration -- defined below; allocates the scheduling-slot arrays
// inside masterEnv and marks every slot empty.
void
create_sched_slots( MasterEnv *masterEnv );
|
|
Me@8
|
41
|
|
Me@8
|
42
|
|
Me@8
|
43 /*This allocates VMS data structures, populates the master VMSProc,
|
|
Me@0
|
44 * and master environment, and returns the master environment to the semantic
|
|
Me@0
|
45 * layer.
|
|
Me@0
|
46 */
|
|
Me@8
|
47 void
|
|
Me@8
|
48 VMS__init()
|
|
Me@1
|
49 { MasterEnv *masterEnv;
|
|
Me@1
|
50 QueueStruc *workQ;
|
|
Me@1
|
51
|
|
Me@0
|
52 //Make the central work-queue
|
|
Me@1
|
53 _VMSWorkQ = makeQ();
|
|
Me@1
|
54 workQ = _VMSWorkQ;
|
|
Me@0
|
55
|
|
Me@1
|
56 _VMSMasterEnv = malloc( sizeof(MasterEnv) );
|
|
Me@1
|
57 masterEnv = _VMSMasterEnv;
|
|
Me@0
|
58
|
|
Me@8
|
59 //create the master virtual processor
|
|
Me@8
|
60 masterEnv->masterVirtPr = VMS__create_procr( &masterLoop, masterEnv );
|
|
Me@0
|
61
|
|
Me@1
|
62 create_sched_slots( masterEnv );
|
|
Me@0
|
63
|
|
Me@8
|
64 //Set slot 0 to be the master virt procr & set flags just in case
|
|
Me@8
|
65 masterEnv->schedSlots[0]->needsProcrAssigned = FALSE; //says don't touch
|
|
Me@8
|
66 masterEnv->schedSlots[0]->workIsDone = FALSE; //says don't touch
|
|
Me@1
|
67 masterEnv->schedSlots[0]->procrAssignedToSlot = masterEnv->masterVirtPr;
|
|
Me@1
|
68
|
|
Me@1
|
69 //First core loop to start up gets this, which will schedule seed Pr
|
|
Me@1
|
70 //TODO: debug: check address of masterVirtPr
|
|
Me@8
|
71 writeQ( masterEnv->masterVirtPr, workQ );
|
|
Me@0
|
72 }
|
|
Me@0
|
73
|
|
Me@0
|
74
|
|
Me@0
|
75 void
|
|
Me@1
|
76 create_sched_slots( MasterEnv *masterEnv )
|
|
Me@8
|
77 { SchedSlot **schedSlots, **filledSlots;
|
|
Me@0
|
78 int i;
|
|
Me@0
|
79
|
|
Me@8
|
80 schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
|
|
Me@8
|
81 filledSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
|
|
Me@8
|
82 masterEnv->schedSlots = schedSlots;
|
|
Me@8
|
83 masterEnv->filledSlots = filledSlots;
|
|
Me@8
|
84
|
|
Me@1
|
85 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
|
|
Me@0
|
86 {
|
|
Me@8
|
87 schedSlots[i] = malloc( sizeof(SchedSlot) );
|
|
Me@8
|
88
|
|
Me@1
|
89 //Set state to mean "handling requests done, slot needs filling"
|
|
Me@8
|
90 schedSlots[i]->workIsDone = FALSE;
|
|
Me@8
|
91 schedSlots[i]->needsProcrAssigned = TRUE;
|
|
Me@0
|
92 }
|
|
Me@0
|
93 }
|
|
Me@0
|
94
|
|
Me@8
|
95
|
|
Me@0
|
96 /*Semantic layer calls this when it want the system to start running..
|
|
Me@0
|
97 *
|
|
Me@0
|
98 *This creates the core loops, pins them to physical cores, gives them the
|
|
Me@0
|
99 * pointer to the workQ, and starts them running.
|
|
Me@0
|
100 */
|
|
Me@0
|
101 void
|
|
Me@0
|
102 VMS__start()
|
|
Me@0
|
103 { int retCode, coreIdx;
|
|
Me@0
|
104
|
|
Me@8
|
105 //Create the win threads that animate the core loops
|
|
Me@8
|
106 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
|
|
Me@8
|
107 {
|
|
Me@8
|
108 thdParams[coreIdx] = (ThdParams *)malloc( sizeof(ThdParams) );
|
|
Me@8
|
109 thdParams[coreIdx]->coreNum = coreIdx;
|
|
Me@0
|
110
|
|
Me@8
|
111 coreLoopThds[coreIdx] =
|
|
Me@8
|
112 CreateThread ( NULL, // Security attributes
|
|
Me@8
|
113 0, // Stack size
|
|
Me@8
|
114 coreLoop,
|
|
Me@8
|
115 thdParams[coreIdx],
|
|
Me@8
|
116 CREATE_SUSPENDED,
|
|
Me@8
|
117 &(thdIds[coreIdx])
|
|
Me@8
|
118 );
|
|
Me@8
|
119 ResumeThread( coreLoopThds[coreIdx] ); //starts thread
|
|
Me@8
|
120 }
|
|
Me@8
|
121 }
|
|
Me@0
|
122
|
|
Me@0
|
123
|
|
Me@0
|
124
|
|
Me@8
|
125 /*Create stack, then create __cdecl structure on it and put initialData and
|
|
Me@8
|
126 * pointer to the new structure instance into the parameter positions on
|
|
Me@8
|
127 * the stack
|
|
Me@8
|
128 *Then put function pointer into nextInstrPt -- the stack is setup in std
|
|
Me@8
|
129 * call structure, so jumping to function ptr is same as a GCC generated
|
|
Me@8
|
130 * function call
|
|
Me@8
|
131 *No need to save registers on old stack frame, because there's no old
|
|
Me@8
|
132 * animator state to return to --
|
|
Me@8
|
133 *
|
|
Me@8
|
134 */
|
|
Me@8
|
135 VirtProcr *
|
|
Me@8
|
136 VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
|
|
Me@8
|
137 { VirtProcr *newPr;
|
|
Me@8
|
138 char *stackLocs, *stackPtr;
|
|
Me@8
|
139
|
|
Me@8
|
140 newPr = malloc( sizeof(VirtProcr) );
|
|
Me@8
|
141 newPr->nextInstrPt = fnPtr;
|
|
Me@8
|
142 newPr->initialData = initialData;
|
|
Me@8
|
143
|
|
Me@8
|
144 //alloc stack locations, make stackPtr be the highest addr minus room
|
|
Me@8
|
145 // for 2 params. Put initData at stackPtr, animatingPr just above
|
|
Me@8
|
146 stackLocs = malloc( 0x100000 ); //1 meg stack -- default Win thread's size
|
|
Me@8
|
147 stackPtr = ( (char *)stackLocs + 0x100000 - 0x8 );
|
|
Me@8
|
148 //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp
|
|
Me@8
|
149 *(stackPtr + 4) = newPr; //rightmost param
|
|
Me@8
|
150 *stackPtr = initialData; //next param to left
|
|
Me@8
|
151 newPr->stackPtr = stackPtr; //core loop will switch to this, then
|
|
Me@8
|
152 newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr
|
|
Me@8
|
153
|
|
Me@8
|
154 return newPr;
|
|
Me@8
|
155 }
|
|
Me@8
|
156
|
|
Me@8
|
157
|
|
Me@8
|
158 /*This inserts the semantic-layer's data into the standard VMS carrier
|
|
Me@8
|
159 */
|
|
Me@8
|
160 inline void
|
|
Me@8
|
161 VMS__send_sem_request( void *semReqData, VirtProcr *callingPr )
|
|
Me@8
|
162 { SlaveReqst *req;
|
|
Me@8
|
163
|
|
Me@8
|
164 req = malloc( sizeof(SlaveReqst) );
|
|
Me@8
|
165 req->slaveFrom = callingPr;
|
|
Me@8
|
166 req->semReqData = semReqData;
|
|
Me@8
|
167 req->nextRequest = callingPr->requests;
|
|
Me@8
|
168 callingPr->requests = req;
|
|
Me@0
|
169 }
|
|
Me@0
|
170
|
|
Me@0
|
/*there is a label inside this function -- save the addr of this label in
 * the callingPr struc, as the pick-up point from which to start the next
 * work-unit for that procr.  If turns out have to save registers, then
 * save them in the procr struc too.  Then do assembly jump to the CoreLoop's
 * "done with work-unit" label.  The procr struc is in the request in the
 * slave that animated the just-ended work-unit, so all the state is saved
 * there, and will get passed along, inside the request handler, to the
 * next work-unit for that procr.
 *
 *Uses GCC extensions: the && label-address operator and extended inline
 * asm.  32-bit x86 only (esp/ebp).
 */
void
VMS__suspend_processor( VirtProcr *callingPr )
 { void *jmpPt, *stackPtr, *framePtr;

      //resume here (via nextInstrPt) when this procr is next animated
   callingPr->nextInstrPt = &&ResumePt;

   //return ownership of the virt procr and sched slot to Master virt pr
   callingPr->schedSlot->workIsDone = TRUE;

   jmpPt    = callingPr->coreLoopStartPt;
      //NOTE(review): these locals hold the ADDRESSES of the struct fields,
      // but the asm's "=g" outputs below overwrite the locals themselves --
      // so the saved esp/ebp appear never to reach callingPr->stackPtr /
      // ->framePtr.  Verify against coreLoop's save protocol -- TODO confirm
   stackPtr = &(callingPr->stackPtr);
   framePtr = &(callingPr->framePtr);

   //put all regs in the clobber list to make sure GCC has saved all
   // so safe to jump to core loop, where they *will* get clobbered
   //The jmp never returns here; control re-enters only at ResumePt
   asm volatile("movl %%esp, %0; \
                 movl %%ebp, %1; \
                 jmp %2 "
    /* outputs */ : "=g" (stackPtr), "=g" (framePtr)
    /* inputs */ : "g" (jmpPt)
    /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi"
                );

 ResumePt:
   return;
 }
|
|
Me@0
|
206
|
|
Me@8
|
207 void
|
|
Me@8
|
208 VMS__dissipate_animating_processor( VirtProcr *animatingPr )
|
|
Me@8
|
209 {
|
|
Me@0
|
210
|
|
Me@1
|
211 }
|
|
Me@1
|
212
|
|
Me@8
|
/*This runs in main thread -- so can only signal to the core loop to shut
 * itself down --
 *
 *Want the master to decide when to shut down -- when semantic layer tells it
 * to -- say, when all the application-virtual processors have dissipated.
 *
 *Maybe return a special code from scheduling plug-in.. master checks and
 * when sees, it shuts down the core loops -- does this by scheduling a
 * special virt processor whose next instr pt is the core-end label.
 */
void
VMS__shutdown()
 { int coreIdx;

      //TODO: stub -- the loop body is empty; intended to signal each core
      // loop to shut itself down (see comment above).  Currently a no-op.
   for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
    {

    }
 }