comparison VMS.c @ 0:a5fe730dfc2e

Initial add -- for sourceforge repositories
author Me
date Sat, 22 May 2010 19:37:58 -0700
parents
children cf5007e51b96
comparison
equal deleted inserted replaced
-1:000000000000 0:45391308a69a
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
6
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <malloc.h>
10
11 #include "VMS.h"
12 #include "Queue_impl/BlockingQueue.h"
13
14
15 /*Setup has two phases:
16 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
17 * the master work-unit into the work-queue
18 * 2) Semantic layer then does its own init, which creates the initial
19 * work-units inside the semantic layer, ready to schedule them when
20 * asked by the first run of the masterLoop.
21 *
22 *This part is a bit weird because VMS really wants to be "always there", and
23 * have applications attach and detach.. for now, this VMS is part of
24 * the app, so the VMS system starts up as part of running the app.
25 *
26 *The semantic layer is fully isolated from the VMS internals by
27 * making the semantic layer setup into a state that it's ready with its
28 * initial work-units, ready to schedule them to slaves when the masterLoop
29 * asks. Without this pattern, the semantic layer's setup would
30 * have to modify slaves directly to assign the initial work-units, and put
31 * them into the workQ itself, breaking the isolation completely.
32 *
33 *
34 *The semantic layer creates the initial work-unit(s), and adds its
35 * own environment data to masterEnv, and fills in the pointers to
36 * the requestHandler and slaveScheduler plug-in functions
37 *
38 *This allocates VMS data structures, populates the master VMSProc,
39 * and master environment, and returns the master environment to the semantic
40 * layer.
41 */
42 //Global vars are all inside VMS.h
43 MasterEnv *
44 init_VMS( )
45 {
46 //Make the central work-queue
47 workQ = makeQ();
48
49 masterEnv = malloc( sizeof(MasterEnv) );
50
51 create_master( masterEnv );
52
53 create_slaves( masterEnv );
54
55 //When coreLoops start up, the first thing
56 writeQ( masterEnv->masterWorkUnit, workQ );
57 }
58
59
60
61 /*Fill up the virtual master data structure, which is already alloc'd in the
62 * masterEnv.
63 *The virtual Master is the same structure as a virtual slave, but it
64 * isn't in the array of virtual slaves.
65 * The reason it's the same structure is so that the coreLoop doesn't
66 * have to differentiate -- all work units are assigned to a VMSProcr, and
67 * the core loop treats them all the same way, whether it's the virtual
68 * master continuation or a slave's work-unit.
69 *Note: masterLoop is jumped into and back out of, so have to be careful with
70 * register usage and saving all persistent-across-calls state to masterEnv
71 */
72 void
73 create_master( MasterEnv *masterEnv )
74 { VMSProcr virtMaster;
75
76 virtMaster = &(masterEnv->virtMaster);
77 virtMaster->workUnitToDo = malloc( sizeof( WorkUnit ) );
78 virtMaster->workUnitToDo->workData = masterEnv;
79 //TODO: figure out call structure: what GCC will do with regs
80 // will jump to the masterLoop from the coreLoop -- what regs need
81 // saving, from before jump to after -- and what reg to put masterEnv
82 // pointer in when jump to masterLoop
83 virtMaster->workUnitToDo->addrToJumpTo = &masterLoop;
84 virtMaster->workUnitToDo->slaveAssignedTo = virtMaster;
85 }
86
87 void
88 create_slaves( MasterEnv *masterEnv )
89 { VMSProcr *virtSlaves;
90 int i;
91
92 virtSlaves = masterEnv->virtSlaves; //TODO: make sure this is right
93 for( i = 0; i < NUM_SLAVES; i++ )
94 {
95 //Set state to mean "everything done, schedule work to slave"
96 virtSlaves[i].workIsDone = FALSE;
97 virtSlaves[i].needsWorkAssigned = TRUE;
98 }
99 }
100
101 /*Semantic layer calls this when it wants the system to start running.
102 *
103 *This creates the core loops, pins them to physical cores, gives them the
104 * pointer to the workQ, and starts them running.
105 */
106 void
107 VMS__start()
108 { int retCode, coreIdx;
109
110 //TODO: still just skeleton code -- figure out right way to do this
111
112 //Create the PThread loops that take from work-queue, and start them
113 for( coreIdx=0; coreIdx < NUM_WORKERS; coreIdx++ )
114 {
115 thdParams[coreIdx] = (ThdParams *)malloc( sizeof(ThdParams) );
116 thdParams[coreIdx]->workQ = workQ;
117 thdParams[coreIdx]->id = coreIdx;
118
119 //Now make and start thd.. the coreLoopThds entry
120 // has all the info needed to later stop the thread.
121 retCode =
122 pthread_create( &(coreLoopThds[coreIdx]), thdAttrs, &coreLoop,
123 (void *)(thdParams[coreIdx]) );
124 if( retCode != 0 )
125 { //error
126 printf("ERROR creating coreLoop %d, code: %d\n", coreIdx, retCode);
127 exit(-1);
128 }
129
130 pinThdToCore( ); //figure out how to specify this..
131
132 startThd(); //look up PThread call to start the thread running, if it's
133 // not automatic
134 }
135 }
136
137 /*there is a label inside this function -- save the addr of this label in
138 * the callingPr struc, as the pick-up point from which to start the next
139 * work-unit for that procr. If turns out have to save registers, then
140 * save them in the procr struc too. Then do assembly jump to the CoreLoop's
141 * "done with work-unit" label. The procr struc is in the request in the
142 * slave that animated the just-ended work-unit, so all the state is saved
143 * there, and will get passed along, inside the request handler, to the
144 * next work-unit for that procr.
145 */
//Saves the resume point (the label below) into callingPr, saves processor
// state if needed, then jumps back to the coreLoop's "done with work-unit"
// point. When the next work-unit for this procr is later dispatched, it
// resumes at the label and returns to the semantic-library caller.
//NOTE(review): skeleton code -- the function has an implicit-int return type
// and an untyped K&R parameter, and the asm() jump below is not valid C yet.
VMS__save_ret_and_jump_to_CoreLoop( callingPr )
{
    //TODO: figure out how to save the addr of a label into a mem loc
    // (GCC's labels-as-values extension, &&label, is the usual way -- confirm)
    //NOTE: because resume pt is inside the VMS fn, it's always the same, no
    // matter what the semantic layer is, no matter what semantic libr called.
    callingPr->resumePt = &resumeNextWorkUnitPt;
    save_processor_state_in( callingPr ); //save x86 regs, if GCC needs it to
    coreLoopRetPt = callingPr->coreLoopRetPt;
    //TODO: figure out how to do jump correctly -- target addr is constant
    // (placeholder -- not valid inline-asm syntax yet)
    asm( jmp coreLoopRetPt );

    //Execution re-enters here when the coreLoop dispatches this procr's
    // next work-unit; the return goes back into the semantic library.
    resumeNextWorkUnitPt:
    return;
}
160
161
162 /*The semantic virt procr is available in the request sent from the slave
163 *
164 * The request handler has to add the newly created work-unit to the semantic
165 * virtual processor whose time-line the work-unit is a section of -- it does
166 * this when creating the work-unit -- which means the procr data struc is
167 * available in the request sent from the slave, from which the new work-unit is generated.
168 */
169 VMS__add_request_to_slave( SlaveReqst req, VMSProcr callingPr )
170 { VMSProcr slave;
171 slave = callingPr->workUnit->currSlave
172 req->nextRequest = callingPr->workUnit->currSlave->requests = req;
173 }
174
175
176