annotate VMS.c @ 0:a5fe730dfc2e

Initial add -- for sourceforge repositories
author Me
date Sat, 22 May 2010 19:37:58 -0700
parents
children cf5007e51b96
rev   line source
Me@0 1 /*
Me@0 2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
Me@0 3 *
Me@0 4 * Licensed under BSD
Me@0 5 */
Me@0 6
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <pthread.h>

#include "VMS.h"
#include "Queue_impl/BlockingQueue.h"
Me@0 13
Me@0 14
Me@0 15 /*Setup has two phases:
Me@0 16 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
Me@0 17 * the master work-unit into the work-queue
Me@0 18 * 2) Semantic layer then does its own init, which creates the initial
Me@0 19 * work-units inside the semantic layer, ready to schedule them when
Me@0 20 * asked by the first run of the masterLoop.
Me@0 21 *
Me@0 22 *This part is a bit weird because VMS really wants to be "always there", and
Me@0 23 * have applications attach and detach. For now, this VMS is part of
Me@0 24 * the app, so the VMS system starts up as part of running the app.
Me@0 25 *
Me@0 26 *The semantic layer is fully isolated from the VMS internals by
Me@0 27 * making the semantic layer setup into a state that it's ready with its
Me@0 28 * initial work-units, ready to schedule them to slaves when the masterLoop
Me@0 29 * asks. Without this pattern, the semantic layer's setup would
Me@0 30 * have to modify slaves directly to assign the initial work-units, and put
Me@0 31 * them into the workQ itself, breaking the isolation completely.
Me@0 32 *
Me@0 33 *
Me@0 34 *The semantic layer creates the initial work-unit(s), and adds its
Me@0 35 * own environment data to masterEnv, and fills in the pointers to
Me@0 36 * the requestHandler and slaveScheduler plug-in functions
Me@0 37 *
Me@0 38 *This allocates VMS data structures, populates the master VMSProc,
Me@0 39 * and master environment, and returns the master environment to the semantic
Me@0 40 * layer.
Me@0 41 */
Me@0 42 //Global vars are all inside VMS.h
Me@0 43 MasterEnv *
Me@0 44 init_VMS( )
Me@0 45 {
Me@0 46 //Make the central work-queue
Me@0 47 workQ = makeQ();
Me@0 48
Me@0 49 masterEnv = malloc( sizeof(MasterEnv) );
Me@0 50
Me@0 51 create_master( masterEnv );
Me@0 52
Me@0 53 create_slaves( masterEnv );
Me@0 54
Me@0 55 //When coreLoops start up, the first thing
Me@0 56 writeQ( masterEnv->masterWorkUnit, workQ );
Me@0 57 }
Me@0 58
Me@0 59
Me@0 60
Me@0 61 /*Fill up the virtual master data structure, which is already alloc'd in the
Me@0 62 * masterEnv.
Me@0 63 *The virtual Master is the same structure as a virtual slave, but it
Me@0 64 * isn't in the array of virtual slaves.
Me@0 65 * The reason it's the same structure is so that the coreLoop doesn't
Me@0 66 * have to differentiate -- all work units are assigned to a VMSProcr, and
Me@0 67 * the core loop treats them all the same way, whether it's the virtual
Me@0 68 * master continuation or a slave's work-unit.
Me@0 69 *Note: masterLoop is jumped into and back out of, so have to be careful with
Me@0 70 * register usage and saving all persistent-across-calls state to masterEnv
Me@0 71 */
Me@0 72 void
Me@0 73 create_master( MasterEnv *masterEnv )
Me@0 74 { VMSProcr virtMaster;
Me@0 75
Me@0 76 virtMaster = &(masterEnv->virtMaster);
Me@0 77 virtMaster->workUnitToDo = malloc( sizeof( WorkUnit ) );
Me@0 78 virtMaster->workUnitToDo->workData = masterEnv;
Me@0 79 //TODO: figure out call structure: what GCC will do with regs
Me@0 80 // will jump to the masterLoop from the coreLoop -- what regs need
Me@0 81 // saving, from before jump to after -- and what reg to put masterEnv
Me@0 82 // pointer in when jump to masterLoop
Me@0 83 virtMaster->workUnitToDo->addrToJumpTo = &masterLoop;
Me@0 84 virtMaster->workUnitToDo->slaveAssignedTo = virtMaster;
Me@0 85 }
Me@0 86
/*Initializes every virtual slave in masterEnv's slave array to the state
 * "no work done yet, needs a work-unit assigned", so the first pass of the
 * masterLoop will schedule initial work to each of them.
 *
 *NOTE(review): elsewhere in this file VMSProcr is used as a pointer typedef
 * (see `virtMaster = &(...)` in create_master and `callingPr->...` in
 * VMS__add_request_to_slave); if that's so, `virtSlaves[i].field` below
 * should be `virtSlaves[i]->field` -- confirm the declared type of
 * masterEnv->virtSlaves in VMS.h (the author's own TODO flags this).
 */
void
create_slaves( MasterEnv *masterEnv )
{ VMSProcr *virtSlaves;
   int i;

   virtSlaves = masterEnv->virtSlaves; //TODO: make sure this is right
   for( i = 0; i < NUM_SLAVES; i++ )
    {
      //Set state to mean "everything done, schedule work to slave"
      virtSlaves[i].workIsDone = FALSE;
      virtSlaves[i].needsWorkAssigned = TRUE;
    }
}
Me@0 100
Me@0 101 /*Semantic layer calls this when it wants the system to start running.
Me@0 102 *
Me@0 103 *This creates the core loops, pins them to physical cores, gives them the
Me@0 104 * pointer to the workQ, and starts them running.
Me@0 105 */
Me@0 106 void
Me@0 107 VMS__start()
Me@0 108 { int retCode, coreIdx;
Me@0 109
Me@0 110 //TODO: still just skeleton code -- figure out right way to do this
Me@0 111
Me@0 112 //Create the PThread loops that take from work-queue, and start them
Me@0 113 for( coreIdx=0; coreIdx < NUM_WORKERS; coreIdx++ )
Me@0 114 {
Me@0 115 thdParams[coreIdx] = (ThdParams *)malloc( sizeof(ThdParams) );
Me@0 116 thdParams[coreIdx]->workQ = workQ;
Me@0 117 thdParams[coreIdx]->id = coreIdx;
Me@0 118
Me@0 119 //Now make and start thd.. the coreLoopThds entry
Me@0 120 // has all the info needed to later stop the thread.
Me@0 121 retCode =
Me@0 122 pthread_create( &(coreLoopThds[coreIdx]), thdAttrs, &coreLoop,
Me@0 123 (void *)(thdParams[coreIdx]) );
Me@0 124 if( retCode != 0 )
Me@0 125 { //error
Me@0 126 printf("ERROR creating coreLoop %d, code: %d\n", coreIdx, retCode);
Me@0 127 exit(-1);
Me@0 128 }
Me@0 129
Me@0 130 pinThdToCore( ); //figure out how to specify this..
Me@0 131
Me@0 132 startThd(); //look up PThread call to start the thread running, if it's
Me@0 133 // not automatic
Me@0 134 }
Me@0 135 }
Me@0 136
Me@0 137 /*there is a label inside this function -- save the addr of this label in
Me@0 138 * the callingPr struc, as the pick-up point from which to start the next
Me@0 139 * work-unit for that procr. If turns out have to save registers, then
Me@0 140 * save them in the procr struc too. Then do assembly jump to the CoreLoop's
Me@0 141 * "done with work-unit" label. The procr struc is in the request in the
Me@0 142 * slave that animated the just-ended work-unit, so all the state is saved
Me@0 143 * there, and will get passed along, inside the request handler, to the
Me@0 144 * next work-unit for that procr.
Me@0 145 */
/*Saves the resume point for the calling procr, then jumps back into the
 * coreLoop's "done with work-unit" point.  Skeleton code -- see the review
 * notes on each line; none of this compiles as written.
 *NOTE(review): no return type and an untyped K&R-style parameter --
 * should be `void VMS__save_ret_and_jump_to_CoreLoop( VMSProcr callingPr )`
 * or whatever type carries resumePt/coreLoopRetPt -- confirm against VMS.h.
 */
VMS__save_ret_and_jump_to_CoreLoop( callingPr )
{
   //TODO: figure out how to save the addr of a label into a mem loc
   //NOTE(review): taking a label's address needs GCC's labels-as-values
   // extension: `&&resumeNextWorkUnitPt`, resumed with `goto *ptr;` --
   // plain `&label` is not valid C
   //NOTE: because resume pt is inside the VMS fn, it's always the same, no
   // matter what the semantic layer is, no matter what semantic libr called.
   callingPr->resumePt = &resumeNextWorkUnitPt;
   save_processor_state_in( callingPr ); //save x86 regs, if GCC needs it to
   coreLoopRetPt = callingPr->coreLoopRetPt;
   //TODO: figure out how to do jump correctly -- target addr is constant
   //NOTE(review): GCC inline asm takes a string template, e.g.
   // asm volatile ("jmp *%0" : : "r"(coreLoopRetPt)); the bare token form
   // below is a placeholder and will not compile
   asm( jmp coreLoopRetPt );

   //execution resumes here when this procr's next work-unit is started
   resumeNextWorkUnitPt:
   return;
}
Me@0 160
Me@0 161
Me@0 162 /*The semantic virt procr is available in the request sent from the slave
Me@0 163 *
Me@0 164 * The request handler has to add the newly created work-unit to the semantic
Me@0 165 * virtual processor whose time-line the work-unit is a section of -- it does
Me@0 166 * this when it creates the work-unit -- which means the procr data struc is
Me@0 167 * available in the request sent from the slave, from which the new work-unit is generated.
Me@0 168 */
Me@0 169 VMS__add_request_to_slave( SlaveReqst req, VMSProcr callingPr )
Me@0 170 { VMSProcr slave;
Me@0 171 slave = callingPr->workUnit->currSlave
Me@0 172 req->nextRequest = callingPr->workUnit->currSlave->requests = req;
Me@0 173 }
Me@0 174
Me@0 175
Me@0 176