diff VMS.c @ 0:a5fe730dfc2e

Initial add -- for sourceforge repositories
author Me
date Sat, 22 May 2010 19:37:58 -0700
parents
children cf5007e51b96
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/VMS.c	Sat May 22 19:37:58 2010 -0700
     1.3 @@ -0,0 +1,176 @@
     1.4 +/*
     1.5 + * Copyright 2010  OpenSourceCodeStewardshipFoundation
     1.6 + *
     1.7 + * Licensed under BSD
     1.8 + */
     1.9 +
    1.10 +#include <stdio.h>
    1.11 +#include <stdlib.h>
    1.12 +#include <malloc.h>
    1.13 +
    1.14 +#include "VMS.h"
    1.15 +#include "Queue_impl/BlockingQueue.h"
    1.16 +
    1.17 +
    1.18 +/*Setup has two phases:
    1.19 + * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
    1.20 + *    the master work-unit into the work-queue
    1.21 + * 2) Semantic layer then does its own init, which creates the initial
    1.22 + *    work-units inside the semantic layer, ready to schedule them when
    1.23 + *    asked by the first run of the masterLoop.
    1.24 + *
     1.25 + *This part is a bit weird because VMS really wants to be "always there", and
    1.26 + * have applications attach and detach..  for now, this VMS is part of
    1.27 + * the app, so the VMS system starts up as part of running the app.
    1.28 + *
     1.29 + *The semantic layer is fully isolated from the VMS internals by
    1.30 + * making the semantic layer setup into a state that it's ready with its
    1.31 + * initial work-units, ready to schedule them to slaves when the masterLoop
    1.32 + * asks.  Without this pattern, the semantic layer's setup would
    1.33 + * have to modify slaves directly to assign the initial work-units, and put
    1.34 + * them into the workQ itself, breaking the isolation completely.
    1.35 + *
    1.36 + * 
    1.37 + *The semantic layer creates the initial work-unit(s), and adds its
    1.38 + * own environment data to masterEnv, and fills in the pointers to
    1.39 + * the requestHandler and slaveScheduler plug-in functions
    1.40 + *
    1.41 + *This allocates VMS data structures, populates the master VMSProc,
    1.42 + * and master environment, and returns the master environment to the semantic
    1.43 + * layer.
    1.44 + */
    1.45 +   //Global vars are all inside VMS.h
    1.46 +MasterEnv *
    1.47 +init_VMS(  )
    1.48 + {
    1.49 +      //Make the central work-queue
    1.50 +   workQ = makeQ();
    1.51 +
    1.52 +   masterEnv = malloc( sizeof(MasterEnv) );
    1.53 +
    1.54 +   create_master( masterEnv );
    1.55 +
    1.56 +   create_slaves( masterEnv );
    1.57 +
    1.58 +      //When coreLoops start up, the first thing 
    1.59 +   writeQ( masterEnv->masterWorkUnit, workQ );
    1.60 + }
    1.61 +
    1.62 +
    1.63 +
    1.64 +/*Fill up the virtual master data structure, which is already alloc'd in the
    1.65 + * masterEnv.
    1.66 + *The virtual Master is the same structure as a virtual slave, but it
    1.67 + * isn't in the array of virtual slaves.
    1.68 + * The reason it's the same structure is so that the coreLoop doesn't
    1.69 + *  have to differentiate -- all work units are assigned to a VMSProcr, and
    1.70 + *  the core loop treats them all the same way, whether it's the virtual
    1.71 + *  master continuation or a slave's work-unit.
    1.72 + *Note: masterLoop is jumped into an back out of, so have to be careful with
    1.73 + * register usage and saving all persistent-across-calls state to masterEnv
    1.74 + */
    1.75 +void
    1.76 +create_master( MasterEnv *masterEnv )
    1.77 + { VMSProcr virtMaster;
    1.78 +
    1.79 +   virtMaster = &(masterEnv->virtMaster);
    1.80 +   virtMaster->workUnitToDo                  = malloc( sizeof( WorkUnit ) );
    1.81 +   virtMaster->workUnitToDo->workData        = masterEnv;
    1.82 +      //TODO: figure out call structure: what GCC will do with regs
    1.83 +      // will jump to the masterLoop from the coreLoop -- what regs need
    1.84 +      // saving, from before jump to after -- and what reg to put masterEnv
    1.85 +      // pointer in when jump to masterLoop
    1.86 +   virtMaster->workUnitToDo->addrToJumpTo    = &masterLoop;
    1.87 +   virtMaster->workUnitToDo->slaveAssignedTo = virtMaster;
    1.88 + }
    1.89 +
/*Initialize the scheduling flags of every virtual slave in masterEnv so
 * the masterLoop will see each slave as idle and assign it work on its
 * first pass.
 */
void
create_slaves( MasterEnv *masterEnv )
 { VMSProcr *virtSlaves;
   int i;

   virtSlaves = masterEnv->virtSlaves;  //TODO: make sure this is right
   for( i = 0; i < NUM_SLAVES; i++ )
    {
         //Set state to mean "everything done, schedule work to slave"
         //NOTE(review): the comment says "everything done" but workIsDone
         // is set FALSE -- confirm which flag value the masterLoop's
         // scheduler actually tests for an idle slave
      virtSlaves[i].workIsDone        = FALSE;
      virtSlaves[i].needsWorkAssigned = TRUE;
    }
 }
   1.103 +
   1.104 +/*Semantic layer calls this when it want the system to start running..
   1.105 + *
   1.106 + *This creates the core loops, pins them to physical cores, gives them the
   1.107 + * pointer to the workQ, and starts them running.
   1.108 + */
   1.109 + void
   1.110 +VMS__start()
   1.111 + { int retCode, coreIdx;
   1.112 +
   1.113 +//TODO: still just skeleton code -- figure out right way to do this
   1.114 +
   1.115 +      //Create the PThread loops that take from work-queue, and start them
   1.116 +   for( coreIdx=0; coreIdx < NUM_WORKERS; coreIdx++ )
   1.117 +    {
   1.118 +      thdParams[coreIdx]        = (ThdParams *)malloc( sizeof(ThdParams) );
   1.119 +      thdParams[coreIdx]->workQ = workQ;
   1.120 +      thdParams[coreIdx]->id    = coreIdx;
   1.121 +
   1.122 +         //Now make and start thd..  the coreLoopThds entry
   1.123 +         // has all the info needed to later stop the thread.
   1.124 +      retCode =
   1.125 +       pthread_create( &(coreLoopThds[coreIdx]), thdAttrs, &coreLoop,
   1.126 +                       (void *)(thdParams[coreIdx]) );
   1.127 +      if( retCode != 0 )
   1.128 +       { //error
   1.129 +         printf("ERROR creating coreLoop %d, code: %d\n", coreIdx, retCode);
   1.130 +         exit(-1);
   1.131 +       }
   1.132 +
   1.133 +      pinThdToCore( );  //figure out how to specify this..
   1.134 +
   1.135 +      startThd(); //look up PThread call to start the thread running, if it's
   1.136 +                  // not automatic
   1.137 +    }
   1.138 + }
   1.139 +
 /*Save the resume point of the calling procr, then jump back to the
 * CoreLoop's "done with work-unit" label.
 *
 *There is a label inside this function -- save the addr of that label in
 * the callingPr struc, as the pick-up point from which to start the next
 * work-unit for that procr.  If it turns out registers have to be saved,
 * save them in the procr struc too.  Then do an assembly jump to the
 * CoreLoop's "done with work-unit" label.  The procr struc is in the
 * request in the slave that animated the just-ended work-unit, so all the
 * state is saved there, and will get passed along, inside the request
 * handler, to the next work-unit for that procr.
 */
VMS__save_ret_and_jump_to_CoreLoop( callingPr )
 {
   //TODO: figure out how to save the addr of a label into a mem loc
   //NOTE: because resume pt is inside the VMS fn, it's always the same, no
   // matter what the semantic layer is, no matter what semantic libr called.
   //NOTE(review): taking the address of a label needs GCC's labels-as-values
   // extension, spelled "&&resumeNextWorkUnitPt" -- plain "&label" will not
   // compile; confirm the intended construct
   callingPr->resumePt = &resumeNextWorkUnitPt;
   save_processor_state_in( callingPr ); //save x86 regs, if GCC needs it to
   coreLoopRetPt = callingPr->coreLoopRetPt;
   //TODO: figure out how to do jump correctly -- target addr is constant
   //NOTE(review): this is not valid GCC inline-asm syntax -- extended asm
   // needs a string template and operands, e.g.
   //   asm volatile( "jmp *%0" : : "r"(coreLoopRetPt) );
   asm( jmp coreLoopRetPt );

resumeNextWorkUnitPt:
   return;
 }
   1.163 +
   1.164 +
   1.165 +/*The semantic virt procr is available in the request sent from the slave
   1.166 + * 
   1.167 + * The request handler has to add the work-unit created to the semantic
   1.168 + * virtual processor the work-unit is a section of its time-line -- does this when create the
   1.169 + * work-unit -- means the procr data struc is available in the request sent
   1.170 + * from the slave, from which the new work-unit is generated..
   1.171 + */
   1.172 +VMS__add_request_to_slave( SlaveReqst req, VMSProcr callingPr )
   1.173 + { VMSProcr slave;
   1.174 +   slave = callingPr->workUnit->currSlave
   1.175 + req->nextRequest =  callingPr->workUnit->currSlave->requests = req;
   1.176 + }
   1.177 +
   1.178 +
   1.179 +