/*
 * Copyright 2010 OpenSourceCodeStewardshipFoundation
 *
 * Licensed under BSD
 */

//NOTE(review): the header names were lost in this file's history -- these
// three are restored from what the code actually calls; confirm
#include <stdio.h>    //printf
#include <stdlib.h>   //malloc, exit
#include <pthread.h>  //pthread_create

#include "VMS.h"
#include "Queue_impl/BlockingQueue.h"


/*Setup has two phases:
 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
 *    the master work-unit into the work-queue
 * 2) Semantic layer then does its own init, which creates the initial
 *    work-units inside the semantic layer, ready to schedule them when
 *    asked by the first run of the masterLoop.
 *
 *This part is a bit weird because VMS really wants to be "always there", and
 * have applications attach and detach.. for now, this VMS is part of
 * the app, so the VMS system starts up as part of running the app.
 *
 *The semantic layer is fully isolated from the VMS internals by
 * making the semantic layer setup into a state that it's ready with its
 * initial work-units, ready to schedule them to slaves when the masterLoop
 * asks.  Without this pattern, the semantic layer's setup would
 * have to modify slaves directly to assign the initial work-units, and put
 * them into the workQ itself, breaking the isolation completely.
 *
 *
 *The semantic layer creates the initial work-unit(s), and adds its
 * own environment data to masterEnv, and fills in the pointers to
 * the requestHandler and slaveScheduler plug-in functions
 *
 *This allocates VMS data structures, populates the master VMSProc,
 * and master environment, and returns the master environment to the semantic
 * layer.
Me@0: */ Me@0: //Global vars are all inside VMS.h Me@0: MasterEnv * Me@0: init_VMS( ) Me@0: { Me@0: //Make the central work-queue Me@0: workQ = makeQ(); Me@0: Me@0: masterEnv = malloc( sizeof(MasterEnv) ); Me@0: Me@0: create_master( masterEnv ); Me@0: Me@0: create_slaves( masterEnv ); Me@0: Me@0: //When coreLoops start up, the first thing Me@0: writeQ( masterEnv->masterWorkUnit, workQ ); Me@0: } Me@0: Me@0: Me@0: Me@0: /*Fill up the virtual master data structure, which is already alloc'd in the Me@0: * masterEnv. Me@0: *The virtual Master is the same structure as a virtual slave, but it Me@0: * isn't in the array of virtual slaves. Me@0: * The reason it's the same structure is so that the coreLoop doesn't Me@0: * have to differentiate -- all work units are assigned to a VMSProcr, and Me@0: * the core loop treats them all the same way, whether it's the virtual Me@0: * master continuation or a slave's work-unit. Me@0: *Note: masterLoop is jumped into an back out of, so have to be careful with Me@0: * register usage and saving all persistent-across-calls state to masterEnv Me@0: */ Me@0: void Me@0: create_master( MasterEnv *masterEnv ) Me@0: { VMSProcr virtMaster; Me@0: Me@0: virtMaster = &(masterEnv->virtMaster); Me@0: virtMaster->workUnitToDo = malloc( sizeof( WorkUnit ) ); Me@0: virtMaster->workUnitToDo->workData = masterEnv; Me@0: //TODO: figure out call structure: what GCC will do with regs Me@0: // will jump to the masterLoop from the coreLoop -- what regs need Me@0: // saving, from before jump to after -- and what reg to put masterEnv Me@0: // pointer in when jump to masterLoop Me@0: virtMaster->workUnitToDo->addrToJumpTo = &masterLoop; Me@0: virtMaster->workUnitToDo->slaveAssignedTo = virtMaster; Me@0: } Me@0: Me@0: void Me@0: create_slaves( MasterEnv *masterEnv ) Me@0: { VMSProcr *virtSlaves; Me@0: int i; Me@0: Me@0: virtSlaves = masterEnv->virtSlaves; //TODO: make sure this is right Me@0: for( i = 0; i < NUM_SLAVES; i++ ) Me@0: { Me@0: //Set state to mean 
"everything done, schedule work to slave" Me@0: virtSlaves[i].workIsDone = FALSE; Me@0: virtSlaves[i].needsWorkAssigned = TRUE; Me@0: } Me@0: } Me@0: Me@0: /*Semantic layer calls this when it want the system to start running.. Me@0: * Me@0: *This creates the core loops, pins them to physical cores, gives them the Me@0: * pointer to the workQ, and starts them running. Me@0: */ Me@0: void Me@0: VMS__start() Me@0: { int retCode, coreIdx; Me@0: Me@0: //TODO: still just skeleton code -- figure out right way to do this Me@0: Me@0: //Create the PThread loops that take from work-queue, and start them Me@0: for( coreIdx=0; coreIdx < NUM_WORKERS; coreIdx++ ) Me@0: { Me@0: thdParams[coreIdx] = (ThdParams *)malloc( sizeof(ThdParams) ); Me@0: thdParams[coreIdx]->workQ = workQ; Me@0: thdParams[coreIdx]->id = coreIdx; Me@0: Me@0: //Now make and start thd.. the coreLoopThds entry Me@0: // has all the info needed to later stop the thread. Me@0: retCode = Me@0: pthread_create( &(coreLoopThds[coreIdx]), thdAttrs, &coreLoop, Me@0: (void *)(thdParams[coreIdx]) ); Me@0: if( retCode != 0 ) Me@0: { //error Me@0: printf("ERROR creating coreLoop %d, code: %d\n", coreIdx, retCode); Me@0: exit(-1); Me@0: } Me@0: Me@0: pinThdToCore( ); //figure out how to specify this.. Me@0: Me@0: startThd(); //look up PThread call to start the thread running, if it's Me@0: // not automatic Me@0: } Me@0: } Me@0: Me@0: /*there is a label inside this function -- save the addr of this label in Me@0: * the callingPr struc, as the pick-up point from which to start the next Me@0: * work-unit for that procr. If turns out have to save registers, then Me@0: * save them in the procr struc too. Then do assembly jump to the CoreLoop's Me@0: * "done with work-unit" label. The procr struc is in the request in the Me@0: * slave that animated the just-ended work-unit, so all the state is saved Me@0: * there, and will get passed along, inside the request handler, to the Me@0: * next work-unit for that procr. 
Me@0: */ Me@0: VMS__save_ret_and_jump_to_CoreLoop( callingPr ) Me@0: { Me@0: //TODO: figure out how to save the addr of a label into a mem loc Me@0: //NOTE: because resume pt is inside the VMS fn, it's always the same, no Me@0: // matter what the semantic layer is, no matter what semantic libr called. Me@0: callingPr->resumePt = &resumeNextWorkUnitPt; Me@0: save_processor_state_in( callingPr ); //save x86 regs, if GCC needs it to Me@0: coreLoopRetPt = callingPr->coreLoopRetPt; Me@0: //TODO: figure out how to do jump correctly -- target addr is constant Me@0: asm( jmp coreLoopRetPt ); Me@0: Me@0: resumeNextWorkUnitPt: Me@0: return; Me@0: } Me@0: Me@0: Me@0: /*The semantic virt procr is available in the request sent from the slave Me@0: * Me@0: * The request handler has to add the work-unit created to the semantic Me@0: * virtual processor the work-unit is a section of its time-line -- does this when create the Me@0: * work-unit -- means the procr data struc is available in the request sent Me@0: * from the slave, from which the new work-unit is generated.. Me@0: */ Me@0: VMS__add_request_to_slave( SlaveReqst req, VMSProcr callingPr ) Me@0: { VMSProcr slave; Me@0: slave = callingPr->workUnit->currSlave Me@0: req->nextRequest = callingPr->workUnit->currSlave->requests = req; Me@0: } Me@0: Me@0: Me@0: