annotate VMS.c @ 12:d801fe740275

Middle of testing core loop
author Me
date Sat, 19 Jun 2010 19:26:38 -0700
parents 9a1b7de19e39
children 65c8fb2821ee
rev   line source
Me@0 1 /*
Me@0 2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
Me@0 3 *
Me@0 4 * Licensed under BSD
Me@0 5 */
Me@0 6
Me@0 7 #include <stdio.h>
Me@0 8 #include <stdlib.h>
Me@0 9 #include <malloc.h>
Me@0 10
Me@0 11 #include "VMS.h"
Me@0 12 #include "Queue_impl/BlockingQueue.h"
Me@0 13
Me@0 14
Me@0 15 /*Setup has two phases:
Me@0 16 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
Me@8 17 * the master virt procr into the work-queue, ready for first "call"
Me@8 18 * 2) Semantic layer then does its own init, which creates the seed virt
Me@8 19 * procr inside the semantic layer, ready to schedule it when
Me@0 20 * asked by the first run of the masterLoop.
Me@0 21 *
Me@0 22 *This part is a bit weird because VMS really wants to be "always there", and
Me@0 23 * have applications attach and detach.. for now, this VMS is part of
Me@0 24 * the app, so the VMS system starts up as part of running the app.
Me@0 25 *
Me@8 26 *The semantic layer is isolated from the VMS internals by making the
Me@8 27 * semantic layer do setup to a state that it's ready with its
Me@8 28 * initial virt procrs, ready to schedule them to slots when the masterLoop
Me@0 29 * asks. Without this pattern, the semantic layer's setup would
Me@8 30 * have to modify slots directly to assign the initial virt-procrs, and put
Me@0 31 * them into the workQ itself, breaking the isolation completely.
Me@0 32 *
Me@0 33 *
Me@8 34 *The semantic layer creates the initial virt procr(s), and adds its
Me@8 35 * own environment to masterEnv, and fills in the pointers to
Me@0 36 * the requestHandler and slaveScheduler plug-in functions
Me@8 37 */
Me@8 38
Me@8 39 void
Me@8 40 create_sched_slots( MasterEnv *masterEnv );
Me@8 41
Me@8 42
Me@8 43 /*This allocates VMS data structures, populates the master VMSProc,
Me@0 44 * and master environment, and returns the master environment to the semantic
Me@0 45 * layer.
Me@0 46 */
Me@8 47 void
Me@8 48 VMS__init()
Me@1 49 { MasterEnv *masterEnv;
Me@12 50 CASQueueStruc *workQ;
Me@1 51
Me@0 52 //Make the central work-queue
Me@12 53 _VMSWorkQ = makeCASQ();
Me@1 54 workQ = _VMSWorkQ;
Me@0 55
Me@1 56 _VMSMasterEnv = malloc( sizeof(MasterEnv) );
Me@1 57 masterEnv = _VMSMasterEnv;
Me@0 58
Me@8 59 //create the master virtual processor
Me@8 60 masterEnv->masterVirtPr = VMS__create_procr( &masterLoop, masterEnv );
Me@0 61
Me@1 62 create_sched_slots( masterEnv );
Me@0 63
Me@8 64 //Set slot 0 to be the master virt procr & set flags just in case
Me@8 65 masterEnv->schedSlots[0]->needsProcrAssigned = FALSE; //says don't touch
Me@8 66 masterEnv->schedSlots[0]->workIsDone = FALSE; //says don't touch
Me@1 67 masterEnv->schedSlots[0]->procrAssignedToSlot = masterEnv->masterVirtPr;
Me@1 68
Me@1 69 //First core loop to start up gets this, which will schedule seed Pr
Me@1 70 //TODO: debug: check address of masterVirtPr
Me@12 71 //TODO: commented out for debugging -- put it back in!!
Me@12 72 // writeCASQ( masterEnv->masterVirtPr, workQ );
Me@12 73
Me@12 74 numProcrsCreated = 1;
Me@0 75 }
Me@0 76
Me@0 77
Me@0 78 void
Me@1 79 create_sched_slots( MasterEnv *masterEnv )
Me@8 80 { SchedSlot **schedSlots, **filledSlots;
Me@0 81 int i;
Me@0 82
Me@8 83 schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
Me@8 84 filledSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
Me@8 85 masterEnv->schedSlots = schedSlots;
Me@8 86 masterEnv->filledSlots = filledSlots;
Me@8 87
Me@1 88 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
Me@0 89 {
Me@8 90 schedSlots[i] = malloc( sizeof(SchedSlot) );
Me@8 91
Me@1 92 //Set state to mean "handling requests done, slot needs filling"
Me@8 93 schedSlots[i]->workIsDone = FALSE;
Me@8 94 schedSlots[i]->needsProcrAssigned = TRUE;
Me@0 95 }
Me@0 96 }
Me@0 97
Me@8 98
Me@0 99 /*Semantic layer calls this when it want the system to start running..
Me@0 100 *
Me@0 101 *This creates the core loops, pins them to physical cores, gives them the
Me@0 102 * pointer to the workQ, and starts them running.
Me@0 103 */
Me@12 104 void
Me@0 105 VMS__start()
Me@12 106 { int coreIdx;
Me@0 107
Me@8 108 //Create the win threads that animate the core loops
Me@8 109 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
Me@8 110 {
Me@12 111 coreLoopThdParams[coreIdx] = (ThdParams *)malloc( sizeof(ThdParams) );
Me@12 112 coreLoopThdParams[coreIdx]->coreNum = coreIdx;
Me@0 113
Me@12 114 coreLoopThdHandles[coreIdx] =
Me@12 115 CreateThread ( NULL, // Security attributes
Me@12 116 0, // Stack size
Me@12 117 coreLoop,
Me@12 118 coreLoopThdParams[coreIdx],
Me@12 119 CREATE_SUSPENDED,
Me@12 120 &(coreLoopThdIds[coreIdx])
Me@12 121 );
Me@12 122 ResumeThread( coreLoopThdHandles[coreIdx] ); //starts thread
Me@8 123 }
Me@8 124 }
Me@0 125
Me@0 126
Me@0 127
/*Creates a new virtual processor whose first work-unit starts at fnPtr.
 *
 *Create stack, then create __cdecl structure on it and put initialData and
 * pointer to the new structure instance into the parameter positions on
 * the stack
 *Then put function pointer into nextInstrPt -- the stack is setup in std
 * call structure, so jumping to function ptr is same as a GCC generated
 * function call
 *No need to save registers on old stack frame, because there's no old
 * animator state to return to --
 *
 *NOTE(review): pointers are written through (int) casts into 4-byte stack
 * slots, so this only works on a 32-bit target -- confirm the build is
 * 32-bit before reusing.
 *NOTE(review): both malloc results are unchecked -- OOM here dereferences
 * NULL.
 */
VirtProcr *
VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
 { VirtProcr *newPr;
   char *stackLocs, *stackPtr;

   newPr = malloc( sizeof(VirtProcr) );
   newPr->procrID = numProcrsCreated++; //NOTE(review): not atomic -- assumes
                                        // procrs are created single-threaded
   newPr->nextInstrPt = fnPtr;          //core loop jumps here to animate
   newPr->initialData = initialData;

      //alloc stack locations, make stackPtr be the highest addr minus room
      // for 2 params. Put initData at stackPtr, animatingPr just above
   stackLocs = malloc( 0x100000 ); //1 meg stack -- default Win thread's size
   stackPtr = ( (char *)stackLocs + 0x100000 - 0x8 );
      //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp
   *( (int *)stackPtr + 1) = (int) newPr; //rightmost param -- 32bit pointer
   *( (int *)stackPtr ) = (int) initialData; //next param to left
   newPr->stackPtr = stackPtr; //core loop will switch to this, then
   newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr

   return newPr;
 }
Me@8 160
Me@8 161
Me@8 162 /*This inserts the semantic-layer's data into the standard VMS carrier
Me@8 163 */
Me@8 164 inline void
Me@8 165 VMS__send_sem_request( void *semReqData, VirtProcr *callingPr )
Me@8 166 { SlaveReqst *req;
Me@8 167
Me@8 168 req = malloc( sizeof(SlaveReqst) );
Me@8 169 req->slaveFrom = callingPr;
Me@8 170 req->semReqData = semReqData;
Me@8 171 req->nextRequest = callingPr->requests;
Me@8 172 callingPr->requests = req;
Me@0 173 }
Me@0 174
/*there is a label inside this function -- save the addr of this label in
 * the callingPr struc, as the pick-up point from which to start the next
 * work-unit for that procr. If turns out have to save registers, then
 * save them in the procr struc too. Then do assembly jump to the CoreLoop's
 * "done with work-unit" label. The procr struc is in the request in the
 * slave that animated the just-ended work-unit, so all the state is saved
 * there, and will get passed along, inside the request handler, to the
 * next work-unit for that procr.
 *
 *NOTE(review): the asm's output constraints "=g"(stackPtr) and
 * "=g"(framePtr) write esp/ebp into the LOCAL variables -- NOT through the
 * &(callingPr->stackPtr)/&(callingPr->framePtr) addresses computed below --
 * and the jmp exits the asm before outputs would be committed anyway, so
 * the VirtProcr struct never actually receives the saved stack/frame
 * pointers. An indirect jump through an operand also needs AT&T syntax
 * "jmp *%2". Flagging rather than fixing, since this context-switch code
 * is mid-debug and needs testing on the 32-bit target.
 */
void
VMS__suspend_processor( VirtProcr *callingPr )
 { void *jmpPt, *stackPtr, *framePtr;

      //GCC labels-as-values extension: next work-unit resumes at ResumePt
   callingPr->nextInstrPt = &&ResumePt;

      //return ownership of the virt procr and sched slot to Master virt pr
   callingPr->schedSlot->workIsDone = TRUE;

   jmpPt = callingPr->coreLoopStartPt;
   stackPtr = &(callingPr->stackPtr);   //addresses of the struct fields --
   framePtr = &(callingPr->framePtr);   // see NOTE(review) above

      //put all regs in the clobber list to make sure GCC has saved all
      // so safe to jump to core loop, where they *will* get clobbered
   asm volatile("movl %%esp, %0; \
                 movl %%ebp, %1; \
                 jmp %2 "
      /* outputs */ : "=g" (stackPtr), "=g" (framePtr)
      /* inputs  */ : "g" (jmpPt)
      /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi"
               ); //list everything as clobbered to force GCC to save all
                  // live vars that are in regs on stack before this
                  // assembly, so that stack pointer is correct, before jmp

 ResumePt:
   return;
 }
Me@0 212
/*TODO(stub): currently empty. Presumably should tear down the calling
 * virtual processor -- free its stack and struct, and release its sched
 * slot -- confirm intended semantics before implementing.
 */
void
VMS__dissipate_animating_processor( VirtProcr *animatingPr )
 {

 }
Me@1 218
/*This runs in main thread -- so can only signal to the core loop to shut
 * itself down --
 *
 *Want the master to decide when to shut down -- when semantic layer tells it
 * to -- say, when all the application-virtual processors have dissipated.
 *
 *Maybe return a special code from scheduling plug-in.. master checks and
 * when sees, it shuts down the core loops -- does this by scheduling a
 * special virt processor whose next instr pt is the core-end label.
 */
void
VMS__shutdown()
 { int coreIdx;

      //TODO(stub): signal each core loop to shut itself down -- loop body
      // is empty. (The previous comment here, "Create the win threads
      // that animate the core loops", was a copy-paste from VMS__start.)
   for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
    {

    }
 }
Me@12 239
Me@12 240
Me@12 241
Me@12 242 inline TSCount getTSCount()
Me@12 243 { unsigned int low, high;
Me@12 244 TSCount out;
Me@12 245
Me@12 246 saveTimeStampCountInto( low, high );
Me@12 247 out = high;
Me@12 248 out = (out << 32) + low;
Me@12 249 return out;
Me@12 250 }
Me@12 251